diff --git a/LICENSE.txt b/LICENSE.txt index 9819dea0464..2e08754fea8 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -765,6 +765,7 @@ hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery Apache HBase - Server which contains JQuery minified javascript library version 1.8.3 +Microsoft JDBC Driver for SQLServer - version 6.2.1.jre7 -------------------------------------------------------------------------------- Copyright 2005, 2012, 2013 jQuery Foundation and other contributors, https://jquery.org/ diff --git a/dev-support/docker/hadoop_env_checks.sh b/dev-support/docker/hadoop_env_checks.sh index 910c802291a..5cb4b2b3b95 100755 --- a/dev-support/docker/hadoop_env_checks.sh +++ b/dev-support/docker/hadoop_env_checks.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file diff --git a/dev-support/findHangingTest.sh b/dev-support/findHangingTest.sh index f7ebe47f093..fcda9ffb8c9 100644 --- a/dev-support/findHangingTest.sh +++ b/dev-support/findHangingTest.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash ## # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file diff --git a/dev-support/verify-xml.sh b/dev-support/verify-xml.sh index abab4e69f2b..9ef456a777d 100755 --- a/dev-support/verify-xml.sh +++ b/dev-support/verify-xml.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash ## # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml index 74ce9bcf768..289061f8add 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml @@ -61,6 +61,7 @@ stop-yarn.sh start-yarn.cmd stop-yarn.cmd + FederationStateStore**/** 0755 diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml index e495a69e1ac..2f31fa6df27 100644 --- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml @@ -46,7 +46,6 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4 org.codehaus.mojo diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml index 68d1f5b8d35..0e23db939c5 100644 --- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml +++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml @@ -50,7 +50,6 @@ org.apache.maven.plugins maven-enforcer-plugin - 1.4 org.codehaus.mojo diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml index 93811ad698d..5cf1fad8c8b 100644 --- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml +++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml @@ -126,6 +126,10 @@ javax.xml.bind jaxb-api + + xml-apis + xml-apis + org.apache.avro avro @@ -624,6 +628,13 @@ **/*.class + + org.apache.hadoop:hadoop-mapreduce-client-jobclient:* + + testjar/* + testshell/* + + @@ -646,6 
+657,7 @@ org/junit/* org/junit/**/* + org/ietf/jgss/* org/omg/**/* org/w3c/dom/* @@ -654,6 +666,13 @@ org/xml/sax/**/* + + contribs/ + ${shaded.dependency.prefix}.contribs. + + **/pom.xml + + com/ ${shaded.dependency.prefix}.com. @@ -691,6 +710,13 @@ io/serializations + + javassist/ + ${shaded.dependency.prefix}.javassist. + + **/pom.xml + + javax/el/ ${shaded.dependency.prefix}.javax.el. @@ -712,6 +738,13 @@ **/pom.xml + + jersey/ + ${shaded.dependency.prefix}.jersey. + + **/pom.xml + + net/ ${shaded.dependency.prefix}.net. diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml index 2f64152b8b8..24c6b7a8365 100644 --- a/hadoop-client-modules/hadoop-client-runtime/pom.xml +++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml @@ -174,13 +174,6 @@ org/apache/jasper/compiler/Localizer.class - - - xerces:xercesImpl - - META-INF/services/* - - com.sun.jersey:* diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml index de76afbcbc3..4bafd8e0223 100644 --- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml +++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml @@ -323,6 +323,10 @@ + + + + + + fs.wasb.impl + org.apache.hadoop.fs.azure.NativeAzureFileSystem + The implementation class of the Native Azure Filesystem + + + + fs.wasbs.impl + org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure + The implementation class of the Secure Native Azure Filesystem + + fs.azure.secure.mode false @@ -2574,11 +2586,16 @@ ClientCredential Defines Azure Active Directory OAuth2 access token provider type. - Supported types are ClientCredential, RefreshToken, and Custom. + Supported types are ClientCredential, RefreshToken, MSI, DeviceCode, + and Custom. The ClientCredential type requires property fs.adl.oauth2.client.id, fs.adl.oauth2.credential, and fs.adl.oauth2.refresh.url. The RefreshToken type requires property fs.adl.oauth2.client.id and fs.adl.oauth2.refresh.token. + The MSI type requires properties fs.adl.oauth2.msi.port and + fs.adl.oauth2.msi.tenantguid. + The DeviceCode type requires property + fs.adl.oauth2.devicecode.clientapp.id. The Custom type requires property fs.adl.oauth2.access.token.provider. @@ -2615,6 +2632,36 @@ + + fs.adl.oauth2.msi.port + + + The localhost port for the MSI token service. This is the port specified + when creating the Azure VM. + Used by MSI token provider. + + + + + fs.adl.oauth2.msi.tenantguid + + + The tenant guid for the Azure AAD tenant under which the azure data lake + store account is created. + Used by MSI token provider. + + + + + fs.adl.oauth2.devicecode.clientapp.id + + + The app id of the AAD native app in whose context the auth request + should be made. + Used by DeviceCode token provider. + + + @@ -2663,4 +2710,50 @@ This determines the number of open file handles. + + + Host:Port of the ZooKeeper server to be used. + + hadoop.zk.address + + + + + Number of tries to connect to ZooKeeper. + hadoop.zk.num-retries + 1000 + + + + Retry interval in milliseconds when connecting to ZooKeeper. + + hadoop.zk.retry-interval-ms + 1000 + + + + ZooKeeper session timeout in milliseconds. Session expiration + is managed by the ZooKeeper cluster itself, not by the client. This value is + used by the cluster to determine when the client's session expires. 
+ Expiration happens when the cluster does not hear from the client within + the specified session timeout period (i.e. no heartbeat). + hadoop.zk.timeout-ms + 10000 + + + + ACLs to be used for ZooKeeper znodes. + hadoop.zk.acl + world:anyone:rwcda + + + + + Specify the auths to be used for the ACLs specified in hadoop.zk.acl. + This takes a comma-separated list of authentication mechanisms, each of the + form 'scheme:auth' (the same syntax used for the 'addAuth' command in + the ZK CLI). + + hadoop.zk.auth + diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md index 0a594abe0c9..71eec75be92 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md @@ -676,11 +676,11 @@ stat Usage: `hadoop fs -stat [format] <path> ...` -Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner(%u), and modification date (%y, %Y). %y shows UTC date as "yyyy-MM-dd HH:mm:ss" and %Y shows milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default. +Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner (%u), access date (%x, %X), and modification date (%y, %Y). %x and %y show UTC date as "yyyy-MM-dd HH:mm:ss", and %X and %Y show milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default. Example: -* `hadoop fs -stat "%F %a %u:%g %b %y %n" /file` +* `hadoop fs -stat "type:%F perm:%a %u:%g size:%b mtime:%y atime:%x name:%n" /file` Exit Code: Returns 0 on success and -1 on error.
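For readers who want the same fields programmatically, here is a minimal Java sketch of reading the values behind the `%x`/`%y` specifiers through the stock `FileStatus` API; the `/file` path and output labels are illustrative only and not part of this patch.

```java
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Prints roughly what `hadoop fs -stat "type:%F perm:%A %u:%g size:%b mtime:%y atime:%x name:%n"` reports. */
public class StatSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus st = fs.getFileStatus(new Path("/file"));   // illustrative path

    SimpleDateFormat utc = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    utc.setTimeZone(TimeZone.getTimeZone("UTC"));

    System.out.println("type:  " + (st.isDirectory() ? "directory" : "regular file")); // %F
    System.out.println("perm:  " + st.getPermission());                                // %A (symbolic)
    System.out.println("owner: " + st.getOwner() + ":" + st.getGroup());               // %u:%g
    System.out.println("size:  " + st.getLen());                                       // %b
    System.out.println("mtime: " + utc.format(new Date(st.getModificationTime())));    // %y
    System.out.println("atime: " + utc.format(new Date(st.getAccessTime())));          // %x
  }
}
```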
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md index 4b89bc2a581..4543facc441 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md @@ -145,6 +145,9 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a | `CreateSymlinkOps` | Total number of createSymlink operations | | `GetLinkTargetOps` | Total number of getLinkTarget operations | | `FilesInGetListingOps` | Total number of files and directories listed by directory listing operations | +| `SuccessfulReReplications` | Total number of successful block re-replications | +| `NumTimesReReplicationNotScheduled` | Total number of times that failed to schedule a block re-replication | +| `TimeoutReReplications` | Total number of timed out block re-replications | | `AllowSnapshotOps` | Total number of allowSnapshot operations | | `DisallowSnapshotOps` | Total number of disallowSnapshot operations | | `CreateSnapshotOps` | Total number of createSnapshot operations | @@ -157,8 +160,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a | `SyncsNumOps` | Total number of Journal syncs | | `SyncsAvgTime` | Average time of Journal syncs in milliseconds | | `TransactionsBatchedInSync` | Total number of Journal transactions batched in sync | -| `BlockReportNumOps` | Total number of processing block reports from DataNode | -| `BlockReportAvgTime` | Average time of processing block reports in milliseconds | +| `StorageBlockReportNumOps` | Total number of processing block reports from individual storages in DataNode | +| `StorageBlockReportAvgTime` | Average time of processing block reports in milliseconds | | `CacheReportNumOps` | Total number of processing cache reports from DataNode | | `CacheReportAvgTime` | Average time of processing cache reports in milliseconds | | `SafeModeTime` | The interval between FSNameSystem starts and the last time safemode leaves in milliseconds.  (sometimes not equal to the time in SafeMode, see [HDFS-5156](https://issues.apache.org/jira/browse/HDFS-5156)) | @@ -176,6 +179,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a | `GenerateEDEKTimeAvgTime` | Average time of generating EDEK in milliseconds | | `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK | | `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds | +| `ResourceCheckTime`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. | +| `StorageBlockReport`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. | FSNamesystem ------------ @@ -213,7 +218,15 @@ Each metrics record contains tags such as HAState and Hostname as additional inf | `PendingDataNodeMessageCount` | (HA-only) Current number of pending block-related messages for later processing in the standby NameNode | | `MillisSinceLastLoadedEdits` | (HA-only) Time in milliseconds since the last time standby NameNode load edit log. 
In active NameNode, set to 0 | | `BlockCapacity` | Current number of block capacity | +| `NumLiveDataNodes` | Number of datanodes which are currently live | +| `NumDeadDataNodes` | Number of datanodes which are currently dead | +| `NumDecomLiveDataNodes` | Number of datanodes which have been decommissioned and are now live | +| `NumDecomDeadDataNodes` | Number of datanodes which have been decommissioned and are now dead | +| `NumDecommissioningDataNodes` | Number of datanodes in decommissioning state | +| `VolumeFailuresTotal` | Total number of volume failures across all Datanodes | +| `EstimatedCapacityLostTotal` | An estimate of the total capacity lost due to volume failures | | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed heartbeat | +| `NumStaleStorages` | Number of storages marked as content stale (after NameNode restart/failover before first block report is received) | | `MissingReplOneBlocks` | Current number of missing blocks with replication factor 1 | | `NumFilesUnderConstruction` | Current number of files under construction | | `NumActiveClients` | Current number of active clients holding lease | @@ -224,6 +237,9 @@ Each metrics record contains tags such as HAState and Hostname as additional inf | `TotalSyncTimes` | Total number of milliseconds spent by various edit logs in sync operation| | `NameDirSize` | NameNode name directories size in bytes | | `NumTimedOutPendingReconstructions` | The number of timed out reconstructions. Not the number of unique blocks that timed out. | +| `NumInMaintenanceLiveDataNodes` | Number of live Datanodes which are in maintenance state | +| `NumInMaintenanceDeadDataNodes` | Number of dead Datanodes which are in maintenance state | +| `NumEnteringMaintenanceDataNodes` | Number of Datanodes that are entering the maintenance state | | `FSN(Read|Write)Lock`*OperationName*`NumOps` | Total number of acquiring lock by operations | | `FSN(Read|Write)Lock`*OperationName*`AvgTime` | Average time of holding the lock by operations in milliseconds | diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md index e1aad5ac8c7..5a62c4fc9dd 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md @@ -142,12 +142,9 @@ In a typical cluster HDFS and YARN services will be launched as the system `hdfs hadoop.security.auth_to_local - RULE:[2:$1@$0](nn/.*@.*REALM.TLD)s/.*/hdfs/ - RULE:[2:$1@$0](jn/.*@.*REALM.TLD)s/.*/hdfs/ - RULE:[2:$1@$0](dn/.*@.*REALM.TLD)s/.*/hdfs/ - RULE:[2:$1@$0](nm/.*@.*REALM.TLD)s/.*/yarn/ - RULE:[2:$1@$0](rm/.*@.*REALM.TLD)s/.*/yarn/ - RULE:[2:$1@$0](jhs/.*@.*REALM.TLD)s/.*/mapred/ + RULE:[2:$1/$2@$0]([ndj]n/.*@REALM.TLD)s/.*/hdfs/ + RULE:[2:$1/$2@$0]([rn]m/.*@REALM.TLD)s/.*/yarn/ + RULE:[2:$1/$2@$0](jhs/.*@REALM.TLD)s/.*/mapred/ DEFAULT diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md b/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md index 7f115c2ecfe..eb3b1227236 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md @@ -78,13 +78,27 @@ A special value of `*` implies that all users are allowed to access the service. If access control list is not defined for a service, the value of `security.service.authorization.default.acl` is applied. 
If `security.service.authorization.default.acl` is not defined, `*` is applied. -* Blocked Access Control ListsIn some cases, it is required to specify blocked access control list for a service. This specifies the list of users and groups who are not authorized to access the service. The format of the blocked access control list is same as that of access control list. The blocked access control list can be specified via `$HADOOP_CONF_DIR/hadoop-policy.xml`. The property name is derived by suffixing with ".blocked". +### Blocked Access Control Lists - Example: The property name of blocked access control list for `security.client.protocol.acl` will be `security.client.protocol.acl.blocked` +In some cases, it is required to specify blocked access control list for a service. This specifies the list of users and groups who are not authorized to access the service. The format of the blocked access control list is same as that of access control list. The blocked access control list can be specified via `$HADOOP_CONF_DIR/hadoop-policy.xml`. The property name is derived by suffixing with ".blocked". - For a service, it is possible to specify both an access control list and a blocked control list. A user is authorized to access the service if the user is in the access control and not in the blocked access control list. +Example: The property name of blocked access control list for `security.client.protocol.acl` will be `security.client.protocol.acl.blocked` - If blocked access control list is not defined for a service, the value of `security.service.authorization.default.acl.blocked` is applied. If `security.service.authorization.default.acl.blocked` is not defined, empty blocked access control list is applied. +For a service, it is possible to specify both an access control list and a blocked control list. A user is authorized to access the service if the user is in the access control and not in the blocked access control list. + +If blocked access control list is not defined for a service, the value of `security.service.authorization.default.acl.blocked` is applied. If `security.service.authorization.default.acl.blocked` is not defined, empty blocked access control list is applied. + +### Access Control using Lists of IP Addresses, Host Names and IP Ranges + +Access to a service can be controlled based on the ip address of the client accessing the service. It is possible to restrict access to a service from a set of machines by specifying a list of ip addresses, host names and ip ranges. The property name for each service is derived from the corresponding acl's property name. If the property name of acl is security.client.protocol.acl, property name for the hosts list will be security.client.protocol.hosts. + +If hosts list is not defined for a service, the value of `security.service.authorization.default.hosts` is applied. If `security.service.authorization.default.hosts` is not defined, `*` is applied. + +It is possible to specify a blocked list of hosts. Only those machines which are in the hosts list, but not in the blocked hosts list will be granted access to the service. The property name is derived by suffixing with ".blocked". + +Example: The property name of blocked hosts list for `security.client.protocol.hosts` will be `security.client.protocol.hosts.blocked` + +If blocked hosts list is not defined for a service, the value of `security.service.authorization.default.hosts.blocked` is applied. 
If `security.service.authorization.default.hosts.blocked` is not defined, empty blocked hosts list is applied. ### Refreshing Service Level Authorization Configuration @@ -100,16 +114,6 @@ Refresh the service-level authorization configuration for the ResourceManager: Of course, one can use the `security.refresh.policy.protocol.acl` property in `$HADOOP_CONF_DIR/hadoop-policy.xml` to restrict access to the ability to refresh the service-level authorization configuration to certain users/groups. -* Access Control using list of ip addresses, host names and ip rangesAccess to a service can be controlled based on the ip address of the client accessing the service. It is possible to restrict access to a service from a set of machines by specifying a list of ip addresses, host names and ip ranges. The property name for each service is derived from the corresponding acl's property name. If the property name of acl is security.client.protocol.acl, property name for the hosts list will be security.client.protocol.hosts. - - If hosts list is not defined for a service, the value of `security.service.authorization.default.hosts` is applied. If `security.service.authorization.default.hosts` is not defined, `*` is applied. - - It is possible to specify a blocked list of hosts. Only those machines which are in the hosts list, but not in the blocked hosts list will be granted access to the service. The property name is derived by suffixing with ".blocked". - - Example: The property name of blocked hosts list for `security.client.protocol.hosts` will be `security.client.protocol.hosts.blocked` - - If blocked hosts list is not defined for a service, the value of `security.service.authorization.default.hosts.blocked` is applied. If `security.service.authorization.default.hosts.blocked` is not defined, empty blocked hosts list is applied. - ### Examples Allow only users `alice`, `bob` and users in the `mapreduce` group to submit jobs to the MapReduce cluster: diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md b/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md index 97f9e9aa92a..ffe2aec96ab 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md @@ -180,11 +180,11 @@ It is also possible to add the new subcommands to the usage output. The `hadoop_ ```bash if [[ "${HADOOP_SHELL_EXECNAME}" = "yarn" ]]; then - hadoop_add_subcommand "hello" "Print some text to the screen" + hadoop_add_subcommand "hello" client "Print some text to the screen" fi ``` -This functionality may also be use to override the built-ins. For example, defining: +We set the subcommand type to be "client" as there are no special restrictions, extra capabilities, etc. This functionality may also be use to override the built-ins. For example, defining: ```bash function hdfs_subcommand_fetchdt diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md index b56666c4a26..1e522c7782c 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md @@ -553,7 +553,7 @@ on a path that exists and is a file. Instead the operation returns false. 
FS' = FS result = False -### `FSDataOutputStream create(Path, ...)` +### `FSDataOutputStream create(Path, ...)` FSDataOutputStream create(Path p, @@ -616,7 +616,24 @@ this precondition fails. * Not covered: symlinks. The resolved path of the symlink is used as the final path argument to the `create()` operation -### `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)` +### `FSDataOutputStreamBuilder createFile(Path p)` + +Make a `FSDataOutputStreamBuilder` to specify the parameters to create a file. + +#### Implementation Notes + +`createFile(p)` returns a `FSDataOutputStreamBuilder` only and does not make +change on filesystem immediately. When `build()` is invoked on the `FSDataOutputStreamBuilder`, +the builder parameters are verified and [`create(Path p)`](#FileSystem.create) +is invoked on the underlying filesystem. `build()` has the same preconditions +and postconditions as [`create(Path p)`](#FileSystem.create). + +* Similar to [`create(Path p)`](#FileSystem.create), files are overwritten +by default, unless specify `builder.overwrite(false)`. +* Unlike [`create(Path p)`](#FileSystem.create), missing parent directories are +not created by default, unless specify `builder.recursive()`. + +### `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)` Implementations without a compliant call SHOULD throw `UnsupportedOperationException`. @@ -634,6 +651,18 @@ Implementations without a compliant call SHOULD throw `UnsupportedOperationExcep Return: `FSDataOutputStream`, which can update the entry `FS.Files[p]` by appending data to the existing list. +### `FSDataOutputStreamBuilder appendFile(Path p)` + +Make a `FSDataOutputStreamBuilder` to specify the parameters to append to an +existing file. + +#### Implementation Notes + +`appendFile(p)` returns a `FSDataOutputStreamBuilder` only and does not make +change on filesystem immediately. When `build()` is invoked on the `FSDataOutputStreamBuilder`, +the builder parameters are verified and [`append()`](#FileSystem.append) is +invoked on the underlying filesystem. `build()` has the same preconditions and +postconditions as [`append()`](#FileSystem.append). ### `FSDataInputStream open(Path f, int bufferSize)` @@ -1210,3 +1239,27 @@ try { It is notable that this is *not* done in the Hadoop codebase. This does not imply that robust loops are not recommended —more that the concurrency problems were not considered during the implementation of these loops. + + +## interface `StreamCapabilities` + +The `StreamCapabilities` provides a way to programmatically query the +capabilities that an `OutputStream` supports. + +```java +public interface StreamCapabilities { + boolean hasCapability(String capability); +} +``` + +### `boolean hasCapability(capability)` + +Return true if the `OutputStream` has the desired capability. + +The caller can query the capabilities of a stream using a string value. +It currently supports to query: + + * `StreamCapabilties.HFLUSH` ("*hflush*"): the capability to flush out the data + in client's buffer. + * `StreamCapabilities.HSYNC` ("*hsync*"): capability to flush out the data in + client's buffer and the disk device. 
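A short usage sketch of the probe described above, assuming the `FSDataOutputStream` returned by the filesystem exposes the interface's `hasCapability` method; the path and payload are made up for illustration.

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Probe a stream's capabilities before relying on its durability guarantees. */
public class CapabilityProbe {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    byte[] data = "example".getBytes(StandardCharsets.UTF_8);

    try (FSDataOutputStream out = fs.create(new Path("/tmp/capability-demo"))) {
      out.write(data);
      if (out.hasCapability("hsync")) {          // StreamCapabilities.HSYNC
        out.hsync();                             // flush client buffer and the disk device
      } else if (out.hasCapability("hflush")) {  // StreamCapabilities.HFLUSH
        out.hflush();                            // flush data out of the client's buffer
      } else {
        out.flush();                             // best-effort fallback only
      }
    }
  }
}
```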
\ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md new file mode 100644 index 00000000000..4ea1fd168f2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md @@ -0,0 +1,182 @@ + + + + + + +# class `org.apache.hadoop.fs.FSDataOutputStreamBuilder` + + + +Builder pattern for `FSDataOutputStream` and its subclasses. It is used to +create a new file or open an existing file on `FileSystem` for write. + +## Invariants + +The `FSDataOutputStreamBuilder` interface does not validate parameters +and modify the state of `FileSystem` until [`build()`](#Builder.build) is +invoked. + +## Implementation-agnostic parameters. + +### `FSDataOutputStreamBuilder create()` + +Specify `FSDataOutputStreamBuilder` to create a file on `FileSystem`, equivalent +to `CreateFlag#CREATE`. + +### `FSDataOutputStreamBuilder append()` + +Specify `FSDataOutputStreamBuilder` to append to an existing file on +`FileSystem`, equivalent to `CreateFlag#APPEND`. + +### `FSDataOutputStreamBuilder overwrite(boolean overwrite)` + +Specify `FSDataOutputStreamBuilder` to overwrite an existing file or not. If +giving `overwrite==true`, it truncates an existing file, equivalent to +`CreateFlag#OVERWITE`. + +### `FSDataOutputStreamBuilder permission(FsPermission permission)` + +Set permission for the file. + +### `FSDataOutputStreamBuilder bufferSize(int bufSize)` + +Set the size of the buffer to be used. + +### `FSDataOutputStreamBuilder replication(short replica)` + +Set the replication factor. + +### `FSDataOutputStreamBuilder blockSize(long size)` + +Set block size in bytes. + +### `FSDataOutputStreamBuilder recursive()` + +Create parent directories if they do not exist. + +### `FSDataOutputStreamBuilder progress(Progresable prog)` + +Set the facility of reporting progress. + +### `FSDataOutputStreamBuilder checksumOpt(ChecksumOpt chksumOpt)` + +Set checksum opt. + +### Set optional or mandatory parameters + + FSDataOutputStreamBuilder opt(String key, ...) + FSDataOutputStreamBuilder must(String key, ...) + +Set optional or mandatory parameters to the builder. Using `opt()` or `must()`, +client can specify FS-specific parameters without inspecting the concrete type +of `FileSystem`. + + // Don't + if (fs instanceof FooFileSystem) { + FooFileSystem fs = (FooFileSystem) fs; + out = dfs.createFile(path) + .optionA() + .optionB("value") + .cache() + .build() + } else if (fs instanceof BarFileSystem) { + ... + } + + // Do + out = fs.createFile(path) + .permission(perm) + .bufferSize(bufSize) + .opt("foofs:option.a", true) + .opt("foofs:option.b", "value") + .opt("barfs:cache", true) + .must("foofs:cache", true) + .must("barfs:cache-size", 256 * 1024 * 1024) + .build(); + +#### Implementation Notes + +The concrete `FileSystem` and/or `FSDataOutputStreamBuilder` implementation +MUST verify that implementation-agnostic parameters (i.e., "syncable") or +implementation-specific parameters (i.e., "foofs:cache") +are supported. `FileSystem` will satisfy optional parameters (via `opt(key, ...)`) +on best effort. If the mandatory parameters (via `must(key, ...)`) can not be satisfied +in the `FileSystem`, `IllegalArgumentException` should be thrown in `build()`. + +The behavior of resolving the conflicts between the parameters set by +builder methods (i.e., `bufferSize()`) and `opt()`/`must()` is undefined. 
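
To make the builder flow concrete, here is a compact sketch that exercises `createFile()` and `appendFile()` as specified in this document; the path is hypothetical and the target filesystem is assumed to support append.

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Create a file with the builder, then append to it with the builder. */
public class BuilderSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/builder-demo.txt");   // hypothetical path

    // createFile(): overwrite is on by default; parent directories are only
    // created when recursive() is requested.
    try (FSDataOutputStream out = fs.createFile(p)
        .recursive()
        .replication((short) 1)
        .bufferSize(4096)
        .build()) {
      out.write("first line\n".getBytes(StandardCharsets.UTF_8));
    }

    // appendFile(): build() invokes append() on the underlying filesystem,
    // so the target filesystem must support append.
    try (FSDataOutputStream out = fs.appendFile(p).build()) {
      out.write("second line\n".getBytes(StandardCharsets.UTF_8));
    }
  }
}
```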
+ +## HDFS-specific parameters. + +`HdfsDataOutputStreamBuilder extends FSDataOutputStreamBuilder` provides additional +HDFS-specific parameters, for further customize file creation / append behavior. + +### `FSDataOutpuStreamBuilder favoredNodes(InetSocketAddress[] nodes)` + +Set favored DataNodes for new blocks. + +### `FSDataOutputStreamBuilder syncBlock()` + +Force closed blocks to the disk device. See `CreateFlag#SYNC_BLOCK` + +### `FSDataOutputStreamBuilder lazyPersist()` + +Create the block on transient storage if possible. + +### `FSDataOutputStreamBuilder newBlock()` + +Append data to a new block instead of the end of the last partial block. + +### `FSDataOutputStreamBuilder noLocalWrite()` + +Advise that a block replica NOT be written to the local DataNode. + +### `FSDataOutputStreamBuilder ecPolicyName()` + +Enforce the file to be a striped file with erasure coding policy 'policyName', +no matter what its parent directory's replication or erasure coding policy is. + +### `FSDataOutputStreamBuilder replicate()` + +Enforce the file to be a replicated file, no matter what its parent directory's +replication or erasure coding policy is. + +## Builder interface + +### `FSDataOutputStream build()` + +Create a new file or append an existing file on the underlying `FileSystem`, +and return `FSDataOutputStream` for write. + +#### Preconditions + +The following combinations of parameters are not supported: + + if APPEND|OVERWRITE: raise HadoopIllegalArgumentException + if CREATE|APPEND|OVERWRITE: raise HadoopIllegalArgumentExdeption + +`FileSystem` may reject the request for other reasons and throw `IOException`, +see `FileSystem#create(path, ...)` and `FileSystem#append()`. + +#### Postconditions + + FS' where : + FS'.Files'[p] == [] + ancestors(p) is-subset-of FS'.Directories' + + result = FSDataOutputStream + +The result is `FSDataOutputStream` to be used to write data to filesystem. diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md index 66a7eb3f364..532b6c7b688 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md @@ -33,5 +33,6 @@ HDFS as these are commonly expected by Hadoop client applications. 1. [Model](model.html) 1. [FileSystem class](filesystem.html) 1. [FSDataInputStream class](fsdatainputstream.html) +1. [FSDataOutputStreamBuilder class](fsdataoutputstreambuilder.html) 2. [Testing with the Filesystem specification](testing.html) 2. [Extending the specification and its tests](extending.html) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java index da37e68e2eb..d0e0a351b53 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java @@ -103,6 +103,12 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase { xmlPrefixToSkipCompare.add("fs.s3n."); xmlPrefixToSkipCompare.add("s3native."); + // WASB properties are in a different subtree. 
+ // - org.apache.hadoop.fs.azure.NativeAzureFileSystem + xmlPrefixToSkipCompare.add("fs.wasb.impl"); + xmlPrefixToSkipCompare.add("fs.wasbs.impl"); + xmlPrefixToSkipCompare.add("fs.azure."); + // ADL properties are in a different subtree // - org.apache.hadoop.hdfs.web.ADLConfKeys xmlPrefixToSkipCompare.add("adl."); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 5ced541af3b..91f25fa1cad 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -36,6 +36,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; @@ -48,6 +49,7 @@ import static org.junit.Assert.assertArrayEquals; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration.IntegerRanges; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; @@ -55,6 +57,9 @@ import org.apache.hadoop.test.GenericTestUtils; import static org.apache.hadoop.util.PlatformName.IBM_JAVA; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.Logger; +import org.apache.log4j.spi.LoggingEvent; import org.mockito.Mockito; public class TestConfiguration extends TestCase { @@ -78,6 +83,11 @@ public class TestConfiguration extends TestCase { /** Four apostrophes. */ public static final String ESCAPED = "''''"; + private static final String SENSITIVE_CONFIG_KEYS = + CommonConfigurationKeysPublic.HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS; + + private BufferedWriter out; + @Override protected void setUp() throws Exception { super.setUp(); @@ -86,6 +96,9 @@ public class TestConfiguration extends TestCase { @Override protected void tearDown() throws Exception { + if(out != null) { + out.close(); + } super.tearDown(); new File(CONFIG).delete(); new File(CONFIG2).delete(); @@ -151,16 +164,189 @@ public class TestConfiguration extends TestCase { startConfig(); declareProperty("prop", "A", "A"); endConfig(); - - InputStream in1 = new ByteArrayInputStream(writer.toString().getBytes()); + + InputStream in1 = Mockito.spy(new ByteArrayInputStream( + writer.toString().getBytes())); Configuration conf = new Configuration(false); conf.addResource(in1); assertEquals("A", conf.get("prop")); + Mockito.verify(in1, Mockito.times(1)).close(); InputStream in2 = new ByteArrayInputStream(writer.toString().getBytes()); conf.addResource(in2); assertEquals("A", conf.get("prop")); } + public void testFinalWarnings() throws Exception { + // Make a configuration file with a final property + StringWriter writer = new StringWriter(); + out = new BufferedWriter(writer); + startConfig(); + declareProperty("prop", "A", "A", true); + endConfig(); + byte[] bytes = writer.toString().getBytes(); + InputStream in1 = new ByteArrayInputStream(bytes); + + // Make a second config file with a final property with a different value + writer = new StringWriter(); + out = new BufferedWriter(writer); + startConfig(); + declareProperty("prop", "BB", "BB", true); + endConfig(); + byte[] bytes2 = writer.toString().getBytes(); + InputStream in2 = new ByteArrayInputStream(bytes2); + + // Attach our 
own log appender so we can verify output + TestAppender appender = new TestAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); + + try { + // Add the 2 different resources - this should generate a warning + conf.addResource(in1); + conf.addResource(in2); + assertEquals("should see the first value", "A", conf.get("prop")); + + List events = appender.getLog(); + assertEquals("overriding a final parameter should cause logging", 1, + events.size()); + LoggingEvent loggingEvent = events.get(0); + String renderedMessage = loggingEvent.getRenderedMessage(); + assertTrue("did not see expected string inside message "+ renderedMessage, + renderedMessage.contains("an attempt to override final parameter: " + + "prop; Ignoring.")); + } finally { + // Make sure the appender is removed + logger.removeAppender(appender); + } + } + + public void testNoFinalWarnings() throws Exception { + // Make a configuration file with a final property + StringWriter writer = new StringWriter(); + out = new BufferedWriter(writer); + startConfig(); + declareProperty("prop", "A", "A", true); + endConfig(); + byte[] bytes = writer.toString().getBytes(); + // The 2 input streams both have the same config file + InputStream in1 = new ByteArrayInputStream(bytes); + InputStream in2 = new ByteArrayInputStream(bytes); + + // Attach our own log appender so we can verify output + TestAppender appender = new TestAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); + + try { + // Add the resource twice from a stream - should not generate warnings + conf.addResource(in1); + conf.addResource(in2); + assertEquals("A", conf.get("prop")); + + List events = appender.getLog(); + for (LoggingEvent loggingEvent : events) { + System.out.println("Event = " + loggingEvent.getRenderedMessage()); + } + assertTrue("adding same resource twice should not cause logging", + events.isEmpty()); + } finally { + // Make sure the appender is removed + logger.removeAppender(appender); + } + } + + + + public void testFinalWarningsMultiple() throws Exception { + // Make a configuration file with a repeated final property + StringWriter writer = new StringWriter(); + out = new BufferedWriter(writer); + startConfig(); + declareProperty("prop", "A", "A", true); + declareProperty("prop", "A", "A", true); + endConfig(); + byte[] bytes = writer.toString().getBytes(); + InputStream in1 = new ByteArrayInputStream(bytes); + + // Attach our own log appender so we can verify output + TestAppender appender = new TestAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); + + try { + // Add the resource - this should not produce a warning + conf.addResource(in1); + assertEquals("should see the value", "A", conf.get("prop")); + + List events = appender.getLog(); + for (LoggingEvent loggingEvent : events) { + System.out.println("Event = " + loggingEvent.getRenderedMessage()); + } + assertTrue("adding same resource twice should not cause logging", + events.isEmpty()); + } finally { + // Make sure the appender is removed + logger.removeAppender(appender); + } + } + + public void testFinalWarningsMultipleOverride() throws Exception { + // Make a configuration file with 2 final properties with different values + StringWriter writer = new StringWriter(); + out = new BufferedWriter(writer); + startConfig(); + declareProperty("prop", "A", "A", true); + declareProperty("prop", "BB", "BB", true); + endConfig(); + byte[] bytes = writer.toString().getBytes(); + InputStream 
in1 = new ByteArrayInputStream(bytes); + + // Attach our own log appender so we can verify output + TestAppender appender = new TestAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); + + try { + // Add the resource - this should produce a warning + conf.addResource(in1); + assertEquals("should see the value", "A", conf.get("prop")); + + List events = appender.getLog(); + assertEquals("overriding a final parameter should cause logging", 1, + events.size()); + LoggingEvent loggingEvent = events.get(0); + String renderedMessage = loggingEvent.getRenderedMessage(); + assertTrue("did not see expected string inside message "+ renderedMessage, + renderedMessage.contains("an attempt to override final parameter: " + + "prop; Ignoring.")); + } finally { + // Make sure the appender is removed + logger.removeAppender(appender); + } + } + + /** + * A simple appender for white box testing. + */ + private static class TestAppender extends AppenderSkeleton { + private final List log = new ArrayList<>(); + + @Override public boolean requiresLayout() { + return false; + } + + @Override protected void append(final LoggingEvent loggingEvent) { + log.add(loggingEvent); + } + + @Override public void close() { + } + + public List getLog() { + return new ArrayList<>(log); + } + } + /** * Tests use of multi-byte characters in property names and values. This test * round-trips multi-byte string literals through saving and loading of config @@ -701,8 +887,6 @@ public class TestConfiguration extends TestCase { new File(new File(relConfig).getParent()).delete(); } - BufferedWriter out; - public void testIntegerRanges() { Configuration conf = new Configuration(); conf.set("first", "-100"); @@ -1610,8 +1794,41 @@ public class TestConfiguration extends TestCase { assertEquals(fileResource.toString(),prop.getResource()); } } - - + + public void testDumpSensitiveProperty() throws IOException { + final String myPassword = "ThisIsMyPassword"; + Configuration testConf = new Configuration(false); + out = new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("test.password", myPassword); + endConfig(); + Path fileResource = new Path(CONFIG); + testConf.addResource(fileResource); + + try (StringWriter outWriter = new StringWriter()) { + testConf.set(SENSITIVE_CONFIG_KEYS, "password$"); + Configuration.dumpConfiguration(testConf, "test.password", outWriter); + assertFalse(outWriter.toString().contains(myPassword)); + } + } + + public void testDumpSensitiveConfiguration() throws IOException { + final String myPassword = "ThisIsMyPassword"; + Configuration testConf = new Configuration(false); + out = new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + appendProperty("test.password", myPassword); + endConfig(); + Path fileResource = new Path(CONFIG); + testConf.addResource(fileResource); + + try (StringWriter outWriter = new StringWriter()) { + testConf.set(SENSITIVE_CONFIG_KEYS, "password$"); + Configuration.dumpConfiguration(testConf, outWriter); + assertFalse(outWriter.toString().contains(myPassword)); + } + } + public void testGetValByRegex() { Configuration conf = new Configuration(); String key1 = "t.abc.key1"; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java index 1962f49ccec..61a688ea4ee 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java @@ -36,6 +36,7 @@ import org.junit.Test; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index 527b9eb8e43..90eaa2a65fc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.fs; +import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.StringUtils; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; @@ -31,7 +33,11 @@ import static org.apache.hadoop.fs.FileSystemTestHelper.*; import java.io.*; import java.net.URI; import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; import java.util.Random; +import java.util.Set; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows; @@ -46,6 +52,8 @@ import org.junit.Test; import org.junit.rules.Timeout; import org.mockito.internal.util.reflection.Whitebox; +import javax.annotation.Nonnull; + /** * This class tests the local file system via the FileSystem abstraction. @@ -210,8 +218,8 @@ public class TestLocalFileSystem { @Test public void testHomeDirectory() throws IOException { - Path home = new Path(System.getProperty("user.home")) - .makeQualified(fileSys); + Path home = fileSys.makeQualified( + new Path(System.getProperty("user.home"))); Path fsHome = fileSys.getHomeDirectory(); assertEquals(home, fsHome); } @@ -221,7 +229,7 @@ public class TestLocalFileSystem { Path path = new Path(TEST_ROOT_DIR, "foo%bar"); writeFile(fileSys, path, 1); FileStatus status = fileSys.getFileStatus(path); - assertEquals(path.makeQualified(fileSys), status.getPath()); + assertEquals(fileSys.makeQualified(path), status.getPath()); cleanupFile(fileSys, path); } @@ -659,7 +667,7 @@ public class TestLocalFileSystem { try { FSDataOutputStreamBuilder builder = - fileSys.createFile(path); + fileSys.createFile(path).recursive(); FSDataOutputStream out = builder.build(); String content = "Create with a generic type of createFile!"; byte[] contentOrigin = content.getBytes("UTF8"); @@ -703,4 +711,66 @@ public class TestLocalFileSystem { Assert.assertEquals("Buffer size should be 0", builder.getBufferSize(), 0); } + + /** + * A builder to verify configuration keys are supported. 
+ */ + private static class BuilderWithSupportedKeys + extends FSDataOutputStreamBuilder { + + private final Set supportedKeys = new HashSet<>(); + + BuilderWithSupportedKeys(@Nonnull final Collection supportedKeys, + @Nonnull FileSystem fileSystem, @Nonnull Path p) { + super(fileSystem, p); + this.supportedKeys.addAll(supportedKeys); + } + + @Override + protected BuilderWithSupportedKeys getThisBuilder() { + return this; + } + + @Override + public FSDataOutputStream build() + throws IllegalArgumentException, IOException { + Set unsupported = new HashSet<>(getMandatoryKeys()); + unsupported.removeAll(supportedKeys); + Preconditions.checkArgument(unsupported.isEmpty(), + "unsupported key found: " + supportedKeys); + return getFS().create( + getPath(), getPermission(), getFlags(), getBufferSize(), + getReplication(), getBlockSize(), getProgress(), getChecksumOpt()); + } + } + + @Test + public void testFSOutputStreamBuilderOptions() throws Exception { + Path path = new Path(TEST_ROOT_DIR, "testBuilderOpt"); + final List supportedKeys = Arrays.asList("strM"); + + FSDataOutputStreamBuilder builder = + new BuilderWithSupportedKeys(supportedKeys, fileSys, path); + builder.opt("strKey", "value"); + builder.opt("intKey", 123); + builder.opt("strM", "ignored"); + // Over-write an optional value with a mandatory value. + builder.must("strM", "value"); + builder.must("unsupported", 12.34); + + assertEquals("Optional value should be overwrite by a mandatory value", + "value", builder.getOptions().get("strM")); + + Set mandatoryKeys = builder.getMandatoryKeys(); + Set expectedKeys = new HashSet<>(); + expectedKeys.add("strM"); + expectedKeys.add("unsupported"); + assertEquals(expectedKeys, mandatoryKeys); + assertEquals(2, mandatoryKeys.size()); + + LambdaTestUtils.intercept(IllegalArgumentException.class, + "unsupported key found", builder::build + ); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java index 6b3e98bd95a..d61b6354498 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java @@ -60,6 +60,19 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length); } + @Test + public void testBuilderAppendToEmptyFile() throws Throwable { + touch(getFileSystem(), target); + byte[] dataset = dataset(256, 'a', 'z'); + try (FSDataOutputStream outputStream = + getFileSystem().appendFile(target).build()) { + outputStream.write(dataset); + } + byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target, + dataset.length); + ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length); + } + @Test public void testAppendNonexistentFile() throws Throwable { try { @@ -78,15 +91,29 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB byte[] original = dataset(8192, 'A', 'Z'); byte[] appended = dataset(8192, '0', '9'); createFile(getFileSystem(), target, false, original); - FSDataOutputStream outputStream = getFileSystem().append(target); - outputStream.write(appended); - outputStream.close(); + try (FSDataOutputStream out = getFileSystem().append(target)) { + out.write(appended); + } byte[] bytes = 
ContractTestUtils.readDataset(getFileSystem(), target, original.length + appended.length); ContractTestUtils.validateFileContent(bytes, new byte[] [] { original, appended }); } + @Test + public void testBuilderAppendToExistingFile() throws Throwable { + byte[] original = dataset(8192, 'A', 'Z'); + byte[] appended = dataset(8192, '0', '9'); + createFile(getFileSystem(), target, false, original); + try (FSDataOutputStream out = getFileSystem().appendFile(target).build()) { + out.write(appended); + } + byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target, + original.length + appended.length); + ContractTestUtils.validateFileContent(bytes, + new byte[][]{original, appended}); + } + @Test public void testAppendMissingTarget() throws Throwable { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java index a9ce6078023..2053f50b6bc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java @@ -47,24 +47,37 @@ public abstract class AbstractContractCreateTest extends */ public static final int CREATE_TIMEOUT = 15000; - @Test - public void testCreateNewFile() throws Throwable { - describe("Foundational 'create a file' test"); - Path path = path("testCreateNewFile"); + protected Path path(String filepath, boolean useBuilder) throws IOException { + return super.path(filepath + (useBuilder ? "" : "-builder")); + } + + private void testCreateNewFile(boolean useBuilder) throws Throwable { + describe("Foundational 'create a file' test, using builder API=" + + useBuilder); + Path path = path("testCreateNewFile", useBuilder); byte[] data = dataset(256, 'a', 'z'); - writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false); + writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false, + useBuilder); ContractTestUtils.verifyFileContents(getFileSystem(), path, data); } @Test - public void testCreateFileOverExistingFileNoOverwrite() throws Throwable { - describe("Verify overwriting an existing file fails"); - Path path = path("testCreateFileOverExistingFileNoOverwrite"); + public void testCreateNewFile() throws Throwable { + testCreateNewFile(true); + testCreateNewFile(false); + } + + private void testCreateFileOverExistingFileNoOverwrite(boolean useBuilder) + throws Throwable { + describe("Verify overwriting an existing file fails, using builder API=" + + useBuilder); + Path path = path("testCreateFileOverExistingFileNoOverwrite", useBuilder); byte[] data = dataset(256, 'a', 'z'); writeDataset(getFileSystem(), path, data, data.length, 1024, false); byte[] data2 = dataset(10 * 1024, 'A', 'Z'); try { - writeDataset(getFileSystem(), path, data2, data2.length, 1024, false); + writeDataset(getFileSystem(), path, data2, data2.length, 1024, false, + useBuilder); fail("writing without overwrite unexpectedly succeeded"); } catch (FileAlreadyExistsException expected) { //expected @@ -76,6 +89,26 @@ public abstract class AbstractContractCreateTest extends } } + @Test + public void testCreateFileOverExistingFileNoOverwrite() throws Throwable { + testCreateFileOverExistingFileNoOverwrite(false); + testCreateFileOverExistingFileNoOverwrite(true); + } + + private void testOverwriteExistingFile(boolean 
useBuilder) throws Throwable { + describe("Overwrite an existing file and verify the new data is there, " + + "use builder API=" + useBuilder); + Path path = path("testOverwriteExistingFile", useBuilder); + byte[] data = dataset(256, 'a', 'z'); + writeDataset(getFileSystem(), path, data, data.length, 1024, false, + useBuilder); + ContractTestUtils.verifyFileContents(getFileSystem(), path, data); + byte[] data2 = dataset(10 * 1024, 'A', 'Z'); + writeDataset(getFileSystem(), path, data2, data2.length, 1024, true, + useBuilder); + ContractTestUtils.verifyFileContents(getFileSystem(), path, data2); + } + /** * This test catches some eventual consistency problems that blobstores exhibit, * as we are implicitly verifying that updates are consistent. This @@ -84,25 +117,21 @@ public abstract class AbstractContractCreateTest extends */ @Test public void testOverwriteExistingFile() throws Throwable { - describe("Overwrite an existing file and verify the new data is there"); - Path path = path("testOverwriteExistingFile"); - byte[] data = dataset(256, 'a', 'z'); - writeDataset(getFileSystem(), path, data, data.length, 1024, false); - ContractTestUtils.verifyFileContents(getFileSystem(), path, data); - byte[] data2 = dataset(10 * 1024, 'A', 'Z'); - writeDataset(getFileSystem(), path, data2, data2.length, 1024, true); - ContractTestUtils.verifyFileContents(getFileSystem(), path, data2); + testOverwriteExistingFile(false); + testOverwriteExistingFile(true); } - @Test - public void testOverwriteEmptyDirectory() throws Throwable { - describe("verify trying to create a file over an empty dir fails"); + private void testOverwriteEmptyDirectory(boolean useBuilder) + throws Throwable { + describe("verify trying to create a file over an empty dir fails, " + + "use builder API=" + useBuilder); Path path = path("testOverwriteEmptyDirectory"); mkdirs(path); assertIsDirectory(path); byte[] data = dataset(256, 'a', 'z'); try { - writeDataset(getFileSystem(), path, data, data.length, 1024, true); + writeDataset(getFileSystem(), path, data, data.length, 1024, true, + useBuilder); assertIsDirectory(path); fail("write of file over empty dir succeeded"); } catch (FileAlreadyExistsException expected) { @@ -121,8 +150,15 @@ public abstract class AbstractContractCreateTest extends } @Test - public void testOverwriteNonEmptyDirectory() throws Throwable { - describe("verify trying to create a file over a non-empty dir fails"); + public void testOverwriteEmptyDirectory() throws Throwable { + testOverwriteEmptyDirectory(false); + testOverwriteEmptyDirectory(true); + } + + private void testOverwriteNonEmptyDirectory(boolean useBuilder) + throws Throwable { + describe("verify trying to create a file over a non-empty dir fails, " + + "use builder API=" + useBuilder); Path path = path("testOverwriteNonEmptyDirectory"); mkdirs(path); try { @@ -140,7 +176,7 @@ public abstract class AbstractContractCreateTest extends byte[] data = dataset(256, 'a', 'z'); try { writeDataset(getFileSystem(), path, data, data.length, 1024, - true); + true, useBuilder); FileStatus status = getFileSystem().getFileStatus(path); boolean isDir = status.isDirectory(); @@ -166,6 +202,12 @@ public abstract class AbstractContractCreateTest extends assertIsFile(child); } + @Test + public void testOverwriteNonEmptyDirectory() throws Throwable { + testOverwriteNonEmptyDirectory(false); + testOverwriteNonEmptyDirectory(true); + } + @Test public void testCreatedFileIsImmediatelyVisible() throws Throwable { describe("verify that a newly created file exists as soon as 
open returns"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java index e60fd4347e4..8c01d2b776d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java @@ -70,7 +70,8 @@ public class ContractTestUtils extends Assert { * Assert that a property in the property set matches the expected value. * @param props property set * @param key property name - * @param expected expected value. If null, the property must not be in the set + * @param expected expected value. If null, the property must not be in the + * set */ public static void assertPropertyEquals(Properties props, String key, @@ -146,16 +147,45 @@ public class ContractTestUtils extends Assert { int len, int buffersize, boolean overwrite) throws IOException { + writeDataset(fs, path, src, len, buffersize, overwrite, false); + } + + /** + * Write a file. + * Optional flags control + * whether file overwrite operations should be enabled + * Optional using {@link org.apache.hadoop.fs.FSDataOutputStreamBuilder} + * + * @param fs filesystem + * @param path path to write to + * @param len length of data + * @param overwrite should the create option allow overwrites? + * @param useBuilder should use builder API to create file? + * @throws IOException IO problems + */ + public static void writeDataset(FileSystem fs, Path path, byte[] src, + int len, int buffersize, boolean overwrite, boolean useBuilder) + throws IOException { assertTrue( "Not enough data in source array to write " + len + " bytes", src.length >= len); - FSDataOutputStream out = fs.create(path, - overwrite, - fs.getConf() - .getInt(IO_FILE_BUFFER_SIZE_KEY, - IO_FILE_BUFFER_SIZE_DEFAULT), - (short) 1, - buffersize); + FSDataOutputStream out; + if (useBuilder) { + out = fs.createFile(path) + .overwrite(overwrite) + .replication((short) 1) + .bufferSize(buffersize) + .blockSize(buffersize) + .build(); + } else { + out = fs.create(path, + overwrite, + fs.getConf() + .getInt(IO_FILE_BUFFER_SIZE_KEY, + IO_FILE_BUFFER_SIZE_DEFAULT), + (short) 1, + buffersize); + } out.write(src, 0, len); out.close(); assertFileHasLength(fs, path, len); @@ -203,7 +233,7 @@ public class ContractTestUtils extends Assert { assertTrue("not a file " + statText, stat.isFile()); assertEquals("wrong length " + statText, original.length, stat.getLen()); byte[] bytes = readDataset(fs, path, original.length); - compareByteArrays(original,bytes,original.length); + compareByteArrays(original, bytes, original.length); } /** @@ -222,7 +252,7 @@ public class ContractTestUtils extends Assert { stm.readFully(out); byte[] expected = Arrays.copyOfRange(fileContents, seekOff, seekOff + toRead); - compareByteArrays(expected, out,toRead); + compareByteArrays(expected, out, toRead); } /** @@ -239,11 +269,11 @@ public class ContractTestUtils extends Assert { assertEquals("Number of bytes read != number written", len, received.length); int errors = 0; - int first_error_byte = -1; + int firstErrorByte = -1; for (int i = 0; i < len; i++) { if (original[i] != received[i]) { if (errors == 0) { - first_error_byte = i; + firstErrorByte = i; } errors++; } @@ -256,8 +286,8 @@ public class ContractTestUtils extends Assert { // the range either side of the first error to print // this is a purely arbitrary 
number, to aid user debugging final int overlap = 10; - for (int i = Math.max(0, first_error_byte - overlap); - i < Math.min(first_error_byte + overlap, len); + for (int i = Math.max(0, firstErrorByte - overlap); + i < Math.min(firstErrorByte + overlap, len); i++) { byte actual = received[i]; byte expected = original[i]; @@ -450,7 +480,7 @@ public class ContractTestUtils extends Assert { public static void downgrade(String message, Throwable failure) { LOG.warn("Downgrading test " + message, failure); AssumptionViolatedException ave = - new AssumptionViolatedException(failure, null); + new AssumptionViolatedException(failure, null); throw ave; } @@ -494,9 +524,9 @@ public class ContractTestUtils extends Assert { int expected) throws IOException { FileStatus status = fs.getFileStatus(path); assertEquals( - "Wrong file length of file " + path + " status: " + status, - expected, - status.getLen()); + "Wrong file length of file " + path + " status: " + status, + expected, + status.getLen()); } /** @@ -682,7 +712,8 @@ public class ContractTestUtils extends Assert { */ public static String ls(FileSystem fileSystem, Path path) throws IOException { if (path == null) { - //surfaces when someone calls getParent() on something at the top of the path + // surfaces when someone calls getParent() on something at the top of the + // path return "/"; } FileStatus[] stats; @@ -864,7 +895,7 @@ public class ContractTestUtils extends Assert { } /** - * Test for the host being an OSX machine + * Test for the host being an OSX machine. * @return true if the JVM thinks that is running on OSX */ public static boolean isOSX() { @@ -887,8 +918,9 @@ public class ContractTestUtils extends Assert { break; } } - if (mismatch) + if (mismatch) { break; + } } assertFalse("File content of file is not as expected at offset " + idx, mismatch); @@ -998,7 +1030,9 @@ public class ContractTestUtils extends Assert { * @throws IOException * thrown if an I/O error occurs while writing or reading the test file */ - public static void createAndVerifyFile(FileSystem fs, Path parent, final long fileSize) + public static void createAndVerifyFile(FileSystem fs, + Path parent, + final long fileSize) throws IOException { int testBufferSize = fs.getConf() .getInt(IO_CHUNK_BUFFER_SIZE, DEFAULT_IO_CHUNK_BUFFER_SIZE); @@ -1495,13 +1529,21 @@ public class ContractTestUtils extends Assert { * printing some useful results in the process. */ public static final class NanoTimer { - private final long startTime; + private long startTime; private long endTime; public NanoTimer() { startTime = now(); } + /** + * Reset the timer. Equivalent to the reset button of a stopwatch. + */ + public void reset() { + endTime = 0; + startTime = now(); + } + /** * End the operation. * @return the duration of the operation diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/protocolPB/TestFSSerialization.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/protocolPB/TestFSSerialization.java new file mode 100644 index 00000000000..31cacf786d8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/protocolPB/TestFSSerialization.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.protocolPB; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.DataOutputBuffer; +import static org.apache.hadoop.fs.FSProtos.*; + +import org.junit.Test; +import static org.junit.Assert.*; + +/** + * Verify PB serialization of FS data structures. + */ +public class TestFSSerialization { + + @Test + @SuppressWarnings("deprecation") + public void testWritableFlagSerialization() throws Exception { + final Path p = new Path("hdfs://yaks:4344/dingos/f"); + for (int i = 0; i < 0x8; ++i) { + final boolean acl = 0 != (i & 0x1); + final boolean crypt = 0 != (i & 0x2); + final boolean ec = 0 != (i & 0x4); + FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31, + 12345678L, 87654321L, FsPermission.getFileDefault(), + "hadoop", "unqbbc", null, p, acl, crypt, ec); + DataOutputBuffer dob = new DataOutputBuffer(); + stat.write(dob); + DataInputBuffer dib = new DataInputBuffer(); + dib.reset(dob.getData(), 0, dob.getLength()); + FileStatus fstat = new FileStatus(); + fstat.readFields(dib); + assertEquals(stat, fstat); + checkFields(stat, fstat); + } + } + + @Test + public void testUtilitySerialization() throws Exception { + final Path p = new Path("hdfs://yaks:4344/dingos/f"); + FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31, + 12345678L, 87654321L, FsPermission.createImmutable((short)0111), + "hadoop", "unqbbc", null, p); + FileStatusProto fsp = PBHelper.convert(stat); + FileStatus stat2 = PBHelper.convert(fsp); + assertEquals(stat, stat2); + checkFields(stat, stat2); + } + + private static void checkFields(FileStatus expected, FileStatus actual) { + assertEquals(expected.getPath(), actual.getPath()); + assertEquals(expected.isDirectory(), actual.isDirectory()); + assertEquals(expected.getLen(), actual.getLen()); + assertEquals(expected.getPermission(), actual.getPermission()); + assertEquals(expected.getOwner(), actual.getOwner()); + assertEquals(expected.getGroup(), actual.getGroup()); + assertEquals(expected.getModificationTime(), actual.getModificationTime()); + assertEquals(expected.getAccessTime(), actual.getAccessTime()); + assertEquals(expected.getReplication(), actual.getReplication()); + assertEquals(expected.getBlockSize(), actual.getBlockSize()); + assertEquals(expected.hasAcl(), actual.hasAcl()); + assertEquals(expected.isEncrypted(), actual.isEncrypted()); + assertEquals(expected.isErasureCoded(), actual.isErasureCoded()); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index 6ec6e0f965e..ca7e466b79e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -682,4 +682,17 @@ public class TestHttpServer extends HttpServerFunctionalTest { stopHttpServer(myServer2); } } + + @Test + public void testBacklogSize() throws Exception + { + final int backlogSize = 2048; + Configuration conf = new Configuration(); + conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, backlogSize); + HttpServer2 srv = createServer("test", conf); + List listeners = (List) Whitebox.getInternalState(srv, + "listeners"); + ServerConnector listener = (ServerConnector)listeners.get(0); + assertEquals(backlogSize, listener.getAcceptQueueSize()); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java index 25e2ce9be98..58537adf5cf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java @@ -131,4 +131,9 @@ public class TestGenericsUtil extends TestCase { GenericClass.class, c2); } + public void testIsLog4jLogger() throws Exception { + assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger(null)); + assertTrue("The implementation is Log4j", + GenericsUtil.isLog4jLogger(TestGenericsUtil.class)); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java new file mode 100644 index 00000000000..3e78a44fa70 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util.curator; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.curator.test.TestingServer; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * Test the manager for ZooKeeper Curator. 
+ */ +public class TestZKCuratorManager { + + private TestingServer server; + private ZKCuratorManager curator; + + @Before + public void setup() throws Exception { + this.server = new TestingServer(); + + Configuration conf = new Configuration(); + conf.set( + CommonConfigurationKeys.ZK_ADDRESS, this.server.getConnectString()); + + this.curator = new ZKCuratorManager(conf); + this.curator.start(); + } + + @After + public void teardown() throws Exception { + this.curator.close(); + if (this.server != null) { + this.server.close(); + this.server = null; + } + } + + @Test + public void testReadWriteData() throws Exception { + String testZNode = "/test"; + String expectedString = "testString"; + assertFalse(curator.exists(testZNode)); + curator.create(testZNode); + assertTrue(curator.exists(testZNode)); + curator.setData(testZNode, expectedString, -1); + String testString = curator.getStringData("/test"); + assertEquals(expectedString, testString); + } + + @Test + public void testChildren() throws Exception { + List children = curator.getChildren("/"); + assertEquals(1, children.size()); + + assertFalse(curator.exists("/node1")); + curator.create("/node1"); + assertTrue(curator.exists("/node1")); + + assertFalse(curator.exists("/node2")); + curator.create("/node2"); + assertTrue(curator.exists("/node2")); + + children = curator.getChildren("/"); + assertEquals(3, children.size()); + + curator.delete("/node2"); + assertFalse(curator.exists("/node2")); + children = curator.getChildren("/"); + assertEquals(2, children.size()); + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml index 64677f86f7a..6a3d53ad2de 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml +++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml @@ -919,15 +919,19 @@ RegexpComparator - ^( |\t)*of owner \(%u\), modification date \(%y, %Y\).( )* + ^( |\t)*of owner \(%u\), access date \(%x, %X\).( )* RegexpComparator - ^( |\t)*%y shows UTC date as "yyyy-MM-dd HH:mm:ss" and( )* + ^( |\t)*modification date \(%y, %Y\).( )* RegexpComparator - ^( |\t)*%Y shows milliseconds since January 1, 1970 UTC.( )* + ^( |\t)*%x and %y show UTC date as "yyyy-MM-dd HH:mm:ss" and( )* + + + RegexpComparator + ^( |\t)*%X and %Y show milliseconds since January 1, 1970 UTC.( )* RegexpComparator diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash index 86608edd93a..fa34bdfc4b5 100755 --- a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash +++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_array_param.bats old mode 100755 new mode 100644 similarity index 58% rename from hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats rename to hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_array_param.bats index 9b031f254fb..03264c18d77 --- a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats +++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_array_param.bats @@ -15,18 +15,23 @@ load hadoop-functions_test_helper -@test "hadoop_escape_sed (positive 1)" { - ret="$(hadoop_sed_escape "\pass&&word\0#\$asdf/g ><'\"~\`!@#$%^&*()_+-=")" - expected="\\\\pass\&\&word\\\0#\$asdf\/g ><'\"~\`!@#$%^\&*()_+-=" - echo "actual >${ret}<" - echo "expected >${expected}<" - [ "${ret}" = "${expected}" ] +@test "hadoop_add_array_param (empty)" { + hadoop_add_array_param ARRAY value + [ "${ARRAY[0]}" = value ] +} + +@test "hadoop_add_array_param (exist)" { + ARRAY=("val2") + hadoop_add_array_param ARRAY val1 + [ "${ARRAY[0]}" = val2 ] + [ "${ARRAY[1]}" = val1 ] +} + +@test "hadoop_add_array_param (double exist)" { + ARRAY=("val2" "val1") + hadoop_add_array_param ARRAY val3 + [ "${ARRAY[0]}" = val2 ] + [ "${ARRAY[1]}" = val1 ] + [ "${ARRAY[2]}" = val3 ] } -@test "hadoop_escape_xml (positive 1)" { - ret="$(hadoop_xml_escape "\pass&&word\0#\$asdf/g ><'\"~\`!@#$%^&*()_+-=")" - expected="\\pass&&word\0#\$asdf/g \>\<\'\"~\`!@#\$%^&*()_+-=" - echo "actual >${ret}<" - echo "expected >${expected}<" - [ "${ret}" = "${expected}" ] -} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_array_contains.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_array_contains.bats new file mode 100644 index 00000000000..01cb4e3bc48 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_array_contains.bats @@ -0,0 +1,47 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +load hadoop-functions_test_helper + +@test "hadoop_array_contains (empty)" { + run hadoop_array_contains value "${ARRAY[@]}" + [ "${status}" = 1 ] +} + +@test "hadoop_array_contains (exist)" { + ARRAY=("value") + run hadoop_array_contains value "${ARRAY[@]}" + [ "${status}" = 0 ] +} + +@test "hadoop_array_contains (notexist)" { + ARRAY=("different") + run hadoop_array_contains value "${ARRAY[@]}" + [ "${status}" = 1 ] +} + +@test "hadoop_array_contains (exist, multi)" { + ARRAY=("val1" "val2" "val3") + for j in val1 val2 val3; do + run hadoop_array_contains "${j}" "${ARRAY[@]}" + [ "${status}" = 0 ] + done +} + +@test "hadoop_array_contains (multi, not exist)" { + ARRAY=("val1" "val2" "val3") + run hadoop_array_contains value "${ARRAY[@]}" + [ "${status}" = 1 ] +} diff --git a/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_sort_array.bats similarity index 62% rename from hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem rename to hadoop-common-project/hadoop-common/src/test/scripts/hadoop_sort_array.bats index 9f4922bb7fe..7a18b5d0cba 100644 --- a/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_sort_array.bats @@ -13,5 +13,25 @@ # See the License for the specific language governing permissions and # limitations under the License. -org.apache.hadoop.fs.azure.NativeAzureFileSystem -org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure \ No newline at end of file +load hadoop-functions_test_helper + +@test "hadoop_sort_array (empty)" { + hadoop_sort_array ARRAY +} + +@test "hadoop_sort_array (single value)" { + ARRAY=("value") + hadoop_sort_array ARRAY +} + +@test "hadoop_sort_array (multiple value)" { + ARRAY=("b" "c" "a") + preifsod=$(echo "${IFS}" | od -c) + hadoop_sort_array ARRAY + postifsod=$(echo "${IFS}" | od -c) + + [ "${ARRAY[0]}" = "a" ] + [ "${ARRAY[1]}" = "b" ] + [ "${ARRAY[2]}" = "c" ] + [ "${preifsod}" = "${postifsod}" ] +} diff --git a/hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh b/hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh index c5307163468..0d084bb36e6 100755 --- a/hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh +++ b/hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh @@ -16,7 +16,7 @@ # limitations under the License. 
if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then - hadoop_add_subcommand "kms" "run KMS, the Key Management Server" + hadoop_add_subcommand "kms" daemon "run KMS, the Key Management Server" fi ## @description Command handler for kms subcommand @@ -54,4 +54,4 @@ function hadoop_subcommand_kms [[ "${HADOOP_DAEMON_MODE}" == "start" ]]; then hadoop_mkdir "${KMS_TEMP:-${HADOOP_HOME}/temp}" fi -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 1f6022ca356..47c14e23805 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2764,7 +2764,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, } } - public HashMap getErasureCodingCodecs() throws IOException { + public Map getErasureCodingCodecs() throws IOException { checkOpen(); try (TraceScope ignored = tracer.newScope("getErasureCodingCodecs")) { return namenode.getErasureCodingCodecs(); @@ -2774,25 +2774,43 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, public AddECPolicyResponse[] addErasureCodingPolicies( ErasureCodingPolicy[] policies) throws IOException { checkOpen(); - return namenode.addErasureCodingPolicies(policies); + try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) { + return namenode.addErasureCodingPolicies(policies); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class); + } } public void removeErasureCodingPolicy(String ecPolicyName) throws IOException { checkOpen(); - namenode.removeErasureCodingPolicy(ecPolicyName); + try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) { + namenode.removeErasureCodingPolicy(ecPolicyName); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class); + } } public void enableErasureCodingPolicy(String ecPolicyName) throws IOException { checkOpen(); - namenode.enableErasureCodingPolicy(ecPolicyName); + try (TraceScope ignored = tracer.newScope("enableErasureCodingPolicy")) { + namenode.enableErasureCodingPolicy(ecPolicyName); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + SafeModeException.class); + } } public void disableErasureCodingPolicy(String ecPolicyName) throws IOException { checkOpen(); - namenode.disableErasureCodingPolicy(ecPolicyName); + try (TraceScope ignored = tracer.newScope("disableErasureCodingPolicy")) { + namenode.disableErasureCodingPolicy(ecPolicyName); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + SafeModeException.class); + } } public DFSInotifyEventInputStream getInotifyEventStream() throws IOException { @@ -3026,7 +3044,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, * * @param src path to get the information for * @return Returns the policy information if file or directory on the path is - * erasure coded, null otherwise + * erasure coded, null otherwise. Null will be returned if directory or file + * has REPLICATION policy. 
* * @throws IOException */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java index 748edcdb275..b58cf16a323 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java @@ -61,4 +61,6 @@ public class DFSClientFaultInjector { public boolean skipRollingRestartWait() { return false; } + + public void sleepBeforeHedgedGet() {} } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index dcc997c173a..97d3de4a96e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -830,60 +830,85 @@ public class DFSInputStream extends FSInputStream private DNAddrPair chooseDataNode(LocatedBlock block, Collection ignoredNodes) throws IOException { + return chooseDataNode(block, ignoredNodes, true); + } + + /** + * Choose datanode to read from. + * + * @param block Block to choose datanode addr from + * @param ignoredNodes Nodes to ignore. + * @param refetchIfRequired Whether to refetch if no nodes to choose + * from. + * @return Returns chosen DNAddrPair; can be null if refetchIfRequired is + * false. + */ + private DNAddrPair chooseDataNode(LocatedBlock block, + Collection ignoredNodes, boolean refetchIfRequired) + throws IOException { while (true) { DNAddrPair result = getBestNodeDNAddrPair(block, ignoredNodes); if (result != null) { return result; + } else if (refetchIfRequired) { + block = refetchLocations(block, ignoredNodes); } else { - String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(), - deadNodes, ignoredNodes); - String blockInfo = block.getBlock() + " file=" + src; - if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) { - String description = "Could not obtain block: " + blockInfo; - DFSClient.LOG.warn(description + errMsg - + ". Throwing a BlockMissingException"); - throw new BlockMissingException(src, description, - block.getStartOffset()); - } - - DatanodeInfo[] nodes = block.getLocations(); - if (nodes == null || nodes.length == 0) { - DFSClient.LOG.info("No node available for " + blockInfo); - } - DFSClient.LOG.info("Could not obtain " + block.getBlock() - + " from any node: " + errMsg - + ". Will get new block locations from namenode and retry..."); - try { - // Introducing a random factor to the wait time before another retry. - // The wait time is dependent on # of failures and a random factor. - // At the first time of getting a BlockMissingException, the wait time - // is a random number between 0..3000 ms. If the first retry - // still fails, we will wait 3000 ms grace period before the 2nd retry. - // Also at the second retry, the waiting window is expanded to 6000 ms - // alleviating the request rate from the server. Similarly the 3rd retry - // will wait 6000ms grace period before retry and the waiting window is - // expanded to 9000ms. 
- final int timeWindow = dfsClient.getConf().getTimeWindow(); - double waitTime = timeWindow * failures + // grace period for the last round of attempt - // expanding time window for each failure - timeWindow * (failures + 1) * - ThreadLocalRandom.current().nextDouble(); - DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + - " IOException, will wait for " + waitTime + " msec."); - Thread.sleep((long)waitTime); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new InterruptedIOException( - "Interrupted while choosing DataNode for read."); - } - deadNodes.clear(); //2nd option is to remove only nodes[blockId] - openInfo(true); - block = refreshLocatedBlock(block); - failures++; + return null; } } } + private LocatedBlock refetchLocations(LocatedBlock block, + Collection ignoredNodes) throws IOException { + String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(), + deadNodes, ignoredNodes); + String blockInfo = block.getBlock() + " file=" + src; + if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) { + String description = "Could not obtain block: " + blockInfo; + DFSClient.LOG.warn(description + errMsg + + ". Throwing a BlockMissingException"); + throw new BlockMissingException(src, description, + block.getStartOffset()); + } + + DatanodeInfo[] nodes = block.getLocations(); + if (nodes == null || nodes.length == 0) { + DFSClient.LOG.info("No node available for " + blockInfo); + } + DFSClient.LOG.info("Could not obtain " + block.getBlock() + + " from any node: " + errMsg + + ". Will get new block locations from namenode and retry..."); + try { + // Introducing a random factor to the wait time before another retry. + // The wait time is dependent on # of failures and a random factor. + // At the first time of getting a BlockMissingException, the wait time + // is a random number between 0..3000 ms. If the first retry + // still fails, we will wait 3000 ms grace period before the 2nd retry. + // Also at the second retry, the waiting window is expanded to 6000 ms + // alleviating the request rate from the server. Similarly the 3rd retry + // will wait 6000ms grace period before retry and the waiting window is + // expanded to 9000ms. + final int timeWindow = dfsClient.getConf().getTimeWindow(); + // grace period for the last round of attempt + double waitTime = timeWindow * failures + + // expanding time window for each failure + timeWindow * (failures + 1) * + ThreadLocalRandom.current().nextDouble(); + DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + + " IOException, will wait for " + waitTime + " msec."); + Thread.sleep((long)waitTime); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new InterruptedIOException( + "Interrupted while choosing DataNode for read."); + } + deadNodes.clear(); //2nd option is to remove only nodes[blockId] + openInfo(true); + block = refreshLocatedBlock(block); + failures++; + return block; + } + /** * Get the best node from which to stream the data. * @param block LocatedBlock, containing nodes in priority order. @@ -985,6 +1010,7 @@ public class DFSInputStream extends FSInputStream return new Callable() { @Override public ByteBuffer call() throws Exception { + DFSClientFaultInjector.get().sleepBeforeHedgedGet(); try (TraceScope ignored = dfsClient.getTracer(). 
newScope("hedgedRead" + hedgedReadId, parentSpanId)) { actualGetFromOneDataNode(datanode, start, end, bb, corruptedBlocks); @@ -1131,8 +1157,9 @@ public class DFSInputStream extends FSInputStream Future firstRequest = hedgedService .submit(getFromDataNodeCallable); futures.add(firstRequest); + Future future = null; try { - Future future = hedgedService.poll( + future = hedgedService.poll( conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS); if (future != null) { ByteBuffer result = future.get(); @@ -1142,34 +1169,38 @@ public class DFSInputStream extends FSInputStream } DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged " + "read", conf.getHedgedReadThresholdMillis(), chosenNode.info); - // Ignore this node on next go around. - ignored.add(chosenNode.info); dfsClient.getHedgedReadMetrics().incHedgedReadOps(); // continue; no need to refresh block locations } catch (ExecutionException e) { - // Ignore + futures.remove(future); } catch (InterruptedException e) { throw new InterruptedIOException( "Interrupted while waiting for reading task"); } + // Ignore this node on next go around. + // If poll timeout and the request still ongoing, don't consider it + // again. If read data failed, don't consider it either. + ignored.add(chosenNode.info); } else { // We are starting up a 'hedged' read. We have a read already // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode. // If no nodes to do hedged reads against, pass. + boolean refetch = false; try { - chosenNode = getBestNodeDNAddrPair(block, ignored); - if (chosenNode == null) { - chosenNode = chooseDataNode(block, ignored); + chosenNode = chooseDataNode(block, ignored, false); + if (chosenNode != null) { + // Latest block, if refreshed internally + block = chosenNode.block; + bb = ByteBuffer.allocate(len); + Callable getFromDataNodeCallable = + getFromOneDataNode(chosenNode, block, start, end, bb, + corruptedBlocks, hedgedReadId++); + Future oneMoreRequest = + hedgedService.submit(getFromDataNodeCallable); + futures.add(oneMoreRequest); + } else { + refetch = true; } - // Latest block, if refreshed internally - block = chosenNode.block; - bb = ByteBuffer.allocate(len); - Callable getFromDataNodeCallable = getFromOneDataNode( - chosenNode, block, start, end, bb, - corruptedBlocks, hedgedReadId++); - Future oneMoreRequest = hedgedService - .submit(getFromDataNodeCallable); - futures.add(oneMoreRequest); } catch (IOException ioe) { DFSClient.LOG.debug("Failed getting node for hedged read: {}", ioe.getMessage()); @@ -1187,6 +1218,9 @@ public class DFSInputStream extends FSInputStream } catch (InterruptedException ie) { // Ignore and retry } + if (refetch) { + refetchLocations(block, ignored); + } // We got here if exception. Ignore this node on next go around IFF // we found a chosenNode to hedge read against. 
if (chosenNode != null && chosenNode.info != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java index 2e770cc1c73..e7cd0d827ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java @@ -83,6 +83,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.concurrent.BlockingQueue; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -811,10 +812,30 @@ public class DFSUtilClient { public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize, int maxPoolSize, long keepAliveTimeSecs, String threadNamePrefix, boolean runRejectedExec) { + return getThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTimeSecs, + new SynchronousQueue<>(), threadNamePrefix, runRejectedExec); +} + + /** + * Utility to create a {@link ThreadPoolExecutor}. + * + * @param corePoolSize - min threads in the pool, even if idle + * @param maxPoolSize - max threads in the pool + * @param keepAliveTimeSecs - max seconds beyond which excess idle threads + * will be terminated + * @param queue - the queue to use for holding tasks before they are executed. + * @param threadNamePrefix - name prefix for the pool threads + * @param runRejectedExec - when true, rejected tasks from + * ThreadPoolExecutor are run in the context of calling thread + * @return ThreadPoolExecutor + */ + public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize, + int maxPoolSize, long keepAliveTimeSecs, BlockingQueue queue, + String threadNamePrefix, boolean runRejectedExec) { Preconditions.checkArgument(corePoolSize > 0); ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTimeSecs, TimeUnit.SECONDS, - new SynchronousQueue(), new Daemon.DaemonFactory() { + queue, new Daemon.DaemonFactory() { private final AtomicInteger threadIndex = new AtomicInteger(0); @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 34c631a66ba..ceec2b346b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -26,7 +26,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -2515,8 +2514,6 @@ public class DistributedFileSystem extends FileSystem { public void setErasureCodingPolicy(final Path path, final String ecPolicyName) throws IOException { Path absF = fixRelativePart(path); - Preconditions.checkNotNull(ecPolicyName, "Erasure coding policy cannot be" + - " null."); new FileSystemLinkResolver() { @Override public Void doCall(final Path p) throws IOException { @@ -2543,7 +2540,8 @@ public class DistributedFileSystem extends FileSystem { * * @param path The path of the file or directory * @return Returns the policy information if file or directory on the path - * is erasure 
coded, null otherwise + * is erasure coded, null otherwise. Null will be returned if directory or + * file has REPLICATION policy. * @throws IOException */ public ErasureCodingPolicy getErasureCodingPolicy(final Path path) @@ -2570,7 +2568,8 @@ public class DistributedFileSystem extends FileSystem { } /** - * Retrieve all the erasure coding policies supported by this file system. + * Retrieve all the erasure coding policies supported by this file system, + * excluding REPLICATION policy. * * @return all erasure coding policies supported by this file system. * @throws IOException @@ -2587,7 +2586,7 @@ public class DistributedFileSystem extends FileSystem { * @return all erasure coding codecs and coders supported by this file system. * @throws IOException */ - public HashMap getAllErasureCodingCodecs() + public Map getAllErasureCodingCodecs() throws IOException { return dfs.getErasureCodingCodecs(); } @@ -2892,7 +2891,8 @@ public class DistributedFileSystem extends FileSystem { */ @Override public FSDataOutputStream build() throws IOException { - if (getFlags().contains(CreateFlag.CREATE)) { + if (getFlags().contains(CreateFlag.CREATE) || + getFlags().contains(CreateFlag.OVERWRITE)) { if (isRecursive()) { return dfs.create(getPath(), getPermission(), getFlags(), getBufferSize(), getReplication(), getBlockSize(), diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 45c6b3269b6..b0e85e55ed4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; import java.util.EnumSet; -import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -1588,7 +1588,8 @@ public interface ClientProtocol { /** - * Get the erasure coding policies loaded in Namenode. + * Get the erasure coding policies loaded in Namenode, excluding REPLICATION + * policy. * * @throws IOException */ @@ -1601,10 +1602,11 @@ public interface ClientProtocol { * @throws IOException */ @Idempotent - HashMap getErasureCodingCodecs() throws IOException; + Map getErasureCodingCodecs() throws IOException; /** - * Get the information about the EC policy for the path. + * Get the information about the EC policy for the path. Null will be returned + * if directory or file has REPLICATION policy. 
* * @param src path to get the info for * @throws IOException diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java index 368a2f265e6..501b67c15bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java @@ -17,23 +17,28 @@ */ package org.apache.hadoop.hdfs.protocol; +import java.io.Serializable; + import com.google.common.base.Preconditions; import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.erasurecode.ECSchema; +import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; /** * A policy about how to write/read/code an erasure coding file. */ @InterfaceAudience.Public @InterfaceStability.Evolving -public final class ErasureCodingPolicy { +public final class ErasureCodingPolicy implements Serializable { + private static final long serialVersionUID = 0x0079fe4e; + + private String name; private final ECSchema schema; private final int cellSize; - private String name; private byte id; public ErasureCodingPolicy(String name, ECSchema schema, @@ -103,6 +108,10 @@ public final class ErasureCodingPolicy { this.id = id; } + public boolean isReplicationPolicy() { + return (id == ErasureCodeConstants.REPLICATION_POLICY_ID); + } + @Override public boolean equals(Object o) { if (o == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java index e0dd0d70467..37d04e31cca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java @@ -27,6 +27,11 @@ import org.apache.hadoop.fs.permission.FsPermission; * done for backwards compatibility in case any existing clients assume the * value of FsPermission is in a particular range. */ + +/** + * @deprecated ACLs, encryption, and erasure coding are managed on FileStatus. + */ +@Deprecated @InterfaceAudience.Private public class FsPermissionExtension extends FsPermission { private static final long serialVersionUID = 0x13c298a4; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index b636121ac87..2681f129d7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -48,8 +48,8 @@ public final class HdfsConstants { public static final byte COLD_STORAGE_POLICY_ID = 2; public static final String COLD_STORAGE_POLICY_NAME = "COLD"; - // TODO should be conf injected? 
- public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024; + public static final int DEFAULT_DATA_SOCKET_SIZE = 0; + /** * A special path component contained in the path for a snapshot file/dir */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java index c3866022d5d..8438b01b8de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hdfs.protocol; +import java.io.IOException; import java.net.URI; +import java.util.EnumSet; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -31,24 +33,15 @@ import org.apache.hadoop.hdfs.DFSUtilClient; */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class HdfsFileStatus { +public class HdfsFileStatus extends FileStatus { + + private static final long serialVersionUID = 0x126eb82a; // local name of the inode that's encoded in java UTF8 - private final byte[] path; - private final byte[] symlink; // symlink target encoded in java UTF8 or null - private final long length; - private final boolean isdir; - private final short block_replication; - private final long blocksize; - private final long modification_time; - private final long access_time; - private final FsPermission permission; - private final String owner; - private final String group; + private byte[] uPath; + private byte[] uSymlink; // symlink target encoded in java UTF8/null private final long fileId; - private final FileEncryptionInfo feInfo; - private final ErasureCodingPolicy ecPolicy; // Used by dir, not including dot and dotdot. Always zero for a regular file. @@ -57,12 +50,22 @@ public class HdfsFileStatus { public static final byte[] EMPTY_NAME = new byte[0]; + /** + * Set of features potentially active on an instance. + */ + public enum Flags { + HAS_ACL, + HAS_CRYPT, + HAS_EC; + } + private final EnumSet flags; + /** * Constructor. - * @param length the number of bytes the file has - * @param isdir if the path is a directory + * @param length the number of bytes the file has + * @param isdir if the path is a directory * @param block_replication the replication factor - * @param blocksize the block size + * @param blocksize the block size * @param modification_time modification time * @param access_time access time * @param permission permission @@ -77,25 +80,18 @@ public class HdfsFileStatus { * @param ecPolicy the erasure coding policy */ public HdfsFileStatus(long length, boolean isdir, int block_replication, - long blocksize, long modification_time, long access_time, - FsPermission permission, String owner, String group, byte[] symlink, - byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo, - byte storagePolicy, ErasureCodingPolicy ecPolicy) { - this.length = length; - this.isdir = isdir; - this.block_replication = (short) block_replication; - this.blocksize = blocksize; - this.modification_time = modification_time; - this.access_time = access_time; - this.permission = (permission == null) ? - ((isdir || symlink!=null) ? - FsPermission.getDefault() : - FsPermission.getFileDefault()) : - permission; - this.owner = (owner == null) ? "" : owner; - this.group = (group == null) ? 
"" : group; - this.symlink = symlink; - this.path = path; + long blocksize, long modification_time, + long access_time, FsPermission permission, + EnumSet flags, String owner, String group, + byte[] symlink, byte[] path, long fileId, + int childrenNum, FileEncryptionInfo feInfo, + byte storagePolicy, ErasureCodingPolicy ecPolicy) { + super(length, isdir, block_replication, blocksize, modification_time, + access_time, convert(isdir, symlink != null, permission, flags), + owner, group, null, null); + this.flags = flags; + this.uSymlink = symlink; + this.uPath = path; this.fileId = fileId; this.childrenNum = childrenNum; this.feInfo = feInfo; @@ -104,83 +100,48 @@ public class HdfsFileStatus { } /** - * Get the length of this file, in bytes. - * @return the length of this file, in bytes. + * Set redundant flags for compatibility with existing applications. */ - public final long getLen() { - return length; + protected static FsPermission convert(boolean isdir, boolean symlink, + FsPermission p, EnumSet f) { + if (p instanceof FsPermissionExtension) { + // verify flags are set consistently + assert p.getAclBit() == f.contains(HdfsFileStatus.Flags.HAS_ACL); + assert p.getEncryptedBit() == f.contains(HdfsFileStatus.Flags.HAS_CRYPT); + assert p.getErasureCodedBit() == f.contains(HdfsFileStatus.Flags.HAS_EC); + return p; + } + if (null == p) { + if (isdir) { + p = FsPermission.getDirDefault(); + } else if (symlink) { + p = FsPermission.getDefault(); + } else { + p = FsPermission.getFileDefault(); + } + } + return new FsPermissionExtension(p, f.contains(Flags.HAS_ACL), + f.contains(Flags.HAS_CRYPT), f.contains(Flags.HAS_EC)); } - /** - * Is this a directory? - * @return true if this is a directory - */ - public final boolean isDir() { - return isdir; - } - - /** - * Is this a symbolic link? - * @return true if this is a symbolic link - */ + @Override public boolean isSymlink() { - return symlink != null; + return uSymlink != null; } - /** - * Get the block size of the file. - * @return the number of bytes - */ - public final long getBlockSize() { - return blocksize; + @Override + public boolean hasAcl() { + return flags.contains(Flags.HAS_ACL); } - /** - * Get the replication factor of a file. - * @return the replication factor of a file. - */ - public final short getReplication() { - return block_replication; + @Override + public boolean isEncrypted() { + return flags.contains(Flags.HAS_CRYPT); } - /** - * Get the modification time of the file. - * @return the modification time of file in milliseconds since January 1, 1970 UTC. - */ - public final long getModificationTime() { - return modification_time; - } - - /** - * Get the access time of the file. - * @return the access time of file in milliseconds since January 1, 1970 UTC. - */ - public final long getAccessTime() { - return access_time; - } - - /** - * Get FsPermission associated with the file. - * @return permission - */ - public final FsPermission getPermission() { - return permission; - } - - /** - * Get the owner of the file. - * @return owner of the file - */ - public final String getOwner() { - return owner; - } - - /** - * Get the group associated with the file. - * @return group for the file. 
- */ - public final String getSymlink() { - return DFSUtilClient.bytes2String(symlink); + @Override + public Path getSymlink() throws IOException { + if (isSymlink()) { + return new Path(DFSUtilClient.bytes2String(uSymlink)); + } + throw new IOException("Path " + getPath() + " is not a symbolic link"); } + @Override + public void setSymlink(Path sym) { + uSymlink = DFSUtilClient.string2Bytes(sym.toString()); + } + + /** + * Opaque referent for the symlink, to be resolved at the client. + */ public final byte[] getSymlinkInBytes() { - return symlink; + return uSymlink; } public final long getFileId() { @@ -275,13 +244,30 @@ return storagePolicy; } - public final FileStatus makeQualified(URI defaultUri, Path path) { - return new FileStatus(getLen(), isDir(), getReplication(), - getBlockSize(), getModificationTime(), - getAccessTime(), - getPermission(), getOwner(), getGroup(), - isSymlink() ? new Path(getSymlink()) : null, - (getFullPath(path)).makeQualified( - defaultUri, null)); // fully-qualify path + @Override + public boolean equals(Object o) { + // satisfy findbugs + return super.equals(o); } + + @Override + public int hashCode() { + // satisfy findbugs + return super.hashCode(); + } + + /** + * Resolve the short name of the Path given the URI, parent provided. This + * FileStatus reference will not contain a valid Path until it is resolved + * by this method. + * @param defaultUri FileSystem to fully qualify HDFS path. + * @param parent Parent path of this element. + * @return Reference to this instance. 
+ */ + public final FileStatus makeQualified(URI defaultUri, Path parent) { + // fully-qualify path + setPath(getFullPath(parent).makeQualified(defaultUri, null)); + return this; // API compatibility + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java index 0fd203936b6..b82a860cf4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.protocol; import java.net.URI; +import java.util.EnumSet; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -34,7 +35,14 @@ import org.apache.hadoop.hdfs.DFSUtilClient; @InterfaceAudience.Private @InterfaceStability.Evolving public class HdfsLocatedFileStatus extends HdfsFileStatus { - private final LocatedBlocks locations; + + private static final long serialVersionUID = 0x23c73328; + + /** + * Left transient, because {@link #makeQualifiedLocated(URI,Path)} + * is the user-facing type. + */ + private transient LocatedBlocks locations; /** * Constructor @@ -56,12 +64,12 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus { */ public HdfsLocatedFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, - long access_time, FsPermission permission, String owner, String group, - byte[] symlink, byte[] path, long fileId, LocatedBlocks locations, - int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy, - ErasureCodingPolicy ecPolicy) { + long access_time, FsPermission permission, EnumSet flags, + String owner, String group, byte[] symlink, byte[] path, long fileId, + LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo, + byte storagePolicy, ErasureCodingPolicy ecPolicy) { super(length, isdir, block_replication, blocksize, modification_time, - access_time, permission, owner, group, symlink, path, fileId, + access_time, permission, flags, owner, group, symlink, path, fileId, childrenNum, feInfo, storagePolicy, ecPolicy); this.locations = locations; } @@ -72,13 +80,21 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus { public final LocatedFileStatus makeQualifiedLocated(URI defaultUri, Path path) { - return new LocatedFileStatus(getLen(), isDir(), getReplication(), - getBlockSize(), getModificationTime(), - getAccessTime(), - getPermission(), getOwner(), getGroup(), - isSymlink() ? 
new Path(getSymlink()) : null, - (getFullPath(path)).makeQualified( - defaultUri, null), // fully-qualify path + makeQualified(defaultUri, path); + return new LocatedFileStatus(this, DFSUtilClient.locatedBlocks2Locations(getBlockLocations())); } + + @Override + public boolean equals(Object o) { + // satisfy findbugs + return super.equals(o); + } + + @Override + public int hashCode() { + // satisfy findbugs + return super.hashCode(); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java index 583d02784ce..61e5316f80b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java @@ -21,6 +21,7 @@ import java.io.PrintStream; import java.text.SimpleDateFormat; import java.util.Comparator; import java.util.Date; +import java.util.EnumSet; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -57,11 +58,12 @@ public class SnapshottableDirectoryStatus { private final byte[] parentFullPath; public SnapshottableDirectoryStatus(long modification_time, long access_time, - FsPermission permission, String owner, String group, byte[] localName, - long inodeId, int childrenNum, - int snapshotNumber, int snapshotQuota, byte[] parentFullPath) { + FsPermission permission, EnumSet flags, + String owner, String group, byte[] localName, long inodeId, + int childrenNum, int snapshotNumber, int snapshotQuota, + byte[] parentFullPath) { this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time, - access_time, permission, owner, group, null, localName, inodeId, + access_time, permission, flags, owner, group, null, localName, inodeId, childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null); this.snapshotNumber = snapshotNumber; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java index 2cd838b8fa7..f0efe762d1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java @@ -68,6 +68,13 @@ public final class SystemErasureCodingPolicies { new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA, DEFAULT_CELLSIZE, RS_10_4_POLICY_ID); + // REPLICATION policy is always enabled. + private static final ErasureCodingPolicy REPLICATION_POLICY = + new ErasureCodingPolicy(ErasureCodeConstants.REPLICATION_POLICY_NAME, + ErasureCodeConstants.REPLICATION_1_2_SCHEMA, + DEFAULT_CELLSIZE, + ErasureCodeConstants.REPLICATION_POLICY_ID); + private static final List SYS_POLICIES = Collections.unmodifiableList(Arrays.asList( SYS_POLICY1, SYS_POLICY2, SYS_POLICY3, SYS_POLICY4, @@ -118,4 +125,11 @@ public final class SystemErasureCodingPolicies { public static ErasureCodingPolicy getByName(String name) { return SYSTEM_POLICIES_BY_NAME.get(name); } + + /** + * Get the special REPLICATION policy. 
+ */ + public static ErasureCodingPolicy getReplicationPolicy() { + return REPLICATION_POLICY; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 388788c89b9..ac06c1ade8f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -26,6 +26,7 @@ import java.util.List; import com.google.common.collect.Lists; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -1518,7 +1519,9 @@ public class ClientNamenodeProtocolTranslatorPB implements final SetErasureCodingPolicyRequestProto.Builder builder = SetErasureCodingPolicyRequestProto.newBuilder(); builder.setSrc(src); - builder.setEcPolicyName(ecPolicyName); + if (ecPolicyName != null) { + builder.setEcPolicyName(ecPolicyName); + } SetErasureCodingPolicyRequestProto req = builder.build(); try { rpcProxy.setErasureCodingPolicy(null, req); @@ -1758,11 +1761,11 @@ public class ClientNamenodeProtocolTranslatorPB implements } @Override - public HashMap getErasureCodingCodecs() throws IOException { + public Map getErasureCodingCodecs() throws IOException { try { GetErasureCodingCodecsResponseProto response = rpcProxy .getErasureCodingCodecs(null, VOID_GET_EC_CODEC_REQUEST); - HashMap ecCodecs = new HashMap(); + Map ecCodecs = new HashMap<>(); for (CodecProto codec : response.getCodecList()) { ecCodecs.put(codec.getCodec(), codec.getCoders()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index a16c679825c..f5bad296dc4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -104,6 +104,7 @@ import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntrySco import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto; @@ -149,7 +150,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Sto import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto; 
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType; @@ -1142,7 +1142,7 @@ public class PBHelperClient { } public static FsPermission convert(FsPermissionProto p) { - return new FsPermissionExtension((short)p.getPerm()); + return new FsPermission((short)p.getPerm()); } private static Event.CreateEvent.INodeType createTypeConvert( @@ -1501,10 +1501,14 @@ public class PBHelperClient { return null; } final HdfsFileStatusProto status = sdirStatusProto.getDirStatus(); + EnumSet flags = status.hasFlags() + ? convertFlags(status.getFlags()) + : convertFlags(status.getPermission()); return new SnapshottableDirectoryStatus( status.getModificationTime(), status.getAccessTime(), convert(status.getPermission()), + flags, status.getOwner(), status.getGroup(), status.getPath().toByteArray(), @@ -1546,17 +1550,23 @@ public class PBHelperClient { } public static FsPermissionProto convert(FsPermission p) { - return FsPermissionProto.newBuilder().setPerm(p.toExtendedShort()).build(); + return FsPermissionProto.newBuilder().setPerm(p.toShort()).build(); } public static HdfsFileStatus convert(HdfsFileStatusProto fs) { - if (fs == null) + if (fs == null) { return null; + } + EnumSet flags = fs.hasFlags() + ? convertFlags(fs.getFlags()) + : convertFlags(fs.getPermission()); return new HdfsLocatedFileStatus( fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), fs.getBlockReplication(), fs.getBlocksize(), fs.getModificationTime(), fs.getAccessTime(), - convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), + convert(fs.getPermission()), + flags, + fs.getOwner(), fs.getGroup(), fs.getFileType().equals(FileType.IS_SYMLINK) ? fs.getSymlink().toByteArray() : null, fs.getPath().toByteArray(), @@ -1569,6 +1579,47 @@ public class PBHelperClient { fs.hasEcPolicy() ? convertErasureCodingPolicy(fs.getEcPolicy()) : null); } + private static EnumSet convertFlags(int flags) { + EnumSet f = + EnumSet.noneOf(HdfsFileStatus.Flags.class); + for (HdfsFileStatusProto.Flags pbf : HdfsFileStatusProto.Flags.values()) { + if ((pbf.getNumber() & flags) != 0) { + switch (pbf) { + case HAS_ACL: + f.add(HdfsFileStatus.Flags.HAS_ACL); + break; + case HAS_CRYPT: + f.add(HdfsFileStatus.Flags.HAS_CRYPT); + break; + case HAS_EC: + f.add(HdfsFileStatus.Flags.HAS_EC); + break; + default: + // ignore unknown + break; + } + } + } + return f; + } + + private static EnumSet convertFlags( + FsPermissionProto pbp) { + EnumSet f = + EnumSet.noneOf(HdfsFileStatus.Flags.class); + FsPermission p = new FsPermissionExtension((short)pbp.getPerm()); + if (p.getAclBit()) { + f.add(HdfsFileStatus.Flags.HAS_ACL); + } + if (p.getEncryptedBit()) { + f.add(HdfsFileStatus.Flags.HAS_CRYPT); + } + if (p.getErasureCodedBit()) { + f.add(HdfsFileStatus.Flags.HAS_EC); + } + return f; + } + public static CorruptFileBlocks convert(CorruptFileBlocksProto c) { if (c == null) return null; @@ -2044,7 +2095,7 @@ public class PBHelperClient { if (fs == null) return null; FileType fType = FileType.IS_FILE; - if (fs.isDir()) { + if (fs.isDirectory()) { fType = FileType.IS_DIR; } else if (fs.isSymlink()) { fType = FileType.IS_SYMLINK; @@ -2082,6 +2133,10 @@ public class PBHelperClient { builder.setEcPolicy(convertErasureCodingPolicy( fs.getErasureCodingPolicy())); } + int flags = fs.hasAcl() ? HdfsFileStatusProto.Flags.HAS_ACL_VALUE : 0; + flags |= fs.isEncrypted() ? HdfsFileStatusProto.Flags.HAS_CRYPT_VALUE : 0; + flags |= fs.isErasureCoded() ? 
HdfsFileStatusProto.Flags.HAS_EC_VALUE : 0; + builder.setFlags(flags); return builder.build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java index 5e9396e368d..7ec5fe5c15f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.FsPermissionExtension; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -61,6 +60,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.EnumSet; import java.util.List; import java.util.Map; @@ -97,17 +97,8 @@ class JsonUtilClient { } /** Convert a string to a FsPermission object. */ - static FsPermission toFsPermission( - final String s, Boolean aclBit, Boolean encBit, Boolean erasureBit) { - FsPermission perm = new FsPermission(Short.parseShort(s, 8)); - final boolean aBit = (aclBit != null) ? aclBit : false; - final boolean eBit = (encBit != null) ? encBit : false; - final boolean ecBit = (erasureBit != null) ? erasureBit : false; - if (aBit || eBit || ecBit) { - return new FsPermissionExtension(perm, aBit, eBit, ecBit); - } else { - return perm; - } + static FsPermission toFsPermission(final String s) { + return null == s ? null : new FsPermission(Short.parseShort(s, 8)); } /** Convert a Json map to a HdfsFileStatus object. */ @@ -128,10 +119,23 @@ class JsonUtilClient { final long len = ((Number) m.get("length")).longValue(); final String owner = (String) m.get("owner"); final String group = (String) m.get("group"); - final FsPermission permission = toFsPermission((String) m.get("permission"), - (Boolean) m.get("aclBit"), - (Boolean) m.get("encBit"), - (Boolean) m.get("ecBit")); + final FsPermission permission = toFsPermission((String)m.get("permission")); + + Boolean aclBit = (Boolean) m.get("aclBit"); + Boolean encBit = (Boolean) m.get("encBit"); + Boolean erasureBit = (Boolean) m.get("ecBit"); + EnumSet f = + EnumSet.noneOf(HdfsFileStatus.Flags.class); + if (aclBit != null && aclBit) { + f.add(HdfsFileStatus.Flags.HAS_ACL); + } + if (encBit != null && encBit) { + f.add(HdfsFileStatus.Flags.HAS_CRYPT); + } + if (erasureBit != null && erasureBit) { + f.add(HdfsFileStatus.Flags.HAS_EC); + } + final long aTime = ((Number) m.get("accessTime")).longValue(); final long mTime = ((Number) m.get("modificationTime")).longValue(); final long blockSize = ((Number) m.get("blockSize")).longValue(); @@ -143,11 +147,11 @@ class JsonUtilClient { final byte storagePolicy = m.containsKey("storagePolicy") ? 
(byte) ((Number) m.get("storagePolicy")).longValue() : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; - return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, - replication, blockSize, mTime, aTime, permission, owner, group, - symlink, DFSUtilClient.string2Bytes(localName), - fileId, childrenNum, null, - storagePolicy, null); + return new HdfsFileStatus(len, + type == WebHdfsConstants.PathType.DIRECTORY, replication, blockSize, + mTime, aTime, permission, f, owner, group, symlink, + DFSUtilClient.string2Bytes(localName), fileId, childrenNum, + null, storagePolicy, null); } static HdfsFileStatus[] toHdfsFileStatusArray(final Map json) { @@ -465,9 +469,7 @@ class JsonUtilClient { aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit")); String permString = (String) m.get("permission"); if (permString != null) { - final FsPermission permission = toFsPermission(permString, - (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"), - (Boolean) m.get("ecBit")); + final FsPermission permission = toFsPermission(permString); aclStatusBuilder.setPermission(permission); } final List entries = (List) m.get("entries"); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java index 50da8998264..f690dd0039d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java @@ -32,7 +32,13 @@ public class WebHdfsConstants { FILE, DIRECTORY, SYMLINK; static PathType valueOf(HdfsFileStatus status) { - return status.isDir()? DIRECTORY: status.isSymlink()? SYMLINK: FILE; + if (status.isDirectory()) { + return DIRECTORY; + } + if (status.isSymlink()) { + return SYMLINK; + } + return FILE; } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 3861cbaf015..1159e50de7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -1016,15 +1016,7 @@ public class WebHdfsFileSystem extends FileSystem public FileStatus getFileStatus(Path f) throws IOException { statistics.incrementReadOps(1); storageStatistics.incrementOpCounter(OpType.GET_FILE_STATUS); - return makeQualified(getHdfsFileStatus(f), f); - } - - private FileStatus makeQualified(HdfsFileStatus f, Path parent) { - return new FileStatus(f.getLen(), f.isDir(), f.getReplication(), - f.getBlockSize(), f.getModificationTime(), f.getAccessTime(), - f.getPermission(), f.getOwner(), f.getGroup(), - f.isSymlink() ? 
new Path(f.getSymlink()) : null, - f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory())); + return getHdfsFileStatus(f).makeQualified(getUri(), f); } @Override @@ -1507,6 +1499,7 @@ public class WebHdfsFileSystem extends FileSystem statistics.incrementReadOps(1); storageStatistics.incrementOpCounter(OpType.LIST_STATUS); + final URI fsUri = getUri(); final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS; return new FsPathResponseRunner(op, f) { @Override @@ -1515,7 +1508,7 @@ public class WebHdfsFileSystem extends FileSystem JsonUtilClient.toHdfsFileStatusArray(json); final FileStatus[] statuses = new FileStatus[hdfsStatuses.length]; for (int i = 0; i < hdfsStatuses.length; i++) { - statuses[i] = makeQualified(hdfsStatuses[i], f); + statuses[i] = hdfsStatuses[i].makeQualified(fsUri, f); } return statuses; @@ -1541,10 +1534,11 @@ public class WebHdfsFileSystem extends FileSystem } }.run(); // Qualify the returned FileStatus array + final URI fsUri = getUri(); final HdfsFileStatus[] statuses = listing.getPartialListing(); FileStatus[] qualified = new FileStatus[statuses.length]; for (int i = 0; i < statuses.length; i++) { - qualified[i] = makeQualified(statuses[i], f); + qualified[i] = statuses[i].makeQualified(fsUri, f); } return new DirectoryEntries(qualified, listing.getLastName(), listing.hasMore()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto index bb7fdb0168f..c2529c90c32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto @@ -21,7 +21,12 @@ option java_outer_classname = "AclProtos"; option java_generate_equals_and_hash = true; package hadoop.hdfs; -import "hdfs.proto"; +/** + * File or Directory permision - same spec as posix + */ +message FsPermissionProto { + required uint32 perm = 1; // Actually a short - only 16bits used +} message AclEntryProto { enum AclEntryScopeProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto index 65baab65a47..9f803503c39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto @@ -25,7 +25,7 @@ import "hdfs.proto"; message SetErasureCodingPolicyRequestProto { required string src = 1; - required string ecPolicyName = 2; + optional string ecPolicyName = 2; } message SetErasureCodingPolicyResponseProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto index 497d734445d..465da854e4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto @@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true; package hadoop.hdfs; import "Security.proto"; +import "acl.proto"; /** * Extended block idenfies a block @@ -198,13 +199,6 @@ message CorruptFileBlocksProto { required string cookie = 2; } -/** - * File or Directory permision - same spec as posix - */ -message FsPermissionProto { - required uint32 perm = 1; // Actually a short - only 16bits used -} - /** * Types of recognized storage media. 
*/ @@ -390,6 +384,11 @@ message HdfsFileStatusProto { IS_FILE = 2; IS_SYMLINK = 3; } + enum Flags { + HAS_ACL = 0x01; // has ACLs + HAS_CRYPT = 0x02; // encrypted + HAS_EC = 0x04; // erasure coded + } required FileType fileType = 1; required bytes path = 2; // local name of inode encoded java UTF8 required uint64 length = 3; @@ -417,6 +416,9 @@ message HdfsFileStatusProto { // Optional field for erasure coding optional ErasureCodingPolicyProto ecPolicy = 17; + + // Set of flags + optional uint32 flags = 18 [default = 0]; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index 1ab890f3f50..1059a02f127 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -124,6 +124,8 @@ public class HttpFSFileSystem extends FileSystem public static final String POLICY_NAME_PARAM = "storagepolicy"; public static final String OFFSET_PARAM = "offset"; public static final String LENGTH_PARAM = "length"; + public static final String SNAPSHOT_NAME_PARAM = "snapshotname"; + public static final String OLD_SNAPSHOT_NAME_PARAM = "oldsnapshotname"; public static final Short DEFAULT_PERMISSION = 0755; public static final String ACLSPEC_DEFAULT = ""; @@ -144,6 +146,8 @@ public class HttpFSFileSystem extends FileSystem public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream"; + public static final String SNAPSHOT_JSON = "Path"; + public enum FILE_TYPE { FILE, DIRECTORY, SYMLINK; @@ -229,7 +233,9 @@ public class HttpFSFileSystem extends FileSystem DELETE(HTTP_DELETE), SETXATTR(HTTP_PUT), GETXATTRS(HTTP_GET), REMOVEXATTR(HTTP_PUT), LISTXATTRS(HTTP_GET), LISTSTATUS_BATCH(HTTP_GET), GETALLSTORAGEPOLICY(HTTP_GET), GETSTORAGEPOLICY(HTTP_GET), - SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST); + SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST), + CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE), + RENAMESNAPSHOT(HTTP_PUT); private String httpMethod; @@ -1047,18 +1053,7 @@ public class HttpFSFileSystem extends FileSystem /** Convert a string to a FsPermission object. */ static FsPermission toFsPermission(JSONObject json) { final String s = (String) json.get(PERMISSION_JSON); - final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON); - final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON); - final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON); - FsPermission perm = new FsPermission(Short.parseShort(s, 8)); - final boolean aBit = (aclBit != null) ? aclBit : false; - final boolean eBit = (encBit != null) ? encBit : false; - final boolean ecBit = (erasureBit != null) ? 
erasureBit : false; - if (aBit || eBit || ecBit) { - return new FsPermissionExtension(perm, aBit, eBit, ecBit); - } else { - return perm; - } + return new FsPermission(Short.parseShort(s, 8)); } private FileStatus createFileStatus(Path parent, JSONObject json) { @@ -1073,23 +1068,23 @@ public class HttpFSFileSystem extends FileSystem long mTime = (Long) json.get(MODIFICATION_TIME_JSON); long blockSize = (Long) json.get(BLOCK_SIZE_JSON); short replication = ((Long) json.get(REPLICATION_JSON)).shortValue(); - FileStatus fileStatus = null; - switch (type) { - case FILE: - case DIRECTORY: - fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY), - replication, blockSize, mTime, aTime, - permission, owner, group, path); - break; - case SYMLINK: - Path symLink = null; - fileStatus = new FileStatus(len, false, - replication, blockSize, mTime, aTime, - permission, owner, group, symLink, - path); + final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON); + final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON); + final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON); + final boolean aBit = (aclBit != null) ? aclBit : false; + final boolean eBit = (encBit != null) ? encBit : false; + final boolean ecBit = (erasureBit != null) ? erasureBit : false; + if (aBit || eBit || ecBit) { + // include this for compatibility with 2.x + FsPermissionExtension deprecatedPerm = + new FsPermissionExtension(permission, aBit, eBit, ecBit); + return new FileStatus(len, FILE_TYPE.DIRECTORY == type, + replication, blockSize, mTime, aTime, deprecatedPerm, owner, group, + null, path, aBit, eBit, ecBit); } - return fileStatus; + return new FileStatus(len, FILE_TYPE.DIRECTORY == type, + replication, blockSize, mTime, aTime, permission, owner, group, path); } /** @@ -1445,4 +1440,43 @@ public class HttpFSFileSystem extends FileSystem Operation.UNSETSTORAGEPOLICY.getMethod(), params, src, true); HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK); } + + @Override + public final Path createSnapshot(Path path, String snapshotName) + throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, Operation.CREATESNAPSHOT.toString()); + if (snapshotName != null) { + params.put(SNAPSHOT_NAME_PARAM, snapshotName); + } + HttpURLConnection conn = getConnection(Operation.CREATESNAPSHOT.getMethod(), + params, path, true); + HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn); + return new Path((String) json.get(SNAPSHOT_JSON)); + } + + @Override + public void renameSnapshot(Path path, String snapshotOldName, + String snapshotNewName) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, Operation.RENAMESNAPSHOT.toString()); + params.put(SNAPSHOT_NAME_PARAM, snapshotNewName); + params.put(OLD_SNAPSHOT_NAME_PARAM, snapshotOldName); + HttpURLConnection conn = getConnection(Operation.RENAMESNAPSHOT.getMethod(), + params, path, true); + HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK); + } + + @Override + public void deleteSnapshot(Path path, String snapshotName) + throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, Operation.DELETESNAPSHOT.toString()); + params.put(SNAPSHOT_NAME_PARAM, snapshotName); + HttpURLConnection conn = getConnection(Operation.DELETESNAPSHOT.getMethod(), + params, path, true); + HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK); + } + } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java index f1615c3e678..4b5918abf50 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java @@ -669,7 +669,7 @@ public class FSOperations { /** * Creates a list-status executor. * - * @param path the directory to retrieve the status of its contents. + * @param path the directory/file to retrieve the status of its contents. * @param filter glob filter to use. * * @throws IOException thrown if the filter expression is incorrect. @@ -1492,4 +1492,109 @@ public class FSOperations { return JsonUtil.toJsonMap(locations); } } + + /** + * Executor that performs a createSnapshot FileSystemAccess operation. + */ + @InterfaceAudience.Private + public static class FSCreateSnapshot implements + FileSystemAccess.FileSystemExecutor { + + private Path path; + private String snapshotName; + + /** + * Creates a createSnapshot executor. + * @param path directory path to be snapshotted. + * @param snapshotName the snapshot name. + */ + public FSCreateSnapshot(String path, String snapshotName) { + this.path = new Path(path); + this.snapshotName = snapshotName; + } + + /** + * Executes the filesystem operation. + * @param fs filesystem instance to use. + * @return Path the complete path for newly created snapshot + * @throws IOException thrown if an IO error occurred. + */ + @Override + public String execute(FileSystem fs) throws IOException { + Path snapshotPath = fs.createSnapshot(path, snapshotName); + JSONObject json = toJSON(HttpFSFileSystem.HOME_DIR_JSON, + snapshotPath.toString()); + return json.toJSONString().replaceAll("\\\\", ""); + } + } + + /** + * Executor that performs a deleteSnapshot FileSystemAccess operation. + */ + @InterfaceAudience.Private + public static class FSDeleteSnapshot implements + FileSystemAccess.FileSystemExecutor { + + private Path path; + private String snapshotName; + + /** + * Creates a deleteSnapshot executor. + * @param path path for the snapshot to be deleted. + * @param snapshotName snapshot name. + */ + public FSDeleteSnapshot(String path, String snapshotName) { + this.path = new Path(path); + this.snapshotName = snapshotName; + } + + /** + * Executes the filesystem operation. + * @param fs filesystem instance to use. + * @return void + * @throws IOException thrown if an IO error occurred. + */ + @Override + public Void execute(FileSystem fs) throws IOException { + fs.deleteSnapshot(path, snapshotName); + return null; + } + } + + /** + * Executor that performs a renameSnapshot FileSystemAccess operation. + */ + @InterfaceAudience.Private + public static class FSRenameSnapshot implements + FileSystemAccess.FileSystemExecutor { + private Path path; + private String oldSnapshotName; + private String snapshotName; + + /** + * Creates a renameSnapshot executor. + * @param path directory path of the snapshot to be renamed. + * @param oldSnapshotName current snapshot name. + * @param snapshotName new snapshot name to be set. + */ + public FSRenameSnapshot(String path, String oldSnapshotName, + String snapshotName) { + this.path = new Path(path); + this.oldSnapshotName = oldSnapshotName; + this.snapshotName = snapshotName; + } + + /** + * Executes the filesystem operation. 
+ * @param fs filesystem instance to use. + * @return void + * @throws IOException thrown if an IO error occurred. + */ + @Override + public Void execute(FileSystem fs) throws IOException { + fs.renameSnapshot(path, oldSnapshotName, snapshotName); + return null; + } + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java index 347a74750dc..5f265c09852 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java @@ -100,6 +100,13 @@ public class HttpFSParametersProvider extends ParametersProvider { PARAMS_DEF.put(Operation.SETSTORAGEPOLICY, new Class[] {PolicyNameParam.class}); PARAMS_DEF.put(Operation.UNSETSTORAGEPOLICY, new Class[] {}); + PARAMS_DEF.put(Operation.CREATESNAPSHOT, + new Class[] {SnapshotNameParam.class}); + PARAMS_DEF.put(Operation.DELETESNAPSHOT, + new Class[] {SnapshotNameParam.class}); + PARAMS_DEF.put(Operation.RENAMESNAPSHOT, + new Class[] {OldSnapshotNameParam.class, + SnapshotNameParam.class}); } public HttpFSParametersProvider() { @@ -565,4 +572,42 @@ public class HttpFSParametersProvider extends ParametersProvider { super(NAME, null); } } + + /** + * Class for SnapshotName parameter. + */ + public static class SnapshotNameParam extends StringParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.SNAPSHOT_NAME_PARAM; + + /** + * Constructor. + */ + public SnapshotNameParam() { + super(NAME, null); + } + + } + + /** + * Class for OldSnapshotName parameter. + */ + public static class OldSnapshotNameParam extends StringParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.OLD_SNAPSHOT_NAME_PARAM; + + /** + * Constructor. 
+ */ + public OldSnapshotNameParam() { + super(NAME, null); + } + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index 5c0c9b5f967..03ccb4caa04 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.NewLengthParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam; +import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OldSnapshotNameParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam; @@ -45,6 +46,7 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PolicyNameParam import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SourcesParam; +import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SnapshotNameParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrEncodingParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrNameParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrSetFlagParam; @@ -430,6 +432,16 @@ public class HttpFSServer { response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; } + case DELETESNAPSHOT: { + String snapshotName = params.get(SnapshotNameParam.NAME, + SnapshotNameParam.class); + FSOperations.FSDeleteSnapshot command = + new FSOperations.FSDeleteSnapshot(path, snapshotName); + fsExecute(user, command); + AUDIT_LOG.info("[{}] deleted snapshot [{}]", path, snapshotName); + response = Response.ok().build(); + break; + } default: { throw new IOException( MessageFormat.format("Invalid HTTP DELETE operation [{0}]", @@ -602,6 +614,16 @@ public class HttpFSServer { } break; } + case CREATESNAPSHOT: { + String snapshotName = params.get(SnapshotNameParam.NAME, + SnapshotNameParam.class); + FSOperations.FSCreateSnapshot command = + new FSOperations.FSCreateSnapshot(path, snapshotName); + String json = fsExecute(user, command); + AUDIT_LOG.info("[{}] snapshot created as [{}]", path, snapshotName); + response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); + break; + } case SETXATTR: { String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class); @@ -617,6 +639,20 @@ public class HttpFSServer { response = Response.ok().build(); break; } + case RENAMESNAPSHOT: { + String oldSnapshotName = params.get(OldSnapshotNameParam.NAME, + OldSnapshotNameParam.class); + String snapshotName = params.get(SnapshotNameParam.NAME, + SnapshotNameParam.class); + FSOperations.FSRenameSnapshot command = + new FSOperations.FSRenameSnapshot(path, oldSnapshotName, + snapshotName); + fsExecute(user, command); + AUDIT_LOG.info("[{}] renamed snapshot [{}] to [{}]", path, + 
oldSnapshotName, snapshotName); + response = Response.ok().build(); + break; + } case REMOVEXATTR: { String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class); FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr( diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh index 6301e274866..85cbc6682c7 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh @@ -16,7 +16,7 @@ # limitations under the License. if [[ "${HADOOP_SHELL_EXECNAME}" = hdfs ]]; then - hadoop_add_subcommand "httpfs" "run HttpFS server, the HDFS HTTP Gateway" + hadoop_add_subcommand "httpfs" daemon "run HttpFS server, the HDFS HTTP Gateway" fi ## @description Command handler for httpfs subcommand diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java index e23093e0721..2cd89344aa8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.AppendTestUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.security.UserGroupInformation; @@ -74,6 +75,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.regex.Pattern; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; @@ -852,10 +854,12 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase { } } - private static void assertSameAclBit(FileSystem expected, FileSystem actual, + private static void assertSameAcls(FileSystem expected, FileSystem actual, Path path) throws IOException { FileStatus expectedFileStatus = expected.getFileStatus(path); FileStatus actualFileStatus = actual.getFileStatus(path); + assertEquals(actualFileStatus.hasAcl(), expectedFileStatus.hasAcl()); + // backwards compat assertEquals(actualFileStatus.getPermission().getAclBit(), expectedFileStatus.getPermission().getAclBit()); } @@ -888,31 +892,31 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase { AclStatus proxyAclStat = proxyFs.getAclStatus(path); AclStatus httpfsAclStat = httpfs.getAclStatus(path); assertSameAcls(httpfsAclStat, proxyAclStat); - assertSameAclBit(httpfs, proxyFs, path); + assertSameAcls(httpfs, proxyFs, path); httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true)); proxyAclStat = proxyFs.getAclStatus(path); httpfsAclStat = httpfs.getAclStatus(path); assertSameAcls(httpfsAclStat, proxyAclStat); - assertSameAclBit(httpfs, proxyFs, path); + assertSameAcls(httpfs, proxyFs, path); httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true)); proxyAclStat = proxyFs.getAclStatus(path); httpfsAclStat = httpfs.getAclStatus(path); assertSameAcls(httpfsAclStat, proxyAclStat); - 
assertSameAclBit(httpfs, proxyFs, path); + assertSameAcls(httpfs, proxyFs, path); httpfs.removeAclEntries(path, AclEntry.parseAclSpec(rmAclUser1, false)); proxyAclStat = proxyFs.getAclStatus(path); httpfsAclStat = httpfs.getAclStatus(path); assertSameAcls(httpfsAclStat, proxyAclStat); - assertSameAclBit(httpfs, proxyFs, path); + assertSameAcls(httpfs, proxyFs, path); httpfs.removeAcl(path); proxyAclStat = proxyFs.getAclStatus(path); httpfsAclStat = httpfs.getAclStatus(path); assertSameAcls(httpfsAclStat, proxyAclStat); - assertSameAclBit(httpfs, proxyFs, path); + assertSameAcls(httpfs, proxyFs, path); } /** @@ -935,21 +939,21 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase { AclStatus proxyAclStat = proxyFs.getAclStatus(dir); AclStatus httpfsAclStat = httpfs.getAclStatus(dir); assertSameAcls(httpfsAclStat, proxyAclStat); - assertSameAclBit(httpfs, proxyFs, dir); + assertSameAcls(httpfs, proxyFs, dir); /* Set a default ACL on the directory */ httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true))); proxyAclStat = proxyFs.getAclStatus(dir); httpfsAclStat = httpfs.getAclStatus(dir); assertSameAcls(httpfsAclStat, proxyAclStat); - assertSameAclBit(httpfs, proxyFs, dir); + assertSameAcls(httpfs, proxyFs, dir); /* Remove the default ACL */ httpfs.removeDefaultAcl(dir); proxyAclStat = proxyFs.getAclStatus(dir); httpfsAclStat = httpfs.getAclStatus(dir); assertSameAcls(httpfsAclStat, proxyAclStat); - assertSameAclBit(httpfs, proxyFs, dir); + assertSameAcls(httpfs, proxyFs, dir); } private void testEncryption() throws Exception { @@ -1033,11 +1037,12 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase { } protected enum Operation { - GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS, + GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR, GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION, LIST_STATUS_BATCH, - GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING, GETFILEBLOCKLOCATIONS + GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING, GETFILEBLOCKLOCATIONS, + CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT } private void operation(Operation op) throws Exception { @@ -1129,6 +1134,15 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase { case GETFILEBLOCKLOCATIONS: testGetFileBlockLocations(); break; + case CREATE_SNAPSHOT: + testCreateSnapshot(); + break; + case RENAME_SNAPSHOT: + testRenameSnapshot(); + break; + case DELETE_SNAPSHOT: + testDeleteSnapshot(); + break; } } @@ -1256,4 +1270,98 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase { location2.getTopologyPaths()); } } + + private void testCreateSnapshot(String snapshotName) throws Exception { + if (!this.isLocalFS()) { + Path snapshottablePath = new Path("/tmp/tmp-snap-test"); + createSnapshotTestsPreconditions(snapshottablePath); + //Now get the FileSystem instance that's being tested + FileSystem fs = this.getHttpFSFileSystem(); + if (snapshotName == null) { + fs.createSnapshot(snapshottablePath); + } else { + fs.createSnapshot(snapshottablePath, snapshotName); + } + Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot"); + FileStatus[] snapshotItems = fs.listStatus(snapshotsDir); + assertTrue("Should have exactly one snapshot.", + snapshotItems.length == 1); + String resultingSnapName = snapshotItems[0].getPath().getName(); + if (snapshotName == null) { + assertTrue("Snapshot auto generated name not matching 
pattern", + Pattern.matches("(s)(\\d{8})(-)(\\d{6})(\\.)(\\d{3})", + resultingSnapName)); + } else { + assertTrue("Snapshot name is not same as passed name.", + snapshotName.equals(resultingSnapName)); + } + cleanSnapshotTests(snapshottablePath, resultingSnapName); + } + } + + private void testCreateSnapshot() throws Exception { + testCreateSnapshot(null); + testCreateSnapshot("snap-with-name"); + } + + private void createSnapshotTestsPreconditions(Path snapshottablePath) + throws Exception { + //Needed to get a DistributedFileSystem instance, in order to + //call allowSnapshot on the newly created directory + DistributedFileSystem distributedFs = (DistributedFileSystem) + FileSystem.get(snapshottablePath.toUri(), this.getProxiedFSConf()); + distributedFs.mkdirs(snapshottablePath); + distributedFs.allowSnapshot(snapshottablePath); + Path subdirPath = new Path("/tmp/tmp-snap-test/subdir"); + distributedFs.mkdirs(subdirPath); + + } + + private void cleanSnapshotTests(Path snapshottablePath, + String resultingSnapName) throws Exception { + DistributedFileSystem distributedFs = (DistributedFileSystem) + FileSystem.get(snapshottablePath.toUri(), this.getProxiedFSConf()); + distributedFs.deleteSnapshot(snapshottablePath, resultingSnapName); + distributedFs.delete(snapshottablePath, true); + } + + private void testRenameSnapshot() throws Exception { + if (!this.isLocalFS()) { + Path snapshottablePath = new Path("/tmp/tmp-snap-test"); + createSnapshotTestsPreconditions(snapshottablePath); + //Now get the FileSystem instance that's being tested + FileSystem fs = this.getHttpFSFileSystem(); + fs.createSnapshot(snapshottablePath, "snap-to-rename"); + fs.renameSnapshot(snapshottablePath, "snap-to-rename", + "snap-new-name"); + Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot"); + FileStatus[] snapshotItems = fs.listStatus(snapshotsDir); + assertTrue("Should have exactly one snapshot.", + snapshotItems.length == 1); + String resultingSnapName = snapshotItems[0].getPath().getName(); + assertTrue("Snapshot name is not same as passed name.", + "snap-new-name".equals(resultingSnapName)); + cleanSnapshotTests(snapshottablePath, resultingSnapName); + } + } + + private void testDeleteSnapshot() throws Exception { + if (!this.isLocalFS()) { + Path snapshottablePath = new Path("/tmp/tmp-snap-test"); + createSnapshotTestsPreconditions(snapshottablePath); + //Now get the FileSystem instance that's being tested + FileSystem fs = this.getHttpFSFileSystem(); + fs.createSnapshot(snapshottablePath, "snap-to-delete"); + Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot"); + FileStatus[] snapshotItems = fs.listStatus(snapshotsDir); + assertTrue("Should have exactly one snapshot.", + snapshotItems.length == 1); + fs.deleteSnapshot(snapshottablePath, "snap-to-delete"); + snapshotItems = fs.listStatus(snapshotsDir); + assertTrue("There should be no snapshot anymore.", + snapshotItems.length == 0); + fs.delete(snapshottablePath, true); + } + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java index 0e1cc20177e..60e70d2e6f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.http.server; import 
org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.security.authentication.util.SignerSecretProvider; import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; @@ -71,6 +72,7 @@ import org.eclipse.jetty.webapp.WebAppContext; import com.google.common.collect.Maps; import java.util.Properties; +import java.util.regex.Pattern; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; /** @@ -465,6 +467,20 @@ public class TestHttpFSServer extends HFSTestCase { */ private void putCmd(String filename, String command, String params) throws Exception { + Assert.assertEquals(HttpURLConnection.HTTP_OK, + putCmdWithReturn(filename, command, params).getResponseCode()); + } + + /** + * General-purpose http PUT command to the httpfs server, + * which returns relted HttpURLConnection instance. + * @param filename The file to operate upon + * @param command The command to perform (SETACL, etc) + * @param params Parameters, like "aclspec=..." + * @return HttpURLConnection the HttpURLConnection instance for the given PUT + */ + private HttpURLConnection putCmdWithReturn(String filename, String command, + String params) throws Exception { String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; // Remove leading / from filename if (filename.charAt(0) == '/') { @@ -478,7 +494,7 @@ public class TestHttpFSServer extends HFSTestCase { HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.setRequestMethod("PUT"); conn.connect(); - Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + return conn; } /** @@ -882,6 +898,108 @@ public class TestHttpFSServer extends HFSTestCase { delegationTokenCommonTests(false); } + private HttpURLConnection snapshotTestPreconditions(String httpMethod, + String snapOperation, + String additionalParams) + throws Exception { + String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format( + "/webhdfs/v1/tmp/tmp-snap-test/subdir?user.name={0}&op=MKDIRS", + user)); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod("PUT"); + conn.connect(); + + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + + //needed to make the given dir snapshottable + Path snapshottablePath = new Path("/tmp/tmp-snap-test"); + DistributedFileSystem dfs = + (DistributedFileSystem) FileSystem.get(snapshottablePath.toUri(), + TestHdfsHelper.getHdfsConf()); + dfs.allowSnapshot(snapshottablePath); + + //Try to create snapshot passing snapshot name + url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format( + "/webhdfs/v1/tmp/tmp-snap-test?user.name={0}&op={1}&{2}", user, + snapOperation, additionalParams)); + conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod(httpMethod); + conn.connect(); + return conn; + } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testCreateSnapshot() throws Exception { + createHttpFSServer(false, false); + final HttpURLConnection conn = snapshotTestPreconditions("PUT", + "CREATESNAPSHOT", + "snapshotname=snap-with-name"); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + final BufferedReader reader = + new BufferedReader(new InputStreamReader(conn.getInputStream())); + String result = reader.readLine(); + //Validates if the content format is correct + 
Assert.assertTrue(result. + equals("{\"Path\":\"/tmp/tmp-snap-test/.snapshot/snap-with-name\"}")); + //Validates if the snapshot is properly created under .snapshot folder + result = getStatus("/tmp/tmp-snap-test/.snapshot", + "LISTSTATUS"); + Assert.assertTrue(result.contains("snap-with-name")); + } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testCreateSnapshotNoSnapshotName() throws Exception { + createHttpFSServer(false, false); + final HttpURLConnection conn = snapshotTestPreconditions("PUT", + "CREATESNAPSHOT", + ""); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + final BufferedReader reader = new BufferedReader( + new InputStreamReader(conn.getInputStream())); + String result = reader.readLine(); + //Validates if the content format is correct + Assert.assertTrue(Pattern.matches( + "(\\{\\\"Path\\\"\\:\\\"/tmp/tmp-snap-test/.snapshot/s)" + + "(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\"\\})", result)); + //Validates if the snapshot is properly created under .snapshot folder + result = getStatus("/tmp/tmp-snap-test/.snapshot", + "LISTSTATUS"); + + Assert.assertTrue(Pattern.matches("(.+)(\\\"pathSuffix\\\":\\\"s)" + + "(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\")(.+)", + result)); + } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testRenameSnapshot() throws Exception { + createHttpFSServer(false, false); + HttpURLConnection conn = snapshotTestPreconditions("PUT", + "CREATESNAPSHOT", + "snapshotname=snap-to-rename"); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + conn = snapshotTestPreconditions("PUT", + "RENAMESNAPSHOT", + "oldsnapshotname=snap-to-rename" + + "&snapshotname=snap-renamed"); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + //Validates the snapshot is properly renamed under .snapshot folder + String result = getStatus("/tmp/tmp-snap-test/.snapshot", + "LISTSTATUS"); + Assert.assertTrue(result.contains("snap-renamed")); + //There should be no snapshot named snap-to-rename now + Assert.assertFalse(result.contains("snap-to-rename")); + } + @Test @TestDir @TestJetty @@ -890,4 +1008,24 @@ public class TestHttpFSServer extends HFSTestCase { createHttpFSServer(true, true); delegationTokenCommonTests(true); } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testDeleteSnapshot() throws Exception { + createHttpFSServer(false, false); + HttpURLConnection conn = snapshotTestPreconditions("PUT", + "CREATESNAPSHOT", + "snapshotname=snap-to-delete"); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + conn = snapshotTestPreconditions("DELETE", + "DELETESNAPSHOT", + "snapshotname=snap-to-delete"); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + //Validates the snapshot is not under .snapshot folder anymore + String result = getStatus("/tmp/tmp-snap-test/.snapshot", + "LISTSTATUS"); + Assert.assertFalse(result.contains("snap-to-delete")); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java index cc17394197a..abaa5cad648 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java @@ -65,7 +65,9 @@ public class Nfs3Utils { * client takes only the lower 32bit of the fileId and treats it as signed * int. 
When the 32th bit is 1, the client considers it invalid. */ - NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG; + NfsFileType fileType = fs.isDirectory() + ? NfsFileType.NFSDIR + : NfsFileType.NFSREG; fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType; int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1; long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs @@ -98,7 +100,7 @@ public class Nfs3Utils { return null; } - long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat + long size = fstat.isDirectory() ? getDirSize(fstat.getChildrenNum()) : fstat .getLen(); return new WccAttr(size, new NfsTime(fstat.getModificationTime()), new NfsTime(fstat.getModificationTime())); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index d6bb71d0d9c..7a6aa89fde7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -1208,7 +1208,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { if (fstat == null) { return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc); } - if (fstat.isDir()) { + if (fstat.isDirectory()) { return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc); } @@ -1289,7 +1289,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { if (fstat == null) { return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc); } - if (!fstat.isDir()) { + if (!fstat.isDirectory()) { return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc); } @@ -1565,7 +1565,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new READDIR3Response(Nfs3Status.NFS3ERR_STALE); } - if (!dirStatus.isDir()) { + if (!dirStatus.isDirectory()) { LOG.error("Can't readdir for regular file, fileId: " + handle.getFileId()); return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR); @@ -1732,7 +1732,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE); } - if (!dirStatus.isDir()) { + if (!dirStatus.isDirectory()) { LOG.error("Can't readdirplus for regular file, fileId: " + handle.getFileId()); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR); diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 83f3e9f368c..d5db8b3026c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -266,4 +266,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index b8364d8dcc3..bebb8d1ef82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -169,8 +169,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> compile - xerces - xercesImpl + io.netty + netty-all compile diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index 
914635e6210..61e48088dea 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -1,4 +1,4 @@ -#!/usr/bin/env bash + #!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with @@ -31,45 +31,47 @@ function hadoop_usage hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" hadoop_add_option "--workers" "turn on worker mode" - hadoop_add_subcommand "balancer" "run a cluster balancing utility" - hadoop_add_subcommand "cacheadmin" "configure the HDFS cache" - hadoop_add_subcommand "cblock" "cblock CLI" - hadoop_add_subcommand "cblockserver" "run cblock server" - hadoop_add_subcommand "classpath" "prints the class path needed to get the hadoop jar and the required libraries" - hadoop_add_subcommand "corona" "run an ozone data generator" - hadoop_add_subcommand "crypto" "configure HDFS encryption zones" - hadoop_add_subcommand "datanode" "run a DFS datanode" - hadoop_add_subcommand "debug" "run a Debug Admin to execute HDFS debug commands" - hadoop_add_subcommand "dfs" "run a filesystem command on the file system" - hadoop_add_subcommand "dfsadmin" "run a DFS admin client" - hadoop_add_subcommand "diskbalancer" "Distributes data evenly among disks on a given node" - hadoop_add_subcommand "envvars" "display computed Hadoop environment variables" - hadoop_add_subcommand "ec" "run a HDFS ErasureCoding CLI" - hadoop_add_subcommand "fetchdt" "fetch a delegation token from the NameNode" - hadoop_add_subcommand "fsck" "run a DFS filesystem checking utility" - hadoop_add_subcommand "getconf" "get config values from configuration" - hadoop_add_subcommand "groups" "get the groups which users belong to" - hadoop_add_subcommand "haadmin" "run a DFS HA admin client" - hadoop_add_subcommand "jmxget" "get JMX exported values from NameNode or DataNode." 
- hadoop_add_subcommand "journalnode" "run the DFS journalnode" - hadoop_add_subcommand "jscsi" "run cblock jscsi server" - hadoop_add_subcommand "ksm" "Ozone keyspace manager" - hadoop_add_subcommand "lsSnapshottableDir" "list all snapshottable dirs owned by the current user" - hadoop_add_subcommand "mover" "run a utility to move block replicas across storage types" - hadoop_add_subcommand "namenode" "run the DFS namenode" - hadoop_add_subcommand "nfs3" "run an NFS version 3 gateway" - hadoop_add_subcommand "oev" "apply the offline edits viewer to an edits file" - hadoop_add_subcommand "oiv" "apply the offline fsimage viewer to an fsimage" - hadoop_add_subcommand "oiv_legacy" "apply the offline fsimage viewer to a legacy fsimage" - hadoop_add_subcommand "oz" "command line interface for ozone" - hadoop_add_subcommand "oz_debug" "ozone debug tool, convert ozone meta data db into sqlite db" - hadoop_add_subcommand "portmap" "run a portmap service" - hadoop_add_subcommand "scm" "run the Storage Container Manager service" - hadoop_add_subcommand "secondarynamenode" "run the DFS secondary namenode" - hadoop_add_subcommand "snapshotDiff" "diff two snapshots of a directory or diff the current directory contents with a snapshot" - hadoop_add_subcommand "storagepolicies" "list/get/set block storage policies" - hadoop_add_subcommand "version" "print the version" - hadoop_add_subcommand "zkfc" "run the ZK Failover Controller daemon" + + hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility" + hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache" + hadoop_add_subcommand "cblock" admin "cblock CLI" + hadoop_add_subcommand "cblockserver" daemon "run cblock server" + hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" + hadoop_add_subcommand "corona" client "run an ozone data generator" + hadoop_add_subcommand "crypto" admin "configure HDFS encryption zones" + hadoop_add_subcommand "datanode" daemon "run a DFS datanode" + hadoop_add_subcommand "debug" admin "run a Debug Admin to execute HDFS debug commands" + hadoop_add_subcommand "dfs" client "run a filesystem command on the file system" + hadoop_add_subcommand "dfsadmin" admin "run a DFS admin client" + hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among disks on a given node" + hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" + hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI" + hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode" + hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility" + hadoop_add_subcommand "getconf" client "get config values from configuration" + hadoop_add_subcommand "groups" client "get the groups which users belong to" + hadoop_add_subcommand "haadmin" admin "run a DFS HA admin client" + hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode." 
+ hadoop_add_subcommand "journalnode" daemon "run the DFS journalnode" + hadoop_add_subcommand "jscsi" daemon "run cblock jscsi server" + hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager" + hadoop_add_subcommand "lsSnapshottableDir" client "list all snapshottable dirs owned by the current user" + hadoop_add_subcommand "mover" daemon "run a utility to move block replicas across storage types" + hadoop_add_subcommand "namenode" daemon "run the DFS namenode" + hadoop_add_subcommand "nfs3" daemon "run an NFS version 3 gateway" + hadoop_add_subcommand "oev" admin "apply the offline edits viewer to an edits file" + hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage" + hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage" + hadoop_add_subcommand "oz" client "command line interface for ozone" + hadoop_add_subcommand "oz_debug" client "ozone debug tool, convert ozone metadata into relational data" + hadoop_add_subcommand "portmap" daemon "run a portmap service" + hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" + hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode" + hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot" + hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies" + hadoop_add_subcommand "version" client "print the version" + hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon" + hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 1f60f3249c2..f4c383e84f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -269,7 +269,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY = "dfs.namenode.posix.acl.inheritance.enabled"; public static final boolean - DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_DEFAULT = false; + DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_DEFAULT = true; public static final String DFS_NAMENODE_XATTRS_ENABLED_KEY = "dfs.namenode.xattrs.enabled"; public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true; public static final String DFS_ADMIN = "dfs.cluster.administrators"; @@ -564,6 +564,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = ""; public static final String DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY = "dfs.namenode.ec.policies.max.cellsize"; public static final int DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT = 4 * 1024 * 1024; + public static final String DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY = + "dfs.namenode.ec.system.default.policy"; + public static final String DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT = + "RS-6-3-64k"; public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = "dfs.datanode.ec.reconstruction.stripedread.threads"; public static final int DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20; public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.buffer.size"; @@ -1031,7 +1035,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final boolean DFS_PIPELINE_ECN_ENABLED_DEFAULT = false; // Key Provider Cache Expiry - public static final String DFS_DATANODE_BLOCK_PINNING_ENABLED = + public static final String DFS_DATANODE_BLOCK_PINNING_ENABLED = "dfs.datanode.block-pinning.enabled"; public static final boolean DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java index e9c5b2a4b19..49f3eaaba2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java @@ -30,4 +30,8 @@ public class SnapshotException extends IOException { public SnapshotException(final Throwable cause) { super(cause); } + + public SnapshotException(final String message, final Throwable cause) { + super(message, cause); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java index 3ddfc85638a..676e8276f25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto; +import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 4ac49fe12f7..a4462769020 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -1488,7 +1487,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements RpcController controller, SetErasureCodingPolicyRequestProto req) throws ServiceException { try { - server.setErasureCodingPolicy(req.getSrc(), req.getEcPolicyName()); + String ecPolicyName = req.hasEcPolicyName() ? 
+ req.getEcPolicyName() : null; + server.setErasureCodingPolicy(req.getSrc(), ecPolicyName); return SetErasureCodingPolicyResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); @@ -1662,7 +1663,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements RpcController controller, GetErasureCodingCodecsRequestProto request) throws ServiceException { try { - HashMap codecs = server.getErasureCodingCodecs(); + Map codecs = server.getErasureCodingCodecs(); GetErasureCodingCodecsResponseProto.Builder resBuilder = GetErasureCodingCodecsResponseProto.newBuilder(); for (Map.Entry codec : codecs.entrySet()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/package-info.java new file mode 100644 index 00000000000..6233024467d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/package-info.java @@ -0,0 +1,18 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.protocolPB; \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 0041d5eda7b..0f4091dcb23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -286,8 +286,7 @@ public class Journal implements Closeable { fjm.setLastReadableTxId(val); } - @VisibleForTesting - JournalMetrics getMetricsForTests() { + JournalMetrics getMetrics() { return metrics; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java index cffe2c1f55a..fcfd9016cd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java @@ -45,6 +45,9 @@ class JournalMetrics { @Metric("Number of batches written where this node was lagging") MutableCounterLong batchesWrittenWhileLagging; + + @Metric("Number of edit logs downloaded by JournalNodeSyncer") + private MutableCounterLong numEditLogsSynced; private final int[] QUANTILE_INTERVALS = new int[] { 1*60, // 1m @@ -120,4 +123,12 @@ class JournalMetrics { q.add(us); } } + + public MutableCounterLong getNumEditLogsSynced() { + return numEditLogsSynced; + } + + public void incrNumEditLogsSynced() { + numEditLogsSynced.incr(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java index 479f6a00e2c..537ba0a0fd3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java @@ -77,6 +77,7 @@ public class JournalNodeSyncer { private final long journalSyncInterval; private final int logSegmentTransferTimeout; private final DataTransferThrottler throttler; + private final JournalMetrics metrics; JournalNodeSyncer(JournalNode jouranlNode, Journal journal, String jid, Configuration conf) { @@ -93,6 +94,7 @@ public class JournalNodeSyncer { DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_TIMEOUT_KEY, DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_TIMEOUT_DEFAULT); throttler = getThrottler(conf); + metrics = journal.getMetrics(); } void stopSync() { @@ -411,6 +413,8 @@ public class JournalNodeSyncer { LOG.warn("Deleting " + tmpEditsFile + " has failed"); } return false; + } else { + metrics.incrNumEditLogsSynced(); } return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index a5ee30bc1a5..6129db8a5a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -232,47 +232,47 @@ public 
class BlockManager implements BlockStatsMXBean { } /** Used by metrics. */ - public long getLowRedundancyBlocksStat() { + public long getLowRedundancyBlocks() { return neededReconstruction.getLowRedundancyBlocksStat(); } /** Used by metrics. */ - public long getCorruptBlocksStat() { + public long getCorruptBlocks() { return corruptReplicas.getCorruptBlocksStat(); } /** Used by metrics. */ - public long getMissingBlocksStat() { + public long getMissingBlocks() { return neededReconstruction.getCorruptBlocksStat(); } /** Used by metrics. */ - public long getMissingReplicationOneBlocksStat() { + public long getMissingReplicationOneBlocks() { return neededReconstruction.getCorruptReplicationOneBlocksStat(); } /** Used by metrics. */ - public long getPendingDeletionBlocksStat() { + public long getPendingDeletionReplicatedBlocks() { return invalidateBlocks.getBlocksStat(); } /** Used by metrics. */ - public long getLowRedundancyECBlockGroupsStat() { + public long getLowRedundancyECBlockGroups() { return neededReconstruction.getLowRedundancyECBlockGroupsStat(); } /** Used by metrics. */ - public long getCorruptECBlockGroupsStat() { + public long getCorruptECBlockGroups() { return corruptReplicas.getCorruptECBlockGroupsStat(); } /** Used by metrics. */ - public long getMissingECBlockGroupsStat() { + public long getMissingECBlockGroups() { return neededReconstruction.getCorruptECBlockGroupsStat(); } /** Used by metrics. */ - public long getPendingDeletionECBlockGroupsStat() { + public long getPendingDeletionECBlockGroups() { return invalidateBlocks.getECBlockGroupsStat(); } @@ -705,17 +705,36 @@ public class BlockManager implements BlockStatsMXBean { datanodeManager.fetchDatanodes(live, dead, false); out.println("Live Datanodes: " + live.size()); out.println("Dead Datanodes: " + dead.size()); + // - // Dump contents of neededReconstruction + // Need to iterate over all queues from neededReplications + // except for the QUEUE_WITH_CORRUPT_BLOCKS) // synchronized (neededReconstruction) { out.println("Metasave: Blocks waiting for reconstruction: " - + neededReconstruction.size()); - for (Block block : neededReconstruction) { + + neededReconstruction.getLowRedundancyBlockCount()); + for (int i = 0; i < neededReconstruction.LEVEL; i++) { + if (i != neededReconstruction.QUEUE_WITH_CORRUPT_BLOCKS) { + for (Iterator it = neededReconstruction.iterator(i); + it.hasNext();) { + Block block = it.next(); + dumpBlockMeta(block, out); + } + } + } + // + // Now prints corrupt blocks separately + // + out.println("Metasave: Blocks currently missing: " + + neededReconstruction.getCorruptBlockSize()); + for (Iterator it = neededReconstruction. 
+ iterator(neededReconstruction.QUEUE_WITH_CORRUPT_BLOCKS); + it.hasNext();) { + Block block = it.next(); dumpBlockMeta(block, out); } } - + // Dump any postponed over-replicated blocks out.println("Mis-replicated blocks that have been postponed:"); for (Block block : postponedMisreplicatedBlocks) { @@ -2292,11 +2311,11 @@ public class BlockManager implements BlockStatsMXBean { return bmSafeMode.getBytesInFuture(); } - public long getBytesInFutureReplicatedBlocksStat() { + public long getBytesInFutureReplicatedBlocks() { return bmSafeMode.getBytesInFutureBlocks(); } - public long getBytesInFutureStripedBlocksStat() { + public long getBytesInFutureECBlockGroups() { return bmSafeMode.getBytesInFutureECBlockGroups(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java similarity index 87% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java index ae7982628fa..928036af869 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java @@ -49,37 +49,47 @@ import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * Manages datanode decommissioning. A background monitor thread - * periodically checks the status of datanodes that are in-progress of - * decommissioning. + * Manages decommissioning and maintenance state for DataNodes. A background + * monitor thread periodically checks the status of DataNodes that are + * decommissioning or entering maintenance state. *

- * A datanode can be decommissioned in a few situations: + * A DataNode can be decommissioned in a few situations: *

 * <ul>
 * <li>If a DN is dead, it is decommissioned immediately.</li>
- * <li>If a DN is alive, it is decommissioned after all of its blocks
- * are sufficiently replicated. Merely under-replicated blocks do not
- * block decommissioning as long as they are above a replication
+ * <li>If a DN is alive, it is decommissioned after all of its blocks
+ * are sufficiently replicated. Merely under-replicated blocks do not
+ * block decommissioning as long as they are above a replication
 * threshold.</li>
 * </ul>
- * In the second case, the datanode transitions to a - * decommission-in-progress state and is tracked by the monitor thread. The - * monitor periodically scans through the list of insufficiently replicated - * blocks on these datanodes to - * determine if they can be decommissioned. The monitor also prunes this list - * as blocks become replicated, so monitor scans will become more efficient + * In the second case, the DataNode transitions to a DECOMMISSION_INPROGRESS + * state and is tracked by the monitor thread. The monitor periodically scans + * through the list of insufficiently replicated blocks on these DataNodes to + * determine if they can be DECOMMISSIONED. The monitor also prunes this list + * as blocks become replicated, so monitor scans will become more efficient * over time. *

- * Decommission-in-progress nodes that become dead do not progress to - * decommissioned until they become live again. This prevents potential + * DECOMMISSION_INPROGRESS nodes that become dead do not progress to + * DECOMMISSIONED until they become live again. This prevents potential * durability loss for singly-replicated blocks (see HDFS-6791). *

+ * DataNodes can also be put under maintenance state for any short duration + * maintenance operations. Unlike decommissioning, blocks are not always + * re-replicated for the DataNodes to enter maintenance state. When the + * blocks are replicated at least dfs.namenode.maintenance.replication.min, + * DataNodes transition to IN_MAINTENANCE state. Otherwise, just like + * decommissioning, DataNodes transition to ENTERING_MAINTENANCE state and + * wait for the blocks to be sufficiently replicated and then transition to + * IN_MAINTENANCE state. The block replication factor is relaxed for a maximum + * of maintenance expiry time. When DataNodes don't transition or join the + * cluster back by expiry time, blocks are re-replicated just as in + * decommissioning case as to avoid read or write performance degradation. + *
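The maintenance-state rule described above reduces to a simple threshold check. A standalone sketch, using a hypothetical per-node summary of live replica counts; the names below are illustrative and are not the real DatanodeAdminManager API:

enum AdminState { ENTERING_MAINTENANCE, IN_MAINTENANCE }

public class MaintenanceStateExample {
  /**
   * Decide the admin state for a node entering maintenance.
   * @param minMaintenanceReplication value of
   *     dfs.namenode.maintenance.replication.min
   * @param lowestLiveReplicas smallest live-replica count across the blocks
   *     hosted by the node (hypothetical input for this sketch)
   */
  static AdminState stateFor(int minMaintenanceReplication,
      int lowestLiveReplicas) {
    return lowestLiveReplicas >= minMaintenanceReplication
        ? AdminState.IN_MAINTENANCE
        : AdminState.ENTERING_MAINTENANCE;
  }

  public static void main(String[] args) {
    System.out.println(stateFor(1, 3)); // IN_MAINTENANCE
    System.out.println(stateFor(2, 1)); // ENTERING_MAINTENANCE
  }
}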

* This class depends on the FSNamesystem lock for synchronization. */ @InterfaceAudience.Private -public class DecommissionManager { - private static final Logger LOG = LoggerFactory.getLogger(DecommissionManager - .class); - +public class DatanodeAdminManager { + private static final Logger LOG = + LoggerFactory.getLogger(DatanodeAdminManager.class); private final Namesystem namesystem; private final BlockManager blockManager; private final HeartbeatManager hbManager; @@ -97,7 +107,7 @@ public class DecommissionManager { * the node from being marked as decommissioned. During a monitor tick, this * list is pruned as blocks becomes replicated. *

- * Note also that the reference to the list of under-replicated blocks + * Note also that the reference to the list of under-replicated blocks * will be null on initial add *

* However, this map can become out-of-date since it is not updated by block @@ -113,24 +123,23 @@ public class DecommissionManager { * outOfServiceNodeBlocks. Additional nodes wait in pendingNodes. */ private final Queue pendingNodes; - private Monitor monitor = null; - DecommissionManager(final Namesystem namesystem, + DatanodeAdminManager(final Namesystem namesystem, final BlockManager blockManager, final HeartbeatManager hbManager) { this.namesystem = namesystem; this.blockManager = blockManager; this.hbManager = hbManager; executor = Executors.newScheduledThreadPool(1, - new ThreadFactoryBuilder().setNameFormat("DecommissionMonitor-%d") + new ThreadFactoryBuilder().setNameFormat("DatanodeAdminMonitor-%d") .setDaemon(true).build()); outOfServiceNodeBlocks = new TreeMap<>(); pendingNodes = new LinkedList<>(); } /** - * Start the decommission monitor thread. + * Start the DataNode admin monitor thread. * @param conf */ void activate(Configuration conf) { @@ -151,7 +160,7 @@ public class DecommissionManager { if (strNodes != null) { LOG.warn("Deprecated configuration key {} will be ignored.", deprecatedKey); - LOG.warn("Please update your configuration to use {} instead.", + LOG.warn("Please update your configuration to use {} instead.", DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY); } @@ -161,7 +170,8 @@ public class DecommissionManager { final int maxConcurrentTrackedNodes = conf.getInt( DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES, - DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT); + DFSConfigKeys + .DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT); checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " + "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES); @@ -170,14 +180,14 @@ public class DecommissionManager { executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs, TimeUnit.SECONDS); - LOG.debug("Activating DecommissionManager with interval {} seconds, " + + LOG.debug("Activating DatanodeAdminManager with interval {} seconds, " + "{} max blocks per interval, " + "{} max concurrently tracked nodes.", intervalSecs, blocksPerInterval, maxConcurrentTrackedNodes); } /** - * Stop the decommission monitor thread, waiting briefly for it to terminate. + * Stop the admin monitor thread, waiting briefly for it to terminate. */ void close() { executor.shutdownNow(); @@ -187,7 +197,7 @@ public class DecommissionManager { } /** - * Start decommissioning the specified datanode. + * Start decommissioning the specified datanode. * @param node */ @VisibleForTesting @@ -211,7 +221,7 @@ public class DecommissionManager { } /** - * Stop decommissioning the specified datanode. + * Stop decommissioning the specified datanode. 
* @param node */ @VisibleForTesting @@ -224,7 +234,7 @@ public class DecommissionManager { if (node.isAlive()) { blockManager.processExtraRedundancyBlocksOnInService(node); } - // Remove from tracking in DecommissionManager + // Remove from tracking in DatanodeAdminManager pendingNodes.remove(node); outOfServiceNodeBlocks.remove(node); } else { @@ -303,7 +313,7 @@ public class DecommissionManager { blockManager.processExtraRedundancyBlocksOnInService(node); } - // Remove from tracking in DecommissionManager + // Remove from tracking in DatanodeAdminManager pendingNodes.remove(node); outOfServiceNodeBlocks.remove(node); } else { @@ -324,8 +334,9 @@ public class DecommissionManager { /** * Checks whether a block is sufficiently replicated/stored for - * decommissioning. For replicated blocks or striped blocks, full-strength - * replication or storage is not always necessary, hence "sufficient". + * DECOMMISSION_INPROGRESS or ENTERING_MAINTENANCE datanodes. For replicated + * blocks or striped blocks, full-strength replication or storage is not + * always necessary, hence "sufficient". * @return true if sufficient, else false. */ private boolean isSufficient(BlockInfo block, BlockCollection bc, @@ -416,9 +427,10 @@ public class DecommissionManager { } /** - * Checks to see if DNs have finished decommissioning. + * Checks to see if datanodes have finished DECOMMISSION_INPROGRESS or + * ENTERING_MAINTENANCE state. *

- * Since this is done while holding the namesystem lock, + * Since this is done while holding the namesystem lock, * the amount of work per monitor tick is limited. */ private class Monitor implements Runnable { @@ -440,15 +452,15 @@ public class DecommissionManager { */ private int numBlocksCheckedPerLock = 0; /** - * The number of nodes that have been checked on this tick. Used for + * The number of nodes that have been checked on this tick. Used for * statistics. */ private int numNodesChecked = 0; /** - * The last datanode in outOfServiceNodeBlocks that we've processed + * The last datanode in outOfServiceNodeBlocks that we've processed. */ - private DatanodeDescriptor iterkey = new DatanodeDescriptor(new - DatanodeID("", "", "", 0, 0, 0, 0)); + private DatanodeDescriptor iterkey = new DatanodeDescriptor( + new DatanodeID("", "", "", 0, 0, 0, 0)); Monitor(int numBlocksPerCheck, int maxConcurrentTrackedNodes) { this.numBlocksPerCheck = numBlocksPerCheck; @@ -463,8 +475,8 @@ public class DecommissionManager { @Override public void run() { if (!namesystem.isRunning()) { - LOG.info("Namesystem is not running, skipping decommissioning checks" - + "."); + LOG.info("Namesystem is not running, skipping " + + "decommissioning/maintenance checks."); return; } // Reset the checked count at beginning of each iteration @@ -486,7 +498,7 @@ public class DecommissionManager { } /** - * Pop datanodes off the pending list and into decomNodeBlocks, + * Pop datanodes off the pending list and into decomNodeBlocks, * subject to the maxConcurrentTrackedNodes limit. */ private void processPendingNodes() { @@ -522,8 +534,8 @@ public class DecommissionManager { continue; } if (blocks == null) { - // This is a newly added datanode, run through its list to schedule - // under-replicated blocks for replication and collect the blocks + // This is a newly added datanode, run through its list to schedule + // under-replicated blocks for replication and collect the blocks // that are insufficiently replicated for further tracking LOG.debug("Newly-added node {}, doing full scan to find " + "insufficiently-replicated blocks.", dn); @@ -531,26 +543,27 @@ public class DecommissionManager { outOfServiceNodeBlocks.put(dn, blocks); fullScan = true; } else { - // This is a known datanode, check if its # of insufficiently - // replicated blocks has dropped to zero and if it can be decommed + // This is a known datanode, check if its # of insufficiently + // replicated blocks has dropped to zero and if it can move + // to the next state. LOG.debug("Processing {} node {}", dn.getAdminState(), dn); pruneReliableBlocks(dn, blocks); } if (blocks.size() == 0) { if (!fullScan) { - // If we didn't just do a full scan, need to re-check with the + // If we didn't just do a full scan, need to re-check with the // full block map. // - // We've replicated all the known insufficiently replicated - // blocks. Re-check with the full block map before finally - // marking the datanode as decommissioned + // We've replicated all the known insufficiently replicated + // blocks. Re-check with the full block map before finally + // marking the datanode as DECOMMISSIONED or IN_MAINTENANCE. LOG.debug("Node {} has finished replicating current set of " + "blocks, checking with the full block map.", dn); blocks = handleInsufficientlyStored(dn); outOfServiceNodeBlocks.put(dn, blocks); } - // If the full scan is clean AND the node liveness is okay, - // we can finally mark as decommissioned. 
+ // If the full scan is clean AND the node liveness is okay, + // we can finally mark as DECOMMISSIONED or IN_MAINTENANCE. final boolean isHealthy = blockManager.isNodeHealthyForDecommissionOrMaintenance(dn); if (blocks.size() == 0 && isHealthy) { @@ -580,7 +593,7 @@ public class DecommissionManager { } iterkey = dn; } - // Remove the datanodes that are decommissioned or in service after + // Remove the datanodes that are DECOMMISSIONED or in service after // maintenance expiration. for (DatanodeDescriptor dn : toRemove) { Preconditions.checkState(dn.isDecommissioned() || dn.isInService(), @@ -598,9 +611,9 @@ public class DecommissionManager { } /** - * Returns a list of blocks on a datanode that are insufficiently replicated - * or require recovery, i.e. requiring recovery and should prevent - * decommission. + * Returns a list of blocks on a datanode that are insufficiently + * replicated or require recovery, i.e. requiring recovery and + * should prevent decommission or maintenance. *

* As part of this, it also schedules replication/recovery work. * @@ -615,9 +628,10 @@ public class DecommissionManager { } /** - * Used while checking if decommission-in-progress datanodes can be marked - * as decommissioned. Combines shared logic of - * pruneReliableBlocks and handleInsufficientlyStored. + * Used while checking if DECOMMISSION_INPROGRESS datanodes can be + * marked as DECOMMISSIONED or ENTERING_MAINTENANCE datanodes can be + * marked as IN_MAINTENANCE. Combines shared logic of pruneReliableBlocks + * and handleInsufficientlyStored. * * @param datanode Datanode * @param it Iterator over the blocks on the @@ -652,7 +666,7 @@ public class DecommissionManager { // configured per-iteration-limit. namesystem.writeUnlock(); try { - LOG.debug("Yielded lock during decommission check"); + LOG.debug("Yielded lock during decommission/maintenance check"); Thread.sleep(0, 500); } catch (InterruptedException ignored) { return; @@ -682,8 +696,8 @@ public class DecommissionManager { final NumberReplicas num = blockManager.countNodes(block); final int liveReplicas = num.liveReplicas(); - // Schedule low redundancy blocks for reconstruction if not already - // pending + // Schedule low redundancy blocks for reconstruction + // if not already pending. boolean isDecommission = datanode.isDecommissionInProgress(); boolean neededReconstruction = isDecommission ? blockManager.isNeededReconstruction(block, num) : @@ -701,7 +715,8 @@ public class DecommissionManager { } // Even if the block is without sufficient redundancy, - // it doesn't block decommission if has sufficient redundancy + // it might not block decommission/maintenance if it + // has sufficient redundancy. if (isSufficient(block, bc, num, isDecommission)) { if (pruneReliableBlocks) { it.remove(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 2bd4a203ff7..d35894caef3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -661,7 +661,11 @@ public class DatanodeDescriptor extends DatanodeInfo { return erasurecodeBlocks.size(); } - public List getReplicationCommand(int maxTransfers) { + int getNumberOfReplicateBlocks() { + return replicateBlocks.size(); + } + + List getReplicationCommand(int maxTransfers) { return replicateBlocks.poll(maxTransfers); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 1d0975111d9..c75bcea45b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -75,7 +75,7 @@ public class DatanodeManager { private final Namesystem namesystem; private final BlockManager blockManager; - private final DecommissionManager decomManager; + private final DatanodeAdminManager datanodeAdminManager; private final HeartbeatManager heartbeatManager; private final FSClusterStats fsClusterStats; @@ -212,8 +212,6 @@ public class DatanodeManager { 
this.namesystem = namesystem; this.blockManager = blockManager; - // TODO: Enables DFSNetworkTopology by default after more stress - // testings/validations. this.useDfsNetworkTopology = conf.getBoolean( DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT); @@ -223,9 +221,10 @@ public class DatanodeManager { networktopology = NetworkTopology.getInstance(conf); } - this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf); - this.decomManager = new DecommissionManager(namesystem, blockManager, - heartbeatManager); + this.heartbeatManager = new HeartbeatManager(namesystem, + blockManager, conf); + this.datanodeAdminManager = new DatanodeAdminManager(namesystem, + blockManager, heartbeatManager); this.fsClusterStats = newFSClusterStats(); this.dataNodePeerStatsEnabled = conf.getBoolean( DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY, @@ -290,12 +289,19 @@ public class DatanodeManager { DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval + 10 * 1000 * heartbeatIntervalSeconds; - final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds), + + // Effected block invalidate limit is the bigger value between + // value configured in hdfs-site.xml, and 20 * HB interval. + final int configuredBlockInvalidateLimit = conf.getInt( + DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT); - this.blockInvalidateLimit = conf.getInt( - DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit); + final int countedBlockInvalidateLimit = 20*(int)(heartbeatIntervalSeconds); + this.blockInvalidateLimit = Math.max(countedBlockInvalidateLimit, + configuredBlockInvalidateLimit); LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY - + "=" + this.blockInvalidateLimit); + + ": configured=" + configuredBlockInvalidateLimit + + ", counted=" + countedBlockInvalidateLimit + + ", effected=" + blockInvalidateLimit); this.checkIpHostnameInRegistration = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY, @@ -365,12 +371,12 @@ public class DatanodeManager { } void activate(final Configuration conf) { - decomManager.activate(conf); + datanodeAdminManager.activate(conf); heartbeatManager.activate(); } void close() { - decomManager.close(); + datanodeAdminManager.close(); heartbeatManager.close(); } @@ -385,8 +391,8 @@ public class DatanodeManager { } @VisibleForTesting - public DecommissionManager getDecomManager() { - return decomManager; + public DatanodeAdminManager getDatanodeAdminManager() { + return datanodeAdminManager; } public HostConfigManager getHostConfigManager() { @@ -403,7 +409,8 @@ public class DatanodeManager { return fsClusterStats; } - int getBlockInvalidateLimit() { + @VisibleForTesting + public int getBlockInvalidateLimit() { return blockInvalidateLimit; } @@ -983,9 +990,9 @@ public class DatanodeManager { hostConfigManager.getMaintenanceExpirationTimeInMS(nodeReg); // If the registered node is in exclude list, then decommission it if (getHostConfigManager().isExcluded(nodeReg)) { - decomManager.startDecommission(nodeReg); + datanodeAdminManager.startDecommission(nodeReg); } else if (nodeReg.maintenanceNotExpired(maintenanceExpireTimeInMS)) { - decomManager.startMaintenance(nodeReg, maintenanceExpireTimeInMS); + datanodeAdminManager.startMaintenance(nodeReg, maintenanceExpireTimeInMS); } } @@ -1211,12 +1218,13 @@ public class DatanodeManager { long 
maintenanceExpireTimeInMS = hostConfigManager.getMaintenanceExpirationTimeInMS(node); if (node.maintenanceNotExpired(maintenanceExpireTimeInMS)) { - decomManager.startMaintenance(node, maintenanceExpireTimeInMS); + datanodeAdminManager.startMaintenance( + node, maintenanceExpireTimeInMS); } else if (hostConfigManager.isExcluded(node)) { - decomManager.startDecommission(node); + datanodeAdminManager.startDecommission(node); } else { - decomManager.stopMaintenance(node); - decomManager.stopDecommission(node); + datanodeAdminManager.stopMaintenance(node); + datanodeAdminManager.stopDecommission(node); } } node.setUpgradeDomain(hostConfigManager.getUpgradeDomain(node)); @@ -1655,21 +1663,38 @@ public class DatanodeManager { } final List cmds = new ArrayList<>(); - // check pending replication - List pendingList = nodeinfo.getReplicationCommand( - maxTransfers); - if (pendingList != null) { - cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId, - pendingList)); - maxTransfers -= pendingList.size(); - } - // check pending erasure coding tasks - List pendingECList = nodeinfo - .getErasureCodeCommand(maxTransfers); - if (pendingECList != null) { - cmds.add(new BlockECReconstructionCommand( - DNA_ERASURE_CODING_RECONSTRUCTION, pendingECList)); + // Allocate _approximately_ maxTransfers pending tasks to DataNode. + // NN chooses pending tasks based on the ratio between the lengths of + // replication and erasure-coded block queues. + int totalReplicateBlocks = nodeinfo.getNumberOfReplicateBlocks(); + int totalECBlocks = nodeinfo.getNumberOfBlocksToBeErasureCoded(); + int totalBlocks = totalReplicateBlocks + totalECBlocks; + if (totalBlocks > 0) { + int numReplicationTasks = (int) Math.ceil( + (double) (totalReplicateBlocks * maxTransfers) / totalBlocks); + int numECTasks = (int) Math.ceil( + (double) (totalECBlocks * maxTransfers) / totalBlocks); + + if (LOG.isDebugEnabled()) { + LOG.debug("Pending replication tasks: " + numReplicationTasks + + " erasure-coded tasks: " + numECTasks); + } + // check pending replication tasks + List pendingList = nodeinfo.getReplicationCommand( + numReplicationTasks); + if (pendingList != null && !pendingList.isEmpty()) { + cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId, + pendingList)); + } + // check pending erasure coding tasks + List pendingECList = nodeinfo + .getErasureCodeCommand(numECTasks); + if (pendingECList != null && !pendingECList.isEmpty()) { + cmds.add(new BlockECReconstructionCommand( + DNA_ERASURE_CODING_RECONSTRUCTION, pendingECList)); + } } + // check block invalidation Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit); if (blks != null) { @@ -1911,7 +1936,7 @@ public class DatanodeManager { this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000 * intervalSeconds; this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds), - DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT); + blockInvalidateLimit); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index e1e642abe4a..5ae5b30fb3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -2256,6 +2256,17 @@ public class DataNode extends ReconfigurableBase xmitsInProgress.getAndIncrement(); } + /** + * 
Increments the xmitInProgress count by given value. + * + * @param delta the amount of xmitsInProgress to increase. + * @see #incrementXmitsInProgress() + */ + public void incrementXmitsInProcess(int delta) { + Preconditions.checkArgument(delta >= 0); + xmitsInProgress.getAndAdd(delta); + } + /** * Decrements the xmitsInProgress count */ @@ -2263,6 +2274,16 @@ public class DataNode extends ReconfigurableBase xmitsInProgress.getAndDecrement(); } + /** + * Decrements the xmitsInProgress count by given value. + * + * @see #decrementXmitsInProgress() + */ + public void decrementXmitsInProgress(int delta) { + Preconditions.checkArgument(delta >= 0); + xmitsInProgress.getAndAdd(0 - delta); + } + private void reportBadBlock(final BPOfferService bpos, final ExtendedBlock block, final String msg) { FsVolumeSpi volume = getFSDataset().getVolume(block); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 8ffd3a4f3fc..3216a78b7b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -811,20 +811,25 @@ class DataXceiver extends Receiver implements Runnable { smallBufferSize)); mirrorIn = new DataInputStream(unbufMirrorIn); + String targetStorageId = null; + if (targetStorageIds.length > 0) { + // Older clients may not have provided any targetStorageIds + targetStorageId = targetStorageIds[0]; + } if (targetPinnings != null && targetPinnings.length > 0) { new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, targetPinnings[0], targetPinnings, - targetStorageIds[0], targetStorageIds); + targetStorageId, targetStorageIds); } else { new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, false, targetPinnings, - targetStorageIds[0], targetStorageIds); + targetStorageId, targetStorageIds); } mirrorOut.flush(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java index e076dda9809..72c224f2f77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java @@ -27,6 +27,7 @@ import org.apache.hadoop.util.Daemon; import org.slf4j.Logger; import java.util.Collection; +import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -93,7 +94,8 @@ public final class ErasureCodingWorker { LOG.debug("Using striped block reconstruction; pool threads={}", numThreads); stripedReconstructionPool = DFSUtilClient.getThreadPoolExecutor(2, - numThreads, 60, 
"StripedBlockReconstruction-", false); + numThreads, 60, new LinkedBlockingQueue<>(), + "StripedBlockReconstruction-", false); stripedReconstructionPool.allowCoreThreadTimeOut(true); } @@ -106,6 +108,7 @@ public final class ErasureCodingWorker { public void processErasureCodingTasks( Collection ecTasks) { for (BlockECReconstructionInfo reconInfo : ecTasks) { + int xmitsSubmitted = 0; try { StripedReconstructionInfo stripedReconInfo = new StripedReconstructionInfo( @@ -113,15 +116,25 @@ public final class ErasureCodingWorker { reconInfo.getLiveBlockIndices(), reconInfo.getSourceDnInfos(), reconInfo.getTargetDnInfos(), reconInfo.getTargetStorageTypes(), reconInfo.getTargetStorageIDs()); + // It may throw IllegalArgumentException from task#stripedReader + // constructor. final StripedBlockReconstructor task = new StripedBlockReconstructor(this, stripedReconInfo); if (task.hasValidTargets()) { + // See HDFS-12044. We increase xmitsInProgress even the task is only + // enqueued, so that + // 1) NN will not send more tasks than what DN can execute and + // 2) DN will not throw away reconstruction tasks, and instead keeps + // an unbounded number of tasks in the executor's task queue. + xmitsSubmitted = task.getXmits(); + getDatanode().incrementXmitsInProcess(xmitsSubmitted); stripedReconstructionPool.submit(task); } else { LOG.warn("No missing internal block. Skip reconstruction for task:{}", reconInfo); } } catch (Throwable e) { + getDatanode().decrementXmitsInProgress(xmitsSubmitted); LOG.warn("Failed to reconstruct striped block {}", reconInfo.getExtendedBlock().getLocalBlock(), e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java index 1119bbbd230..bac013aea29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java @@ -48,7 +48,6 @@ class StripedBlockReconstructor extends StripedReconstructor @Override public void run() { - getDatanode().incrementXmitsInProgress(); try { initDecoderIfNecessary(); @@ -66,7 +65,7 @@ class StripedBlockReconstructor extends StripedReconstructor LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e); getDatanode().getMetrics().incrECFailedReconstructionTasks(); } finally { - getDatanode().decrementXmitsInProgress(); + getDatanode().decrementXmitsInProgress(getXmits()); final DataNodeMetrics metrics = getDatanode().getMetrics(); metrics.incrECReconstructionTasks(); metrics.incrECReconstructionBytesRead(getBytesRead()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java index f6f343a6bf3..96f97915455 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java @@ -68,6 +68,8 @@ class StripedReader { private int[] successList; private final int minRequiredSources; + // the number of xmits used by the re-construction task. 
+ private final int xmits; // The buffers and indices for striped blocks whose length is 0 private ByteBuffer[] zeroStripeBuffers; private short[] zeroStripeIndices; @@ -107,6 +109,12 @@ class StripedReader { zeroStripeIndices = new short[zeroStripNum]; } + // It is calculated by the maximum number of connections from either sources + // or targets. + xmits = Math.max(minRequiredSources, + stripedReconInfo.getTargets() != null ? + stripedReconInfo.getTargets().length : 0); + this.liveIndices = stripedReconInfo.getLiveIndices(); assert liveIndices != null; this.sources = stripedReconInfo.getSources(); @@ -472,4 +480,16 @@ class StripedReader { CachingStrategy getCachingStrategy() { return reconstructor.getCachingStrategy(); } + + /** + * Return the xmits of this EC reconstruction task. + *

+ * DN uses it to coordinate with NN to adjust the speed of scheduling the + * EC reconstruction tasks to this DN. + * + * @return the xmits of this reconstruction task. + */ + int getXmits() { + return xmits; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java index a619c34781c..0a3e12546df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java @@ -103,5 +103,20 @@ public class StripedReconstructionInfo { String[] getTargetStorageIds() { return targetStorageIds; } + + /** + * Return the weight of this EC reconstruction task. + * + * DN uses it to coordinate with NN to adjust the speed of scheduling the + * reconstructions tasks to this DN. + * + * @return the weight of this reconstruction task. + * @see HDFS-12044 + */ + int getWeight() { + // See HDFS-12044. The weight of a RS(n, k) is calculated by the network + // connections it opens. + return sources.length + targets.length; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java index b8433c7b6c3..3202121b62e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java @@ -133,7 +133,6 @@ abstract class StripedReconstructor { } blockGroup = stripedReconInfo.getBlockGroup(); stripedReader = new StripedReader(this, datanode, conf, stripedReconInfo); - cachingStrategy = CachingStrategy.newDefaultStrategy(); positionInBlock = 0L; @@ -233,6 +232,13 @@ abstract class StripedReconstructor { return blockGroup; } + /** + * Get the xmits that _will_ be used for this reconstruction task. 
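The xmits bookkeeping above is plain arithmetic. A self-contained sketch that mirrors the two formulas appearing in this patch, the per-task xmits from StripedReader and the proportional split of maxTransfers from the DatanodeManager hunk earlier; the class and method names here are illustrative only:

public class XmitsAccountingExample {
  // xmits charged for one EC reconstruction task: the larger of the number
  // of source reads required and the number of targets written.
  static int xmitsForTask(int minRequiredSources, int numTargets) {
    return Math.max(minRequiredSources, numTargets);
  }

  // NameNode-side split of maxTransfers between the replication queue and
  // the erasure-coded queue, assuming totalBlocks > 0 as the patch does.
  static int[] splitTransfers(int maxTransfers, int replQueueLen,
      int ecQueueLen) {
    int total = replQueueLen + ecQueueLen;
    int repl = (int) Math.ceil((double) (replQueueLen * maxTransfers) / total);
    int ec = (int) Math.ceil((double) (ecQueueLen * maxTransfers) / total);
    return new int[] {repl, ec};
  }

  public static void main(String[] args) {
    // An RS-6-3 reconstruction reading from 6 sources and writing 1 target:
    System.out.println(xmitsForTask(6, 1)); // 6
    // maxTransfers=20 with 30 replicated and 10 EC blocks queued: 15 / 5
    int[] split = splitTransfers(20, 30, 10);
    System.out.println(split[0] + " / " + split[1]);
  }
}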
+ */ + int getXmits() { + return stripedReader.getXmits(); + } + BitSet getLiveBitSet() { return liveBitSet; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 6a61d31e26d..16df7091da7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -536,7 +536,7 @@ class FsDatasetImpl implements FsDatasetSpi { ReplicaInfo block = it.next(); final StorageLocation blockStorageLocation = block.getVolume().getStorageLocation(); - LOG.info("checking for block " + block.getBlockId() + + LOG.trace("checking for block " + block.getBlockId() + " with storageLocation " + blockStorageLocation); if (blockStorageLocation.equals(sdLocation)) { blocks.add(block); @@ -991,8 +991,7 @@ class FsDatasetImpl implements FsDatasetSpi { replicaInfo, smallBufferSize, conf); // Finalize the copied files - newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo, - false); + newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo); try (AutoCloseableLock lock = datasetLock.acquire()) { // Increment numBlocks here as this block moved without knowing to BPS FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume(); @@ -1295,7 +1294,7 @@ class FsDatasetImpl implements FsDatasetSpi { replicaInfo.bumpReplicaGS(newGS); // finalize the replica if RBW if (replicaInfo.getState() == ReplicaState.RBW) { - finalizeReplica(b.getBlockPoolId(), replicaInfo, false); + finalizeReplica(b.getBlockPoolId(), replicaInfo); } return replicaInfo; } @@ -1625,23 +1624,39 @@ class FsDatasetImpl implements FsDatasetSpi { @Override // FsDatasetSpi public void finalizeBlock(ExtendedBlock b, boolean fsyncDir) throws IOException { + ReplicaInfo replicaInfo = null; + ReplicaInfo finalizedReplicaInfo = null; try (AutoCloseableLock lock = datasetLock.acquire()) { if (Thread.interrupted()) { // Don't allow data modifications from interrupted threads throw new IOException("Cannot finalize block from Interrupted Thread"); } - ReplicaInfo replicaInfo = getReplicaInfo(b); + replicaInfo = getReplicaInfo(b); if (replicaInfo.getState() == ReplicaState.FINALIZED) { // this is legal, when recovery happens on a file that has // been opened for append but never modified return; } - finalizeReplica(b.getBlockPoolId(), replicaInfo, fsyncDir); + finalizedReplicaInfo = finalizeReplica(b.getBlockPoolId(), replicaInfo); + } + /* + * Sync the directory after rename from tmp/rbw to Finalized if + * configured. Though rename should be atomic operation, sync on both + * dest and src directories are done because IOUtils.fsync() calls + * directory's channel sync, not the journal itself. 
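The comment above distinguishes syncing the directory entry from syncing file contents. A small illustrative sketch of a directory-level sync with plain NIO; it is platform dependent (opening a FileChannel on a directory works on Linux) and is not the actual IOUtils.fsync() or fsyncDirectory() implementation:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class DirFsyncExample {
  static void fsyncDirectory(Path dir) throws IOException {
    // Force the directory's metadata (e.g. a freshly renamed entry) to disk;
    // this does not flush the contents of files inside the directory.
    try (FileChannel ch = FileChannel.open(dir, StandardOpenOption.READ)) {
      ch.force(true);
    }
  }

  public static void main(String[] args) throws IOException {
    fsyncDirectory(Paths.get("/tmp")); // example path only
  }
}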
+ */ + if (fsyncDir && finalizedReplicaInfo instanceof FinalizedReplica + && replicaInfo instanceof LocalReplica) { + FinalizedReplica finalizedReplica = + (FinalizedReplica) finalizedReplicaInfo; + finalizedReplica.fsyncDirectory(); + LocalReplica localReplica = (LocalReplica) replicaInfo; + localReplica.fsyncDirectory(); } } - private ReplicaInfo finalizeReplica(String bpid, - ReplicaInfo replicaInfo, boolean fsyncDir) throws IOException { + private ReplicaInfo finalizeReplica(String bpid, ReplicaInfo replicaInfo) + throws IOException { try (AutoCloseableLock lock = datasetLock.acquire()) { ReplicaInfo newReplicaInfo = null; if (replicaInfo.getState() == ReplicaState.RUR && @@ -1656,19 +1671,6 @@ class FsDatasetImpl implements FsDatasetSpi { newReplicaInfo = v.addFinalizedBlock( bpid, replicaInfo, replicaInfo, replicaInfo.getBytesReserved()); - /* - * Sync the directory after rename from tmp/rbw to Finalized if - * configured. Though rename should be atomic operation, sync on both - * dest and src directories are done because IOUtils.fsync() calls - * directory's channel sync, not the journal itself. - */ - if (fsyncDir && newReplicaInfo instanceof FinalizedReplica - && replicaInfo instanceof LocalReplica) { - FinalizedReplica finalizedReplica = (FinalizedReplica) newReplicaInfo; - finalizedReplica.fsyncDirectory(); - LocalReplica localReplica = (LocalReplica) replicaInfo; - localReplica.fsyncDirectory(); - } if (v.isTransientStorage()) { releaseLockedMemory( replicaInfo.getOriginalBytesReserved() @@ -2634,11 +2636,11 @@ class FsDatasetImpl implements FsDatasetSpi { newReplicaInfo.setNumBytes(newlength); volumeMap.add(bpid, newReplicaInfo.getReplicaInfo()); - finalizeReplica(bpid, newReplicaInfo.getReplicaInfo(), false); + finalizeReplica(bpid, newReplicaInfo.getReplicaInfo()); } } // finalize the block - return finalizeReplica(bpid, rur, false); + return finalizeReplica(bpid, rur); } @Override // FsDatasetSpi diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java index 1a2c889e41d..b653f4fcccf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java @@ -348,7 +348,7 @@ public class Mover { private void processRecursively(String parent, HdfsFileStatus status, Result result) { String fullPath = status.getFullName(parent); - if (status.isDir()) { + if (status.isDirectory()) { if (!fullPath.endsWith(Path.SEPARATOR)) { fullPath = fullPath + Path.SEPARATOR; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index b859148a443..318d8e011a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -471,7 +471,7 @@ public class BackupNode extends NameNode { * {@link LeaseManager.Monitor} protected by SafeMode. * {@link BlockManager.RedundancyMonitor} protected by SafeMode. * {@link HeartbeatManager.Monitor} protected by SafeMode. - * {@link DecommissionManager.Monitor} need to prohibit refreshNodes(). + * {@link DatanodeAdminManager.Monitor} need to prohibit refreshNodes(). 
* {@link PendingReconstructionBlocks.PendingReconstructionMonitor} * harmless, because RedundancyMonitor is muted. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java index 266d45cdc0b..404a0aab04b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import org.apache.commons.lang.ArrayUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -26,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.io.erasurecode.CodecUtil; +import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,7 +49,6 @@ public final class ErasureCodingPolicyManager { public static Logger LOG = LoggerFactory.getLogger( ErasureCodingPolicyManager.class); - private static final byte USER_DEFINED_POLICY_START_ID = (byte) 64; private int maxCellSize = DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT; @@ -92,9 +93,14 @@ public final class ErasureCodingPolicyManager { public void init(Configuration conf) { // Populate the list of enabled policies from configuration - final String[] policyNames = conf.getTrimmedStrings( - DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT); + final String[] enablePolicyNames = conf.getTrimmedStrings( + DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT); + final String defaultPolicyName = conf.getTrimmed( + DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY, + DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT); + final String[] policyNames = + (String[]) ArrayUtils.add(enablePolicyNames, defaultPolicyName); this.userPoliciesByID = new TreeMap<>(); this.userPoliciesByName = new TreeMap<>(); this.removedPoliciesByName = new TreeMap<>(); @@ -151,7 +157,13 @@ public final class ErasureCodingPolicyManager { * Get enabled policy by policy name. 
*/ public ErasureCodingPolicy getEnabledPolicyByName(String name) { - return enabledPoliciesByName.get(name); + ErasureCodingPolicy ecPolicy = enabledPoliciesByName.get(name); + if (ecPolicy == null) { + if (name.equalsIgnoreCase(ErasureCodeConstants.REPLICATION_POLICY_NAME)) { + ecPolicy = SystemErasureCodingPolicies.getReplicationPolicy(); + } + } + return ecPolicy; } /** @@ -251,7 +263,8 @@ public final class ErasureCodingPolicyManager { private byte getNextAvailablePolicyID() { byte currentId = this.userPoliciesByID.keySet().stream() - .max(Byte::compareTo).orElse(USER_DEFINED_POLICY_START_ID); + .max(Byte::compareTo).orElse( + ErasureCodeConstants.USER_DEFINED_POLICY_START_ID); return (byte) (currentId + 1); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 486503cbf7f..426b42b4414 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -25,8 +25,8 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.Arrays; import java.util.EnumSet; -import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import com.google.common.base.Preconditions; @@ -62,7 +62,7 @@ final class FSDirErasureCodingOp { /** * Check if the ecPolicyName is valid and enabled, return the corresponding - * EC policy if is. + * EC policy if is, including the REPLICATION EC policy. * @param fsn namespace * @param ecPolicyName name of EC policy to be checked * @return an erasure coding policy if ecPolicyName is valid and enabled @@ -295,7 +295,12 @@ final class FSDirErasureCodingOp { if (iip.getLastINode() == null) { throw new FileNotFoundException("Path not found: " + iip.getPath()); } - return getErasureCodingPolicyForPath(fsd, iip); + + ErasureCodingPolicy ecPolicy = getErasureCodingPolicyForPath(fsd, iip); + if (ecPolicy != null && ecPolicy.isReplicationPolicy()) { + ecPolicy = null; + } + return ecPolicy; } /** @@ -312,7 +317,8 @@ final class FSDirErasureCodingOp { } /** - * Get the erasure coding policy. This does not do any permission checking. + * Get the erasure coding policy, including the REPLICATION policy. This does + * not do any permission checking. 
* * @param fsn namespace * @param iip inodes in the path containing the file @@ -344,12 +350,13 @@ final class FSDirErasureCodingOp { * @param fsn namespace * @return {@link java.util.HashMap} array */ - static HashMap getErasureCodingCodecs(final FSNamesystem fsn) + static Map getErasureCodingCodecs(final FSNamesystem fsn) throws IOException { assert fsn.hasReadLock(); return CodecRegistry.getInstance().getCodec2CoderCompactMap(); } + //return erasure coding policy for path, including REPLICATION policy private static ErasureCodingPolicy getErasureCodingPolicyForPath( FSDirectory fsd, INodesInPath iip) throws IOException { Preconditions.checkNotNull(iip, "INodes cannot be null"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index 4c9224908bc..3b3368d8e14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.FsPermissionExtension; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; @@ -47,6 +46,7 @@ import org.apache.hadoop.security.AccessControlException; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Arrays; +import java.util.EnumSet; import static org.apache.hadoop.util.Time.now; @@ -384,7 +384,6 @@ class FSDirStatAndListingOp { * @param child for a directory listing of the iip, else null * @param storagePolicy for the path or closest ancestor * @param needLocation if block locations need to be included or not - * @param includeStoragePolicy if storage policy should be returned * @return a file status * @throws java.io.IOException if any error occurs */ @@ -439,7 +438,19 @@ class FSDirStatAndListingOp { int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; + EnumSet flags = + EnumSet.noneOf(HdfsFileStatus.Flags.class); INodeAttributes nodeAttrs = fsd.getAttributes(iip); + boolean hasAcl = nodeAttrs.getAclFeature() != null; + if (hasAcl) { + flags.add(HdfsFileStatus.Flags.HAS_ACL); + } + if (isEncrypted) { + flags.add(HdfsFileStatus.Flags.HAS_CRYPT); + } + if (isErasureCoded) { + flags.add(HdfsFileStatus.Flags.HAS_EC); + } return createFileStatus( size, node.isDirectory(), @@ -447,7 +458,8 @@ class FSDirStatAndListingOp { blocksize, node.getModificationTime(snapshot), node.getAccessTime(snapshot), - getPermissionForFileStatus(nodeAttrs, isEncrypted, isErasureCoded), + nodeAttrs.getFsPermission(), + flags, nodeAttrs.getUserName(), nodeAttrs.getGroupName(), node.isSymlink() ? 
node.asSymlink().getSymlink() : null, @@ -460,44 +472,24 @@ class FSDirStatAndListingOp { loc); } - private static HdfsFileStatus createFileStatus(long length, boolean isdir, - int replication, long blocksize, long mtime, - long atime, FsPermission permission, String owner, String group, - byte[] symlink, byte[] path, long fileId, int childrenNum, - FileEncryptionInfo feInfo, byte storagePolicy, + private static HdfsFileStatus createFileStatus( + long length, boolean isdir, + int replication, long blocksize, long mtime, long atime, + FsPermission permission, EnumSet flags, + String owner, String group, byte[] symlink, byte[] path, long fileId, + int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy, ErasureCodingPolicy ecPolicy, LocatedBlocks locations) { if (locations == null) { return new HdfsFileStatus(length, isdir, replication, blocksize, - mtime, atime, permission, owner, group, symlink, path, fileId, - childrenNum, feInfo, storagePolicy, ecPolicy); + mtime, atime, permission, flags, owner, group, symlink, path, + fileId, childrenNum, feInfo, storagePolicy, ecPolicy); } else { return new HdfsLocatedFileStatus(length, isdir, replication, blocksize, - mtime, atime, permission, owner, group, symlink, path, fileId, - locations, childrenNum, feInfo, storagePolicy, ecPolicy); + mtime, atime, permission, flags, owner, group, symlink, path, + fileId, locations, childrenNum, feInfo, storagePolicy, ecPolicy); } } - /** - * Returns an inode's FsPermission for use in an outbound FileStatus. If the - * inode has an ACL or is for an encrypted file/dir, then this method will - * return an FsPermissionExtension. - * - * @param node INode to check - * @param isEncrypted boolean true if the file/dir is encrypted - * @return FsPermission from inode, with ACL bit on if the inode has an ACL - * and encrypted bit on if it represents an encrypted file/dir. 
- */ - private static FsPermission getPermissionForFileStatus( - INodeAttributes node, boolean isEncrypted, boolean isErasureCoded) { - FsPermission perm = node.getFsPermission(); - boolean hasAcl = node.getAclFeature() != null; - if (hasAcl || isEncrypted || isErasureCoded) { - perm = new FsPermissionExtension(perm, hasAcl, - isEncrypted, isErasureCoded); - } - return perm; - } - private static ContentSummary getContentSummaryInt(FSDirectory fsd, INodesInPath iip) throws IOException { fsd.readLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java index a62cddd097c..7ab05d78860 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java @@ -541,7 +541,7 @@ class FSDirWriteFileOp { ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy( fsd.getFSNamesystem(), existing); } - if (ecPolicy != null) { + if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) { isStriped = true; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 07dc5c1fa17..87b11562dee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -72,12 +72,13 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.concurrent.ForkJoinPool; -import java.util.concurrent.RecursiveAction; +import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.SortedSet; import java.util.TreeSet; +import java.util.concurrent.ForkJoinPool; +import java.util.concurrent.RecursiveAction; import java.util.concurrent.locks.ReentrantReadWriteLock; import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES; @@ -135,11 +136,13 @@ public class FSDirectory implements Closeable { public final static HdfsFileStatus DOT_RESERVED_STATUS = new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770), - null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null, + EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null, + HdfsFileStatus.EMPTY_NAME, -1L, 0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null); public final static HdfsFileStatus DOT_SNAPSHOT_DIR_STATUS = - new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null, + new HdfsFileStatus(0, true, 0, 0, 0, 0, null, + EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null); @@ -383,12 +386,15 @@ public class FSDirectory implements Closeable { */ void createReservedStatuses(long cTime) { HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime, - new FsPermission((short) 0770), null, supergroup, null, + new FsPermission((short) 0770), + EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null, DOT_INODES, -1L, 0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null); HdfsFileStatus raw = new HdfsFileStatus(0, true, 
0, 0, cTime, cTime, - new FsPermission((short) 0770), null, supergroup, null, RAW, -1L, - 0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null); + new FsPermission((short) 0770), + EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null, + RAW, -1L, 0, null, + HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null); reservedStatuses = new HdfsFileStatus[] {inodes, raw}; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 9872cd7720e..2313335d6ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -92,6 +92,7 @@ import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*; import org.apache.hadoop.hdfs.protocol.BlocksStats; import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats; import org.apache.hadoop.hdfs.protocol.OpenFileEntry; +import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksMBean; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import static org.apache.hadoop.util.Time.now; import static org.apache.hadoop.util.Time.monotonicNow; @@ -176,6 +177,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.ServiceFailedException; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException; @@ -243,10 +245,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature; import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer; -import org.apache.hadoop.hdfs.server.namenode.metrics.ECBlockGroupsStatsMBean; +import org.apache.hadoop.hdfs.server.namenode.metrics.ECBlockGroupsMBean; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; -import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksStatsMBean; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager; @@ -340,7 +341,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; @InterfaceAudience.Private @Metrics(context="dfs") public class FSNamesystem implements Namesystem, FSNamesystemMBean, - NameNodeMXBean, ReplicatedBlocksStatsMBean, ECBlockGroupsStatsMBean { + NameNodeMXBean, ReplicatedBlocksMBean, ECBlockGroupsMBean { public static final Log LOG = LogFactory.getLog(FSNamesystem.class); private final MetricsRegistry registry = new MetricsRegistry("FSNamesystem"); @Metric final MutableRatesWithAggregation detailedLockHoldTimeMetrics = @@ -371,9 +372,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } FileStatus status = null; if (stat != null) { - Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null; + Path symlink = stat.isSymlink() + ? 
new Path(DFSUtilClient.bytes2String(stat.getSymlinkInBytes())) + : null; Path path = new Path(src); - status = new FileStatus(stat.getLen(), stat.isDir(), + status = new FileStatus(stat.getLen(), stat.isDirectory(), stat.getReplication(), stat.getBlockSize(), stat.getModificationTime(), stat.getAccessTime(), stat.getPermission(), stat.getOwner(), @@ -4076,10 +4079,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, * @see ClientProtocol#getBlocksStats() */ BlocksStats getBlocksStats() { - return new BlocksStats(getLowRedundancyBlocksStat(), - getCorruptBlocksStat(), getMissingBlocksStat(), - getMissingReplicationOneBlocksStat(), getBlocksBytesInFutureStat(), - getPendingDeletionBlocksStat()); + return new BlocksStats(getLowRedundancyReplicatedBlocks(), + getCorruptReplicatedBlocks(), getMissingReplicatedBlocks(), + getMissingReplicationOneBlocks(), getBytesInFutureReplicatedBlocks(), + getPendingDeletionReplicatedBlocks()); } /** @@ -4089,9 +4092,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, * @see ClientProtocol#getECBlockGroupsStats() */ ECBlockGroupsStats getECBlockGroupsStats() { - return new ECBlockGroupsStats(getLowRedundancyECBlockGroupsStat(), - getCorruptECBlockGroupsStat(), getMissingECBlockGroupsStat(), - getECBlocksBytesInFutureStat(), getPendingDeletionECBlockGroupsStat()); + return new ECBlockGroupsStats(getLowRedundancyECBlockGroups(), + getCorruptECBlockGroups(), getMissingECBlockGroups(), + getBytesInFutureECBlockGroups(), getPendingDeletionECBlockGroups()); } @Override // FSNamesystemMBean @@ -4638,76 +4641,76 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, @Override // ReplicatedBlocksMBean @Metric({"LowRedundancyReplicatedBlocks", "Number of low redundancy replicated blocks"}) - public long getLowRedundancyBlocksStat() { - return blockManager.getLowRedundancyBlocksStat(); + public long getLowRedundancyReplicatedBlocks() { + return blockManager.getLowRedundancyBlocks(); } @Override // ReplicatedBlocksMBean @Metric({"CorruptReplicatedBlocks", "Number of corrupted replicated blocks"}) - public long getCorruptBlocksStat() { - return blockManager.getCorruptBlocksStat(); + public long getCorruptReplicatedBlocks() { + return blockManager.getCorruptBlocks(); } @Override // ReplicatedBlocksMBean @Metric({"MissingReplicatedBlocks", "Number of missing replicated blocks"}) - public long getMissingBlocksStat() { - return blockManager.getMissingBlocksStat(); + public long getMissingReplicatedBlocks() { + return blockManager.getMissingBlocks(); } @Override // ReplicatedBlocksMBean - @Metric({"MissingReplicatedOneBlocks", "Number of missing replicated blocks" + - " with replication factor 1"}) - public long getMissingReplicationOneBlocksStat() { - return blockManager.getMissingReplicationOneBlocksStat(); + @Metric({"MissingReplicationOneBlocks", "Number of missing replicated " + + "blocks with replication factor 1"}) + public long getMissingReplicationOneBlocks() { + return blockManager.getMissingReplicationOneBlocks(); } @Override // ReplicatedBlocksMBean - @Metric({"BytesReplicatedFutureBlocks", "Total bytes in replicated blocks " + - "with future generation stamp"}) - public long getBlocksBytesInFutureStat() { - return blockManager.getBytesInFutureReplicatedBlocksStat(); + @Metric({"BytesInFutureReplicatedBlocks", "Total bytes in replicated " + + "blocks with future generation stamp"}) + public long getBytesInFutureReplicatedBlocks() { + return blockManager.getBytesInFutureReplicatedBlocks(); } @Override // 
ReplicatedBlocksMBean @Metric({"PendingDeletionReplicatedBlocks", "Number of replicated blocks " + "that are pending deletion"}) - public long getPendingDeletionBlocksStat() { - return blockManager.getPendingDeletionBlocksStat(); + public long getPendingDeletionReplicatedBlocks() { + return blockManager.getPendingDeletionReplicatedBlocks(); } - @Override // ECBlockGroupsStatsMBean + @Override // ECBlockGroupsMBean @Metric({"LowRedundancyECBlockGroups", "Number of erasure coded block " + "groups with low redundancy"}) - public long getLowRedundancyECBlockGroupsStat() { - return blockManager.getLowRedundancyECBlockGroupsStat(); + public long getLowRedundancyECBlockGroups() { + return blockManager.getLowRedundancyECBlockGroups(); } - @Override // ECBlockGroupsStatsMBean + @Override // ECBlockGroupsMBean @Metric({"CorruptECBlockGroups", "Number of erasure coded block groups that" + " are corrupt"}) - public long getCorruptECBlockGroupsStat() { - return blockManager.getCorruptECBlockGroupsStat(); + public long getCorruptECBlockGroups() { + return blockManager.getCorruptECBlockGroups(); } - @Override // ECBlockGroupsStatsMBean + @Override // ECBlockGroupsMBean @Metric({"MissingECBlockGroups", "Number of erasure coded block groups that" + " are missing"}) - public long getMissingECBlockGroupsStat() { - return blockManager.getMissingECBlockGroupsStat(); + public long getMissingECBlockGroups() { + return blockManager.getMissingECBlockGroups(); } - @Override // ECBlockGroupsStatsMBean - @Metric({"BytesFutureECBlockGroups", "Total bytes in erasure coded block " + + @Override // ECBlockGroupsMBean + @Metric({"BytesInFutureECBlockGroups", "Total bytes in erasure coded block " + "groups with future generation stamp"}) - public long getECBlocksBytesInFutureStat() { - return blockManager.getBytesInFutureStripedBlocksStat(); + public long getBytesInFutureECBlockGroups() { + return blockManager.getBytesInFutureECBlockGroups(); } - @Override // ECBlockGroupsStatsMBean + @Override // ECBlockGroupsMBean @Metric({"PendingDeletionECBlockGroups", "Number of erasure coded block " + "groups that are pending deletion"}) - public long getPendingDeletionECBlockGroupsStat() { - return blockManager.getPendingDeletionECBlockGroupsStat(); + public long getPendingDeletionECBlockGroups() { + return blockManager.getPendingDeletionECBlockGroups(); } @Override @@ -4774,9 +4777,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, * Register following MBeans with their respective names. 
* FSNamesystemMBean: * "hadoop:service=NameNode,name=FSNamesystemState" - * ReplicatedBlocksStatsMBean: + * ReplicatedBlocksMBean: * "hadoop:service=NameNode,name=ReplicatedBlocksState" - * ECBlockGroupsStatsMBean: + * ECBlockGroupsMBean: * "hadoop:service=NameNode,name=ECBlockGroupsState" */ private void registerMBean() { @@ -4785,9 +4788,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, StandardMBean namesystemBean = new StandardMBean( this, FSNamesystemMBean.class); StandardMBean replicaBean = new StandardMBean( - this, ReplicatedBlocksStatsMBean.class); + this, ReplicatedBlocksMBean.class); StandardMBean ecBean = new StandardMBean( - this, ECBlockGroupsStatsMBean.class); + this, ECBlockGroupsMBean.class); namesystemMBeanName = MBeans.register( "NameNode", "FSNamesystemState", namesystemBean); replicatedBlocksMBeanName = MBeans.register( @@ -4840,16 +4843,20 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override // FSNamesystemMBean + @Metric({"NumLiveDataNodes", "Number of datanodes which are currently live"}) public int getNumLiveDataNodes() { return getBlockManager().getDatanodeManager().getNumLiveDataNodes(); } @Override // FSNamesystemMBean + @Metric({"NumDeadDataNodes", "Number of datanodes which are currently dead"}) public int getNumDeadDataNodes() { return getBlockManager().getDatanodeManager().getNumDeadDataNodes(); } @Override // FSNamesystemMBean + @Metric({"NumDecomLiveDataNodes", + "Number of datanodes which have been decommissioned and are now live"}) public int getNumDecomLiveDataNodes() { final List live = new ArrayList(); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false); @@ -4861,6 +4868,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override // FSNamesystemMBean + @Metric({"NumDecomDeadDataNodes", + "Number of datanodes which have been decommissioned and are now dead"}) public int getNumDecomDeadDataNodes() { final List dead = new ArrayList(); getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, false); @@ -4872,6 +4881,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override // FSNamesystemMBean + @Metric({"VolumeFailuresTotal", + "Total number of volume failures across all Datanodes"}) public int getVolumeFailuresTotal() { List live = new ArrayList(); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false); @@ -4883,6 +4894,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override // FSNamesystemMBean + @Metric({"EstimatedCapacityLostTotal", + "An estimate of the total capacity lost due to volume failures"}) public long getEstimatedCapacityLostTotal() { List live = new ArrayList(); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false); @@ -4898,6 +4911,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override // FSNamesystemMBean + @Metric({"NumDecommissioningDataNodes", + "Number of datanodes in decommissioning state"}) public int getNumDecommissioningDataNodes() { return getBlockManager().getDatanodeManager().getDecommissioningNodes() .size(); @@ -4915,6 +4930,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, * before NN receives the first Heartbeat followed by the first Blockreport. 
*/ @Override // FSNamesystemMBean + @Metric({"NumStaleStorages", + "Number of storages marked as content stale"}) public int getNumStaleStorages() { return getBlockManager().getDatanodeManager().getNumStaleStorages(); } @@ -4971,7 +4988,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, return blockId; } - private boolean isFileDeleted(INodeFile file) { + boolean isFileDeleted(INodeFile file) { // Not in the inodeMap or in the snapshot but marked deleted. if (dir.getInode(file.getId()) == null) { return true; @@ -7038,18 +7055,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, resultingStat = FSDirErasureCodingOp.setErasureCodingPolicy(this, srcArg, ecPolicyName, pc, logRetryCache); success = true; - } catch (AccessControlException ace) { - logAuditEvent(success, operationName, srcArg, null, - resultingStat); - throw ace; } finally { writeUnlock(operationName); if (success) { getEditLog().logSync(); } + logAuditEvent(success, operationName, srcArg, null, resultingStat); } - logAuditEvent(success, operationName, srcArg, null, - resultingStat); } /** @@ -7057,9 +7069,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, * @param policies The policies to add. * @return The according result of add operation. */ - AddECPolicyResponse[] addECPolicies(ErasureCodingPolicy[] policies) + AddECPolicyResponse[] addErasureCodingPolicies(ErasureCodingPolicy[] policies) throws IOException { - final String operationName = "addECPolicies"; + final String operationName = "addErasureCodingPolicies"; String addECPolicyName = ""; checkOperation(OperationCategory.WRITE); List responses = new ArrayList<>(); @@ -7069,6 +7081,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, checkOperation(OperationCategory.WRITE); for (ErasureCodingPolicy policy : policies) { try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot add erasure coding policy"); ErasureCodingPolicy newPolicy = FSDirErasureCodingOp.addErasureCodePolicy(this, policy); addECPolicyName = newPolicy.getName(); @@ -7099,6 +7113,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, boolean success = false; writeLock(); try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot remove erasure coding policy " + + ecPolicyName); FSDirErasureCodingOp.removeErasureCodePolicy(this, ecPolicyName); success = true; } finally { @@ -7184,18 +7201,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, resultingStat = FSDirErasureCodingOp.unsetErasureCodingPolicy(this, srcArg, pc, logRetryCache); success = true; - } catch (AccessControlException ace) { - logAuditEvent(success, operationName, srcArg, null, - resultingStat); - throw ace; } finally { writeUnlock(operationName); if (success) { getEditLog().logSync(); } + logAuditEvent(success, operationName, srcArg, null, resultingStat); } - logAuditEvent(success, operationName, srcArg, null, - resultingStat); } /** @@ -7203,14 +7215,20 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, */ ErasureCodingPolicy getErasureCodingPolicy(String src) throws AccessControlException, UnresolvedLinkException, IOException { + final String operationName = "getErasureCodingPolicy"; + boolean success = false; checkOperation(OperationCategory.READ); FSPermissionChecker pc = getPermissionChecker(); readLock(); try { checkOperation(OperationCategory.READ); - return FSDirErasureCodingOp.getErasureCodingPolicy(this, src, pc); + final ErasureCodingPolicy 
ret = + FSDirErasureCodingOp.getErasureCodingPolicy(this, src, pc); + success = true; + return ret; } finally { - readUnlock("getErasureCodingPolicy"); + readUnlock(operationName); + logAuditEvent(success, operationName, null); } } @@ -7218,27 +7236,39 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, * Get available erasure coding polices */ ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException { + final String operationName = "getErasureCodingPolicies"; + boolean success = false; checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); - return FSDirErasureCodingOp.getErasureCodingPolicies(this); + final ErasureCodingPolicy[] ret = + FSDirErasureCodingOp.getErasureCodingPolicies(this); + success = true; + return ret; } finally { - readUnlock("getErasureCodingPolicies"); + readUnlock(operationName); + logAuditEvent(success, operationName, null); } } /** * Get available erasure coding codecs and corresponding coders. */ - HashMap getErasureCodingCodecs() throws IOException { + Map getErasureCodingCodecs() throws IOException { + final String operationName = "getErasureCodingCodecs"; + boolean success = false; checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); - return FSDirErasureCodingOp.getErasureCodingCodecs(this); + final Map ret = + FSDirErasureCodingOp.getErasureCodingCodecs(this); + success = true; + return ret; } finally { - readUnlock("getErasureCodingCodecs"); + readUnlock(operationName); + logAuditEvent(success, operationName, null); } } @@ -7539,6 +7569,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, @Override // FSNamesystemMBean + @Metric({"NumInMaintenanceLiveDataNodes", + "Number of live Datanodes which are in maintenance state"}) public int getNumInMaintenanceLiveDataNodes() { final List live = new ArrayList(); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true); @@ -7550,6 +7582,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override // FSNamesystemMBean + @Metric({"NumInMaintenanceDeadDataNodes", + "Number of dead Datanodes which are in maintenance state"}) public int getNumInMaintenanceDeadDataNodes() { final List dead = new ArrayList(); getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true); @@ -7561,6 +7595,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override // FSNamesystemMBean + @Metric({"NumEnteringMaintenanceDataNodes", + "Number of Datanodes that are entering the maintenance state"}) public int getNumEnteringMaintenanceDataNodes() { return getBlockManager().getDatanodeManager().getEnteringMaintenanceNodes() .size(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index 38cdbb30ef8..35ec063a1b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -26,10 +26,11 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; -import java.util.PriorityQueue; +import java.util.NavigableSet; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; +import java.util.TreeSet; import java.util.concurrent.Callable; import 
java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -87,11 +88,15 @@ public class LeaseManager { // Mapping: leaseHolder -> Lease private final SortedMap leases = new TreeMap<>(); // Set of: Lease - private final PriorityQueue sortedLeases = new PriorityQueue<>(512, + private final NavigableSet sortedLeases = new TreeSet<>( new Comparator() { @Override public int compare(Lease o1, Lease o2) { - return Long.signum(o1.getLastUpdate() - o2.getLastUpdate()); + if (o1.getLastUpdate() != o2.getLastUpdate()) { + return Long.signum(o1.getLastUpdate() - o2.getLastUpdate()); + } else { + return o1.holder.compareTo(o2.holder); + } } }); // INodeID -> Lease @@ -162,18 +167,25 @@ public class LeaseManager { * * @return Set */ - public Set getINodeWithLeases() { + @VisibleForTesting + Set getINodeWithLeases() throws IOException { return getINodeWithLeases(null); } private synchronized INode[] getINodesWithLease() { - int inodeCount = 0; - INode[] inodes = new INode[leasesById.size()]; + List inodes = new ArrayList<>(leasesById.size()); + INode currentINode; for (long inodeId : leasesById.keySet()) { - inodes[inodeCount] = fsnamesystem.getFSDirectory().getInode(inodeId); - inodeCount++; + currentINode = fsnamesystem.getFSDirectory().getInode(inodeId); + // A file with an active lease could get deleted, or its + // parent directories could get recursively deleted. + if (currentINode != null && + currentINode.isFile() && + !fsnamesystem.isFileDeleted(currentINode.asFile())) { + inodes.add(currentINode); + } } - return inodes; + return inodes.toArray(new INode[0]); } /** @@ -186,7 +198,7 @@ public class LeaseManager { * @return Set */ public Set getINodeWithLeases(final INodeDirectory - ancestorDir) { + ancestorDir) throws IOException { assert fsnamesystem.hasReadLock(); final long startTimeMs = Time.monotonicNow(); Set iipSet = new HashSet<>(); @@ -233,7 +245,7 @@ public class LeaseManager { try { iipSet.addAll(f.get()); } catch (Exception e) { - LOG.warn("INode filter task encountered exception: ", e); + throw new IOException("Failed to get files with active leases", e); } } final long endTimeMs = Time.monotonicNow(); @@ -521,9 +533,10 @@ public class LeaseManager { long start = monotonicNow(); - while(!sortedLeases.isEmpty() && sortedLeases.peek().expiredHardLimit() - && !isMaxLockHoldToReleaseLease(start)) { - Lease leaseToCheck = sortedLeases.peek(); + while(!sortedLeases.isEmpty() && + sortedLeases.first().expiredHardLimit() + && !isMaxLockHoldToReleaseLease(start)) { + Lease leaseToCheck = sortedLeases.first(); LOG.info(leaseToCheck + " has expired hard limit"); final List removing = new ArrayList<>(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 39d93dff96b..78712020066 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -37,9 +37,9 @@ import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; -import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.Callable; @@ -80,6 +80,7 @@ import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB; import 
org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.inotify.EventBatch; import org.apache.hadoop.hdfs.inotify.EventBatchList; @@ -250,13 +251,15 @@ public class NameNodeRpcServer implements NamenodeProtocols { private final String minimumDataNodeVersion; + private final String defaultECPolicyName; + public NameNodeRpcServer(Configuration conf, NameNode nn) throws IOException { this.nn = nn; this.namesystem = nn.getNamesystem(); this.retryCache = namesystem.getRetryCache(); this.metrics = NameNode.getNameNodeMetrics(); - + int handlerCount = conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY, DFS_NAMENODE_HANDLER_COUNT_DEFAULT); @@ -489,6 +492,10 @@ public class NameNodeRpcServer implements NamenodeProtocols { DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT); + defaultECPolicyName = conf.get( + DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY, + DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT); + // Set terse exception whose stack trace won't be logged clientRpcServer.addTerseExceptions(SafeModeException.class, FileNotFoundException.class, @@ -1430,7 +1437,7 @@ public class NameNodeRpcServer implements NamenodeProtocols { } else if (!stat.isSymlink()) { throw new IOException("Path " + path + " is not a symbolic link"); } - return stat.getSymlink(); + return DFSUtilClient.bytes2String(stat.getSymlinkInBytes()); } @@ -2054,6 +2061,12 @@ public class NameNodeRpcServer implements NamenodeProtocols { } boolean success = false; try { + if (ecPolicyName == null) { + ecPolicyName = defaultECPolicyName; + LOG.trace("No policy name is specified, " + + "set the default policy name instead"); + } + LOG.trace("Set erasure coding policy " + ecPolicyName + " on " + src); namesystem.setErasureCodingPolicy(src, ecPolicyName, cacheEntry != null); success = true; } finally { @@ -2265,7 +2278,7 @@ public class NameNodeRpcServer implements NamenodeProtocols { } @Override // ClientProtocol - public HashMap getErasureCodingCodecs() throws IOException { + public Map getErasureCodingCodecs() throws IOException { checkNNStartup(); return namesystem.getErasureCodingCodecs(); } @@ -2297,13 +2310,14 @@ public class NameNodeRpcServer implements NamenodeProtocols { ErasureCodingPolicy[] policies) throws IOException { checkNNStartup(); namesystem.checkSuperuserPrivilege(); - return namesystem.addECPolicies(policies); + return namesystem.addErasureCodingPolicies(policies); } @Override public void removeErasureCodingPolicy(String ecPolicyName) throws IOException { checkNNStartup(); + namesystem.checkSuperuserPrivilege(); namesystem.removeErasureCodingPolicy(ecPolicyName); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index eddab3fa22a..5872955fc52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -471,7 +471,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory { void check(String parent, HdfsFileStatus file, Result replRes, Result ecRes) throws 
IOException { String path = file.getFullName(parent); - if (file.isDir()) { + if (file.isDirectory()) { checkDir(path, replRes, ecRes); return; } @@ -1115,7 +1115,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory { if (lfStatus == null) { // not exists lfInitedOk = dfs.mkdirs(lfName, null, true); lostFound = lfName; - } else if (!lfStatus.isDir()) { // exists but not a directory + } else if (!lfStatus.isDirectory()) { // exists but not a directory LOG.warn("Cannot use /lost+found : a regular file with this name exists."); lfInitedOk = false; } else { // exists and is a directory diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java similarity index 87% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java index f9fd416f8e7..5fa646a6c44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java @@ -31,29 +31,29 @@ import org.apache.hadoop.classification.InterfaceAudience; * */ @InterfaceAudience.Private -public interface ECBlockGroupsStatsMBean { +public interface ECBlockGroupsMBean { /** * Return count of erasure coded block groups with low redundancy. */ - long getLowRedundancyECBlockGroupsStat(); + long getLowRedundancyECBlockGroups(); /** * Return count of erasure coded block groups that are corrupt. */ - long getCorruptECBlockGroupsStat(); + long getCorruptECBlockGroups(); /** * Return count of erasure coded block groups that are missing. */ - long getMissingECBlockGroupsStat(); + long getMissingECBlockGroups(); /** * Return total bytes of erasure coded future block groups. */ - long getECBlocksBytesInFutureStat(); + long getBytesInFutureECBlockGroups(); /** * Return count of erasure coded block groups that are pending deletion. */ - long getPendingDeletionECBlockGroupsStat(); + long getPendingDeletionECBlockGroups(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java similarity index 85% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java index 4643b803299..e2c924e56f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java @@ -30,34 +30,34 @@ import org.apache.hadoop.classification.InterfaceAudience; * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics */ @InterfaceAudience.Private -public interface ReplicatedBlocksStatsMBean { +public interface ReplicatedBlocksMBean { /** * Return low redundancy blocks count. 
*/ - long getLowRedundancyBlocksStat(); + long getLowRedundancyReplicatedBlocks(); /** * Return corrupt blocks count. */ - long getCorruptBlocksStat(); + long getCorruptReplicatedBlocks(); /** * Return missing blocks count. */ - long getMissingBlocksStat(); + long getMissingReplicatedBlocks(); /** * Return count of missing blocks with replication factor one. */ - long getMissingReplicationOneBlocksStat(); + long getMissingReplicationOneBlocks(); /** * Return total bytes of future blocks. */ - long getBlocksBytesInFutureStat(); + long getBytesInFutureReplicatedBlocks(); /** * Return count of blocks that are pending deletion. */ - long getPendingDeletionBlocksStat(); + long getPendingDeletionReplicatedBlocks(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java index 0ab928d04dc..23dcbe8c9a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java @@ -195,11 +195,17 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature s.getRoot().setModificationTime(now, Snapshot.CURRENT_STATE_ID); if (captureOpenFiles) { - Set openFilesIIP = - leaseManager.getINodeWithLeases(snapshotRoot); - for (INodesInPath openFileIIP : openFilesIIP) { - INodeFile openFile = openFileIIP.getLastINode().asFile(); - openFile.recordModification(openFileIIP.getLatestSnapshotId()); + try { + Set openFilesIIP = + leaseManager.getINodeWithLeases(snapshotRoot); + for (INodesInPath openFileIIP : openFilesIIP) { + INodeFile openFile = openFileIIP.getLastINode().asFile(); + openFile.recordModification(openFileIIP.getLatestSnapshotId()); + } + } catch (Exception e) { + throw new SnapshotException("Failed to add snapshot: Unable to " + + "capture all open files under the snapshot dir " + + snapshotRoot.getFullPathName() + " for snapshot '" + name + "'", e); } } return s; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index ffc203f9d93..4b479e04d8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -25,6 +25,7 @@ import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -35,6 +36,7 @@ import javax.management.ObjectName; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtilClient; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.protocol.SnapshotInfo; @@ -345,8 +347,9 @@ public class SnapshotManager implements SnapshotStatsMXBean { if (userName == null || userName.equals(dir.getUserName())) { 
SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus( dir.getModificationTime(), dir.getAccessTime(), - dir.getFsPermission(), dir.getUserName(), dir.getGroupName(), - dir.getLocalNameBytes(), dir.getId(), + dir.getFsPermission(), EnumSet.noneOf(HdfsFileStatus.Flags.class), + dir.getUserName(), dir.getGroupName(), + dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(Snapshot.CURRENT_STATE_ID), dir.getDirectorySnapshottableFeature().getNumSnapshots(), dir.getDirectorySnapshottableFeature().getSnapshotQuota(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java index 5006b5a20e0..55d85ff8d8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.util.ECPolicyLoader; +import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; import org.apache.hadoop.tools.TableListing; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; @@ -33,7 +34,6 @@ import org.apache.hadoop.util.ToolRunner; import java.io.IOException; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -310,7 +310,8 @@ public class ECAdmin extends Configured implements Tool { @Override public String getShortUsage() { - return "[" + getName() + " -path -policy ]\n"; + return "[" + getName() + + " -path [-policy ] [-replicate]]\n"; } @Override @@ -319,9 +320,13 @@ public class ECAdmin extends Configured implements Tool { listing.addRow("", "The path of the file/directory to set " + "the erasure coding policy"); listing.addRow("", "The name of the erasure coding policy"); + listing.addRow("-replicate", + "force 3x replication scheme on the directory"); return getShortUsage() + "\n" + "Set the erasure coding policy for a file/directory.\n\n" + - listing.toString(); + listing.toString() + "\n" + + "-replicate and -policy are optional arguments. 
They cannot been " + + "used at the same time"; } @Override @@ -333,28 +338,38 @@ public class ECAdmin extends Configured implements Tool { return 1; } - final String ecPolicyName = StringUtils.popOptionWithArgument("-policy", + String ecPolicyName = StringUtils.popOptionWithArgument("-policy", args); - if (ecPolicyName == null) { - System.err.println("Please specify the policy name.\nUsage: " + - getLongUsage()); - return 1; - } + final boolean replicate = StringUtils.popOption("-replicate", args); if (args.size() > 0) { System.err.println(getName() + ": Too many arguments"); return 1; } + if (replicate) { + if (ecPolicyName != null) { + System.err.println(getName() + + ": -replicate and -policy cannot been used at the same time"); + return 2; + } + ecPolicyName = ErasureCodeConstants.REPLICATION_POLICY_NAME; + } + final Path p = new Path(path); final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf); try { dfs.setErasureCodingPolicy(p, ecPolicyName); - System.out.println("Set erasure coding policy " + ecPolicyName + - " on " + path); + if (ecPolicyName == null){ + System.out.println("Set default erasure coding policy" + + " on " + path); + } else { + System.out.println("Set erasure coding policy " + ecPolicyName + + " on " + path); + } } catch (Exception e) { System.err.println(AdminHelper.prettifyException(e)); - return 2; + return 3; } return 0; } @@ -441,7 +456,7 @@ public class ECAdmin extends Configured implements Tool { final DistributedFileSystem dfs = AdminHelper.getDFS(conf); try { - HashMap codecs = + Map codecs = dfs.getAllErasureCodingCodecs(); if (codecs.isEmpty()) { System.out.println("No erasure coding codecs are supported on the " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java index 7a39ba6072b..ddf7933f032 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java @@ -20,17 +20,21 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer; import java.io.IOException; import java.io.OutputStream; +import javax.xml.transform.OutputKeys; +import javax.xml.transform.TransformerConfigurationException; +import javax.xml.transform.sax.SAXTransformerFactory; +import javax.xml.transform.sax.TransformerHandler; +import javax.xml.transform.stream.StreamResult; +import org.xml.sax.ContentHandler; +import org.xml.sax.SAXException; +import org.xml.sax.helpers.AttributesImpl; + import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.xml.sax.ContentHandler; -import org.xml.sax.SAXException; -import org.xml.sax.helpers.AttributesImpl; -import org.apache.xml.serialize.OutputFormat; -import org.apache.xml.serialize.XMLSerializer; /** * An XmlEditsVisitor walks over an EditLog structure and writes out @@ -41,26 +45,37 @@ import org.apache.xml.serialize.XMLSerializer; public class XmlEditsVisitor implements OfflineEditsVisitor { private final OutputStream out; private ContentHandler contentHandler; + private final SAXTransformerFactory factory; + private final static String XML_INDENTATION_PROP 
="{http://xml.apache.org/" + + "xslt}indent-amount"; + private final static String XML_INDENTATION_NUM ="2"; /** * Create a processor that writes to the file named and may or may not * also output to the screen, as specified. * - * @param filename Name of file to write output to - * @param printToScreen Mirror output to screen? + * @param out output stream to write + * @throws IOException on any error */ public XmlEditsVisitor(OutputStream out) throws IOException { this.out = out; - OutputFormat outFormat = new OutputFormat("XML", "UTF-8", true); - outFormat.setIndenting(true); - outFormat.setIndent(2); - outFormat.setDoctype(null, null); - XMLSerializer serializer = new XMLSerializer(out, outFormat); - contentHandler = serializer.asContentHandler(); + factory =(SAXTransformerFactory)SAXTransformerFactory.newInstance(); try { + TransformerHandler handler = factory.newTransformerHandler(); + handler.getTransformer().setOutputProperty(OutputKeys.METHOD, "xml"); + handler.getTransformer().setOutputProperty(OutputKeys.ENCODING, "UTF-8"); + handler.getTransformer().setOutputProperty(OutputKeys.INDENT, "yes"); + handler.getTransformer().setOutputProperty(XML_INDENTATION_PROP, + XML_INDENTATION_NUM); + handler.getTransformer().setOutputProperty(OutputKeys.STANDALONE, "yes"); + handler.setResult(new StreamResult(out)); + contentHandler = handler; + contentHandler.startDocument(); contentHandler.startElement("", "", "EDITS", new AttributesImpl()); + } catch (TransformerConfigurationException e) { + throw new IOException("SAXTransformer error: " + e.getMessage()); } catch (SAXException e) { throw new IOException("SAX error: " + e.getMessage()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index a8861a8ccb8..94752f53576 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -17,10 +17,18 @@ */ package org.apache.hadoop.hdfs.web; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.ipc.RemoteException; @@ -110,21 +118,20 @@ public class JsonUtil { m.put("pathSuffix", status.getLocalName()); m.put("type", WebHdfsConstants.PathType.valueOf(status)); if (status.isSymlink()) { - m.put("symlink", status.getSymlink()); + m.put("symlink", DFSUtilClient.bytes2String(status.getSymlinkInBytes())); } - m.put("length", status.getLen()); m.put("owner", status.getOwner()); m.put("group", status.getGroup()); FsPermission perm = status.getPermission(); m.put("permission", toString(perm)); - if (perm.getAclBit()) { + if (status.hasAcl()) { m.put("aclBit", true); } - if (perm.getEncryptedBit()) { + if (status.isEncrypted()) { m.put("encBit", true); } - if (perm.getErasureCodedBit()) { + if (status.isErasureCoded()) { 
m.put("ecBit", true); } m.put("accessTime", status.getAccessTime()); @@ -373,15 +380,6 @@ public class JsonUtil { FsPermission perm = status.getPermission(); if (perm != null) { m.put("permission", toString(perm)); - if (perm.getAclBit()) { - m.put("aclBit", true); - } - if (perm.getEncryptedBit()) { - m.put("encBit", true); - } - if (perm.getErasureCodedBit()) { - m.put("ecBit", true); - } } final Map> finalMap = new TreeMap>(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 4caee9e76f8..03becc96ea0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -459,7 +459,7 @@ dfs.namenode.posix.acl.inheritance.enabled - false + true Set to true to enable POSIX style ACL inheritance. When it is enabled and the create request comes from a compatible client, the NameNode @@ -960,17 +960,17 @@ dfs.namenode.decommission.interval 30s - Namenode periodicity in seconds to check if decommission is - complete. Support multiple time unit suffix(case insensitive), as described - in dfs.heartbeat.interval. + Namenode periodicity in seconds to check if + decommission or maintenance is complete. Support multiple time unit + suffix(case insensitive), as described in dfs.heartbeat.interval. dfs.namenode.decommission.blocks.per.interval 500000 - The approximate number of blocks to process per - decommission interval, as defined in dfs.namenode.decommission.interval. + The approximate number of blocks to process per decommission + or maintenance interval, as defined in dfs.namenode.decommission.interval. @@ -978,11 +978,12 @@ dfs.namenode.decommission.max.concurrent.tracked.nodes 100 - The maximum number of decommission-in-progress datanodes nodes that will be - tracked at one time by the namenode. Tracking a decommission-in-progress - datanode consumes additional NN memory proportional to the number of blocks - on the datnode. Having a conservative limit reduces the potential impact - of decomissioning a large number of nodes at once. + The maximum number of decommission-in-progress or + entering-maintenance datanodes nodes that will be tracked at one time by + the namenode. Tracking these datanode consumes additional NN memory + proportional to the number of blocks on the datnode. Having a conservative + limit reduces the potential impact of decommissioning or maintenance of + a large number of nodes at once. A value of 0 means no limit will be enforced. @@ -2544,13 +2545,14 @@ dfs.client.socket.send.buffer.size - 131072 + 0 Socket send buffer size for a write pipeline in DFSClient side. This may affect TCP connection throughput. If it is set to zero or negative value, no buffer size will be set explicitly, thus enable tcp auto-tuning on some system. + The default value is 0. @@ -2973,6 +2975,14 @@ + + dfs.namenode.ec.system.default.policy + RS-6-3-64k + The default erasure coding policy name will be used + on the path if no policy name is passed. + + + dfs.namenode.ec.policies.max.cellsize 4194304 @@ -3024,23 +3034,25 @@ dfs.datanode.transfer.socket.send.buffer.size - 131072 + 0 Socket send buffer size for DataXceiver (mirroring packets to downstream in pipeline). This may affect TCP connection throughput. If it is set to zero or negative value, no buffer size will be set explicitly, thus enable tcp auto-tuning on some system. + The default value is 0. 
dfs.datanode.transfer.socket.recv.buffer.size - 131072 + 0 Socket receive buffer size for DataXceiver (receiving packets from client during block writing). This may affect TCP connection throughput. If it is set to zero or negative value, no buffer size will be set explicitly, thus enable tcp auto-tuning on some system. + The default value is 0. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js index 3e276a9a9f6..dae35196cfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js @@ -370,6 +370,12 @@ var b = function() { browse_directory($('#directory').val()); }; $('#btn-nav-directory').click(b); + //Also navigate to the directory when a user presses enter. + $('#directory').on('keyup', function (e) { + if (e.which == 13) { + browse_directory($('#directory').val()); + } + }); var dir = window.location.hash.slice(1); if(dir == "") { window.location.hash = "/"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md index 1c0a2de1ea9..786b51278d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md @@ -65,9 +65,11 @@ Architecture 2. _The size of a striping cell._ This determines the granularity of striped reads and writes, including buffer sizes and encoding work. - Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell size*. Currently, five built-in policies are supported: `RS-3-2-64k`, `RS-6-3-64k`, `RS-10-4-64k`, `RS-LEGACY-6-3-64k`, and `XOR-2-1-64k`. + Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell size*. Currently, six built-in policies are supported: `RS-3-2-64k`, `RS-6-3-64k`, `RS-10-4-64k`, `RS-LEGACY-6-3-64k`, `XOR-2-1-64k` and `REPLICATION`. - By default, all built-in erasure coding policies are disabled. + `REPLICATION` is a special policy. It can only be set on directory, to force the directory to adopt 3x replication scheme, instead of inheriting its ancestor's erasure coding policy. This policy makes it possible to interleave 3x replication scheme directory with erasure coding directory. + + `REPLICATION` policy is always enabled. For other built-in policies, unless they are configured in `dfs.namenode.ec.policies.enabled` property, otherwise they are disabled by default. Similar to HDFS storage policies, erasure coding policies are set on a directory. When a file is created, it inherits the EC policy of its nearest ancestor directory. @@ -112,11 +114,16 @@ Deployment what EC policies can be set by clients. It does not affect the behavior of already set file or directory-level EC policies. By default, all built-in erasure coding policies are disabled. Typically, the cluster administrator will enable set of policies by including them - in the `dfs .namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance, + in the `dfs.namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance, for a cluster with 9 racks, a policy like `RS-10-4-64k` will not preserve rack-level fault-tolerance, and `RS-6-3-64k` or `RS-3-2-64k` might be more appropriate. 
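Erasure coding policies are directory-scoped and inherited by new files, so a typical client flow is: create a directory, attach a policy, write files beneath it, and (per the Limitations section added further down in this file) probe the stream before relying on `hflush()`/`hsync()`. A hedged sketch against the public `DistributedFileSystem` API (the path and policy name are examples; it assumes `fs.defaultFS` points at a running HDFS cluster and that the policy is enabled via `dfs.namenode.ec.policies.enabled`):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StreamCapabilities;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcPolicySketch {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path dir = new Path("/striped");                 // example path
        dfs.mkdirs(dir);
        // Equivalent to: hdfs ec -setPolicy -path /striped -policy RS-6-3-64k
        dfs.setErasureCodingPolicy(dir, "RS-6-3-64k");
        System.out.println("policy = " + dfs.getErasureCodingPolicy(dir).getName());

        // New files inherit the directory's policy; striped output streams do
        // not support hflush()/hsync(), which a StreamCapabilities probe reveals.
        try (FSDataOutputStream out = dfs.create(new Path(dir, "file"))) {
          boolean canHflush = out instanceof StreamCapabilities
              && ((StreamCapabilities) out).hasCapability("hflush");
          System.out.println("supports hflush: " + canHflush);
          out.writeBytes("data");
        }
        dfs.close();
      }
    }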
If the administrator only cares about node-level fault-tolerance, `RS-10-4-64k` would still be appropriate as long as there are at least 14 DataNodes in the cluster. + A system default EC policy can be configured via the 'dfs.namenode.ec.system.default.policy' configuration. With this configuration, + the default EC policy is used whenever no policy name is passed as an argument to the '-setPolicy' command. + + By default, 'dfs.namenode.ec.system.default.policy' is "RS-6-3-64k". + The codec implementations for Reed-Solomon and XOR can be configured with the following client and DataNode configuration keys: `io.erasurecode.codec.rs.rawcoders` for the default RS codec, `io.erasurecode.codec.rs-legacy.rawcoders` for the legacy RS codec, @@ -148,7 +155,7 @@ Deployment HDFS provides an `ec` subcommand to perform administrative commands related to erasure coding. hdfs ec [generic options] - [-setPolicy -policy -path ] + [-setPolicy -path [-policy ] [-replicate]] [-getPolicy -path ] [-unsetPolicy -path ] [-listPolicies] @@ -160,13 +167,20 @@ Deployment Below are the details about each command. - * `[-setPolicy -policy -path ]` + * `[-setPolicy -path [-policy ] [-replicate]]` Sets an erasure coding policy on a directory at the specified path. `path`: An directory in HDFS. This is a mandatory parameter. Setting a policy only affects newly created files, and does not affect existing files. `policyName`: The erasure coding policy to be used for files under this directory. + This parameter can be omitted if the 'dfs.namenode.ec.system.default.policy' configuration is set, + in which case the default policy from the configuration is applied to the path. + + `-replicate` applies the special `REPLICATION` policy to the directory, forcing it to adopt the 3x replication scheme. + + `-replicate` and `-policy ` are optional arguments. They cannot be specified at the same time. + * `[-getPolicy -path ]` @@ -199,3 +213,22 @@ Below are the details about each command. * `[-disablePolicy -policy ]` Disable an erasure coding policy. + +Limitations +----------- + +Certain HDFS file write operations, i.e., `hflush`, `hsync` and `append`, +are not supported on erasure coded files due to substantial technical +challenges. + +* `append()` on an erasure coded file will throw `IOException`. +* `hflush()` and `hsync()` on `DFSStripedOutputStream` are no-ops. Thus, calling +`hflush()` or `hsync()` on an erasure coded file cannot guarantee that data +is persisted. + +A client can use [`StreamCapabilities`](../hadoop-common/filesystem/filesystem.html#interface_StreamCapabilities) +API to query whether an `OutputStream` supports `hflush()` and `hsync()`. +If the client desires data persistence via `hflush()` and `hsync()`, the current +remedy is to create such files as regular 3x replication files in a +non-erasure-coded directory, or to use the `FSDataOutputStreamBuilder#replicate()` +API to create 3x replication files in an erasure-coded directory. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md index c50253459da..82b5cec09f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md @@ -322,7 +322,7 @@ Configuration Parameters * `dfs.namenode.posix.acl.inheritance.enabled` - Set to true to enable POSIX style ACL inheritance. Disabled by default. + Set to true to enable POSIX style ACL inheritance.
Enabled by default. When it is enabled and the create request comes from a compatible client, the NameNode will apply default ACLs from the parent directory to the create mode and ignore the client umask. If no default ACL is found, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md index 7544c80ae6c..03834ebf07d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md @@ -495,6 +495,45 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileSt See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus +### List a File + +* Submit a HTTP GET request. + + curl -i "http://:/webhdfs/v1/?op=LISTSTATUS" + + The client receives a response with a [`FileStatuses` JSON object](#FileStatuses_JSON_Schema): + + HTTP/1.1 200 OK + Content-Type: application/json + Content-Length: 427 + + { + "FileStatuses": + { + "FileStatus": + [ + { + "accessTime" : 1320171722771, + "blockSize" : 33554432, + "childrenNum" : 0, + "fileId" : 16390, + "group" : "supergroup", + "length" : 1366, + "modificationTime": 1501770633062, + "owner" : "webuser", + "pathSuffix" : "", + "permission" : "644", + "replication" : 1, + "storagePolicy" : 0, + "type" : "FILE" + } + ] + } + } + +See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus + + ### Iteratively List a Directory * Submit a HTTP GET request. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java index 3c73c28c2a6..dcd91c7d848 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java @@ -72,16 +72,16 @@ public class TestGenericRefresh { public void setUp() throws Exception { // Register Handlers, first one just sends an ok response firstHandler = Mockito.mock(RefreshHandler.class); - Mockito.stub(firstHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) - .toReturn(RefreshResponse.successResponse()); + Mockito.when(firstHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) + .thenReturn(RefreshResponse.successResponse()); RefreshRegistry.defaultRegistry().register("firstHandler", firstHandler); // Second handler has conditional response for testing args secondHandler = Mockito.mock(RefreshHandler.class); - Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one", "two"})) - .toReturn(new RefreshResponse(3, "three")); - Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one"})) - .toReturn(new RefreshResponse(2, "two")); + Mockito.when(secondHandler.handleRefresh("secondHandler", new String[]{"one", "two"})) + .thenReturn(new RefreshResponse(3, "three")); + Mockito.when(secondHandler.handleRefresh("secondHandler", new String[]{"one"})) + .thenReturn(new RefreshResponse(2, "two")); RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler); } @@ -181,12 +181,12 @@ public class TestGenericRefresh { public void testMultipleReturnCodeMerging() throws Exception { // Two handlers which return two non-zero values RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class); - Mockito.stub(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) - 
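The TestGenericRefresh changes above migrate from the deprecated `Mockito.stub(...).toReturn(...)` form to `when(...).thenReturn(...)`. The pattern in isolation, using a hypothetical `Greeter` interface defined only for this sketch rather than Hadoop's `RefreshHandler`:

    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    public class WhenThenReturnSketch {
      // Hypothetical collaborator, not part of Hadoop.
      interface Greeter {
        String greet(String name);
      }

      public static void main(String[] args) {
        Greeter greeter = mock(Greeter.class);
        // Default stub for any argument; a later, more specific stub wins.
        when(greeter.greet(anyString())).thenReturn("hello");
        when(greeter.greet("boom")).thenThrow(new RuntimeException("boom"));
        System.out.println(greeter.greet("world"));   // prints "hello"
      }
    }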
.toReturn(new RefreshResponse(23, "Twenty Three")); + Mockito.when(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) + .thenReturn(new RefreshResponse(23, "Twenty Three")); RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class); - Mockito.stub(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) - .toReturn(new RefreshResponse(10, "Ten")); + Mockito.when(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) + .thenReturn(new RefreshResponse(10, "Ten")); // Then registered to the same ID RefreshRegistry.defaultRegistry().register("shared", handlerOne); @@ -210,12 +210,12 @@ public class TestGenericRefresh { public void testExceptionResultsInNormalError() throws Exception { // In this test, we ensure that all handlers are called even if we throw an exception in one RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class); - Mockito.stub(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) - .toThrow(new RuntimeException("Exceptional Handler Throws Exception")); + Mockito.when(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) + .thenThrow(new RuntimeException("Exceptional Handler Throws Exception")); RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class); - Mockito.stub(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) - .toThrow(new RuntimeException("More Exceptions")); + Mockito.when(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))) + .thenThrow(new RuntimeException("More Exceptions")); RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler); RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java index 75111bb4844..9cf2180ff59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java @@ -34,6 +34,8 @@ public class TestAclCLI extends CLITestHelperDFS { protected void initConf() { conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + conf.setBoolean( + DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY, false); } @Before diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java index 6fc97a2948d..5a04f67846b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import java.io.File; import java.io.IOException; @@ -32,6 +33,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.PathUtils; + +import org.junit.BeforeClass; import org.junit.Test; /** @@ -39,8 +42,22 @@ import org.junit.Test; */ public class TestUrlStreamHandler { - private static final File TEST_ROOT_DIR = 
PathUtils.getTestDir(TestUrlStreamHandler.class); - + private static final File TEST_ROOT_DIR = + PathUtils.getTestDir(TestUrlStreamHandler.class); + + private static final FsUrlStreamHandlerFactory HANDLER_FACTORY + = new FsUrlStreamHandlerFactory(); + + @BeforeClass + public static void setupHandler() { + + // Set up our own factory + // setURLStreamHandlerFactory can be set at most once in the JVM + // the new URLStreamHandler is valid for all test cases + // in TestUrlStreamHandler + URL.setURLStreamHandlerFactory(HANDLER_FACTORY); + } + /** * Test opening and reading from an InputStream through a hdfs:// URL. *

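Moving the registration into `@BeforeClass` reflects a JVM restriction: `URL.setURLStreamHandlerFactory()` may be invoked at most once per process. Outside of tests, that same one-time registration is what lets plain `java.net.URL` resolve `hdfs://` URLs. A rough sketch (the NameNode address is an example, and it assumes no other code in the process has already registered a factory):

    import java.io.InputStream;
    import java.net.URL;
    import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
    import org.apache.hadoop.io.IOUtils;

    public class HdfsUrlSketch {
      static {
        // Legal only once per JVM; a second call throws an Error.
        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
      }

      public static void main(String[] args) throws Exception {
        try (InputStream in =
                 new URL("hdfs://namenode:8020/thefile").openStream()) {
          IOUtils.copyBytes(in, System.out, 4096, false);
        }
      }
    }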
@@ -55,15 +72,6 @@ public class TestUrlStreamHandler { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fs = cluster.getFileSystem(); - - // Setup our own factory - // setURLSteramHandlerFactor is can be set at most once in the JVM - // the new URLStreamHandler is valid for all tests cases - // in TestStreamHandler - FsUrlStreamHandlerFactory factory = - new org.apache.hadoop.fs.FsUrlStreamHandlerFactory(); - java.net.URL.setURLStreamHandlerFactory(factory); - Path filePath = new Path("/thefile"); try { @@ -156,4 +164,22 @@ public class TestUrlStreamHandler { } + @Test + public void testHttpDefaultHandler() throws Throwable { + assertNull("Handler for HTTP is the Hadoop one", + HANDLER_FACTORY.createURLStreamHandler("http")); + } + + @Test + public void testHttpsDefaultHandler() throws Throwable { + assertNull("Handler for HTTPS is the Hadoop one", + HANDLER_FACTORY.createURLStreamHandler("https")); + } + + @Test + public void testUnknownProtocol() throws Throwable { + assertNull("Unknown protocols are not handled", + HANDLER_FACTORY.createURLStreamHandler("gopher")); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 820917f6727..ba9c436831e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -61,6 +61,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import com.google.common.base.Supplier; @@ -148,6 +149,8 @@ public class MiniDFSCluster implements AutoCloseable { public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir"; public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY = DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing"; + public static final String DFS_NAMENODE_DECOMMISSION_INTERVAL_TESTING_KEY + = DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY + ".testing"; // Changing this default may break some tests that assume it is 2. 
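The new `dfs.namenode.decommission.interval.testing` key defined above lets tests shorten the decommission recheck interval; MiniDFSCluster reads it with `Configuration.getTimeDuration()`, which accepts the unit suffixes described in hdfs-default.xml. A small sketch of overriding it (the value is an example; the 3000 ms fallback mirrors the 3 second default used by MiniDFSCluster):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DecommissionIntervalSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Suffixes such as "500ms", "30s" or "5m" are parsed relative to the
        // requested unit; a bare number is interpreted in that unit.
        conf.set("dfs.namenode.decommission.interval.testing", "500ms");
        long millis = conf.getTimeDuration(
            "dfs.namenode.decommission.interval.testing",
            3000, TimeUnit.MILLISECONDS);
        System.out.println("decommission recheck every " + millis + " ms");
      }
    }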
private static final int DEFAULT_STORAGES_PER_DATANODE = 2; @@ -826,7 +829,10 @@ public class MiniDFSCluster implements AutoCloseable { int safemodeExtension = conf.getInt( DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0); conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension); - conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second + long decommissionInterval = conf.getTimeDuration( + DFS_NAMENODE_DECOMMISSION_INTERVAL_TESTING_KEY, 3, TimeUnit.SECONDS); + conf.setTimeDuration(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, + decommissionInterval, TimeUnit.SECONDS); if (!useConfiguredTopologyMappingClass) { conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 7a71df8d90b..5b16f4c0388 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -259,12 +259,14 @@ public class TestDFSClientRetries { Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( - (short) 777), "owner", "group", new byte[0], new byte[0], + (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class), + "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString()); Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( - (short) 777), "owner", "group", new byte[0], new byte[0], + (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class), + "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)) .when(mockNN) .create(anyString(), (FsPermission) anyObject(), anyString(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java index fa12f34af25..40cd676f3ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java @@ -30,7 +30,6 @@ import org.slf4j.LoggerFactory; import java.io.IOException; import java.net.Socket; -import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY; import static org.junit.Assert.assertTrue; @@ -42,15 +41,16 @@ public class TestDFSClientSocketSize { } /** - * The setting of socket send buffer size in - * {@link java.net.Socket#setSendBufferSize(int)} is only a hint. Actual - * value may differ. We just sanity check that it is somewhere close. + * Test that the send buffer size default value is 0, in which case the socket + * will use a TCP auto-tuned value. 
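The buffer-size default of 0 introduced in hdfs-default.xml above means the client should not call `setSendBufferSize()` at all, leaving the choice to the kernel's TCP auto-tuning, which is what the updated test asserts. An illustration of that logic with a plain `java.net.Socket` (a sketch of the idea only, not the DFSClient code):

    import java.net.Socket;
    import org.apache.hadoop.conf.Configuration;

    public class SendBufferSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // 0 (the new default) means: leave the buffer to TCP auto-tuning.
        int size = conf.getInt("dfs.client.socket.send.buffer.size", 0);
        try (Socket socket = new Socket()) {
          if (size > 0) {
            socket.setSendBufferSize(size);  // explicit size only when > 0
          }
          // With auto-tuning the kernel reports its own, non-negative choice.
          System.out.println("send buffer = " + socket.getSendBufferSize());
        }
      }
    }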
*/ @Test public void testDefaultSendBufferSize() throws IOException { - assertTrue("Send buffer size should be somewhere near default.", - getSendBufferSize(new Configuration()) >= - DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT / 2); + final int sendBufferSize = getSendBufferSize(new Configuration()); + LOG.info("If not specified, the auto tuned send buffer size is: {}", + sendBufferSize); + assertTrue("Send buffer size should be non-negative value which is " + + "determined by system (kernel).", sendBufferSize > 0); } /** @@ -73,6 +73,10 @@ public class TestDFSClientSocketSize { sendBufferSize1 > sendBufferSize2); } + /** + * Test that if the send buffer size is 0, the socket will use a TCP + * auto-tuned value. + */ @Test public void testAutoTuningSendBufferSize() throws IOException { final Configuration conf = new Configuration(); @@ -80,7 +84,7 @@ public class TestDFSClientSocketSize { final int sendBufferSize = getSendBufferSize(conf); LOG.info("The auto tuned send buffer size is: {}", sendBufferSize); assertTrue("Send buffer size should be non-negative value which is " + - "determined by system (kernel).", sendBufferSize > 0); + "determined by system (kernel).", sendBufferSize > 0); } private int getSendBufferSize(Configuration conf) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index c82c0455eee..9ae49aa9ef5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -36,12 +36,12 @@ import java.util.zip.GZIPOutputStream; import com.google.common.base.Supplier; import com.google.common.collect.Lists; + import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.log4j.Level; import org.junit.Test; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.AclEntry; @@ -65,6 +65,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; import org.junit.rules.Timeout; import org.junit.AfterClass; @@ -115,6 +116,7 @@ public class TestDFSShell { GenericTestUtils.getTestDir("TestDFSShell").getAbsolutePath()); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1000); miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); miniCluster.waitActive(); @@ -2002,8 +2004,12 @@ public class TestDFSShell { DFSTestUtil.createFile(dfs, testFile2, 2 * BLOCK_SIZE, (short) 3, 0); final FileStatus status1 = dfs.getFileStatus(testDir1); final String mtime1 = fmt.format(new Date(status1.getModificationTime())); + final String atime1 = fmt.format(new Date(status1.getAccessTime())); + long now = Time.now(); + dfs.setTimes(testFile2, now + 3000, now + 6000); final FileStatus status2 = dfs.getFileStatus(testFile2); final String mtime2 = fmt.format(new Date(status2.getModificationTime())); + final String atime2 = fmt.format(new Date(status2.getAccessTime())); final ByteArrayOutputStream out = new 
ByteArrayOutputStream(); System.setOut(new PrintStream(out)); @@ -2036,17 +2042,19 @@ public class TestDFSShell { out.toString().contains(String.valueOf(octal))); out.reset(); - doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %y %n", testDir1, testFile2); + doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %x %y %n", testDir1, testFile2); n = status2.getPermission().toShort(); octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7); assertTrue(out.toString(), out.toString().contains(mtime1)); + assertTrue(out.toString(), out.toString().contains(atime1)); assertTrue(out.toString(), out.toString().contains("regular file")); assertTrue(out.toString(), out.toString().contains(status2.getPermission().toString())); assertTrue(out.toString(), out.toString().contains(String.valueOf(octal))); assertTrue(out.toString(), out.toString().contains(mtime2)); + assertTrue(out.toString(), out.toString().contains(atime2)); } private static void doFsStat(Configuration conf, String format, Path... files) @@ -2181,7 +2189,7 @@ public class TestDFSShell { assertTrue(xattrs.isEmpty()); List acls = dfs.getAclStatus(target1).getEntries(); assertTrue(acls.isEmpty()); - assertFalse(targetPerm.getAclBit()); + assertFalse(targetStatus.hasAcl()); // -ptop Path target2 = new Path(hdfsTestDir, "targetfile2"); @@ -2200,7 +2208,7 @@ public class TestDFSShell { assertTrue(xattrs.isEmpty()); acls = dfs.getAclStatus(target2).getEntries(); assertTrue(acls.isEmpty()); - assertFalse(targetPerm.getAclBit()); + assertFalse(targetStatus.hasAcl()); // -ptopx Path target3 = new Path(hdfsTestDir, "targetfile3"); @@ -2221,7 +2229,7 @@ public class TestDFSShell { assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1)); acls = dfs.getAclStatus(target3).getEntries(); assertTrue(acls.isEmpty()); - assertFalse(targetPerm.getAclBit()); + assertFalse(targetStatus.hasAcl()); // -ptopa Path target4 = new Path(hdfsTestDir, "targetfile4"); @@ -2240,7 +2248,7 @@ public class TestDFSShell { assertTrue(xattrs.isEmpty()); acls = dfs.getAclStatus(target4).getEntries(); assertFalse(acls.isEmpty()); - assertTrue(targetPerm.getAclBit()); + assertTrue(targetStatus.hasAcl()); assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target4)); // -ptoa (verify -pa option will preserve permissions also) @@ -2260,7 +2268,7 @@ public class TestDFSShell { assertTrue(xattrs.isEmpty()); acls = dfs.getAclStatus(target5).getEntries(); assertFalse(acls.isEmpty()); - assertTrue(targetPerm.getAclBit()); + assertTrue(targetStatus.hasAcl()); assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target5)); } finally { if (null != shell) { @@ -2472,7 +2480,7 @@ public class TestDFSShell { assertTrue(xattrs.isEmpty()); List acls = dfs.getAclStatus(targetDir1).getEntries(); assertTrue(acls.isEmpty()); - assertFalse(targetPerm.getAclBit()); + assertFalse(targetStatus.hasAcl()); // -ptop Path targetDir2 = new Path(hdfsTestDir, "targetDir2"); @@ -2491,7 +2499,7 @@ public class TestDFSShell { assertTrue(xattrs.isEmpty()); acls = dfs.getAclStatus(targetDir2).getEntries(); assertTrue(acls.isEmpty()); - assertFalse(targetPerm.getAclBit()); + assertFalse(targetStatus.hasAcl()); // -ptopx Path targetDir3 = new Path(hdfsTestDir, "targetDir3"); @@ -2512,7 +2520,7 @@ public class TestDFSShell { assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1)); acls = dfs.getAclStatus(targetDir3).getEntries(); assertTrue(acls.isEmpty()); - assertFalse(targetPerm.getAclBit()); + assertFalse(targetStatus.hasAcl()); // -ptopa Path targetDir4 = new Path(hdfsTestDir, "targetDir4"); @@ -2531,7 +2539,7 @@ 
public class TestDFSShell { assertTrue(xattrs.isEmpty()); acls = dfs.getAclStatus(targetDir4).getEntries(); assertFalse(acls.isEmpty()); - assertTrue(targetPerm.getAclBit()); + assertTrue(targetStatus.hasAcl()); assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir4)); // -ptoa (verify -pa option will preserve permissions also) @@ -2551,7 +2559,7 @@ public class TestDFSShell { assertTrue(xattrs.isEmpty()); acls = dfs.getAclStatus(targetDir5).getEntries(); assertFalse(acls.isEmpty()); - assertTrue(targetPerm.getAclBit()); + assertTrue(targetStatus.hasAcl()); assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir5)); } finally { if (shell != null) { @@ -2607,7 +2615,7 @@ public class TestDFSShell { assertTrue(perm.equals(targetPerm)); List acls = dfs.getAclStatus(target1).getEntries(); assertTrue(acls.isEmpty()); - assertFalse(targetPerm.getAclBit()); + assertFalse(targetStatus.hasAcl()); // -ptopa preserves both sticky bit and ACL Path target2 = new Path(hdfsTestDir, "targetfile2"); @@ -2624,7 +2632,7 @@ public class TestDFSShell { assertTrue(perm.equals(targetPerm)); acls = dfs.getAclStatus(target2).getEntries(); assertFalse(acls.isEmpty()); - assertTrue(targetPerm.getAclBit()); + assertTrue(targetStatus.hasAcl()); assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target2)); } finally { if (null != shell) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index 5dee6e0f08c..e42e08cf77e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -51,7 +51,6 @@ import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Logger; import org.junit.Test; -import static org.apache.hadoop.hdfs.inotify.Event.CreateEvent; import static org.junit.Assert.*; /** @@ -572,7 +571,7 @@ public class TestDFSUpgradeFromImage { Path path) throws IOException { String pathStr = path.toString(); HdfsFileStatus status = dfs.getFileInfo(pathStr); - if (!status.isDir()) { + if (!status.isDirectory()) { for (int retries = 10; retries > 0; retries--) { if (dfs.recoverLease(pathStr)) { return; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index c2c6be12d1a..ac14a2a41ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; -import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminManager; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; @@ -256,9 +256,10 @@ public class TestDecommission extends AdminStatesBaseTest { startSimpleHACluster(3); - // Step 1, create a 
cluster with 4 DNs. Blocks are stored on the first 3 DNs. - // The last DN is empty. Also configure the last DN to have slow heartbeat - // so that it will be chosen as excess replica candidate during recommission. + // Step 1, create a cluster with 4 DNs. Blocks are stored on the + // first 3 DNs. The last DN is empty. Also configure the last DN to have + // slow heartbeat so that it will be chosen as excess replica candidate + // during recommission. // Step 1.a, copy blocks to the first 3 DNs. Given the replica count is the // same as # of DNs, each DN will have a replica for any block. @@ -290,9 +291,9 @@ public class TestDecommission extends AdminStatesBaseTest { // Step 3, recommission the first DN on SBN and ANN to create excess replica // It recommissions the node on SBN first to create potential - // inconsistent state. In production cluster, such insistent state can happen - // even if recommission command was issued on ANN first given the async nature - // of the system. + // inconsistent state. In production cluster, such insistent state can + // happen even if recommission command was issued on ANN first given the + // async nature of the system. // Step 3.a, ask SBN to recomm the first DN. // SBN has been fixed so that it no longer invalidates excess replica during @@ -301,10 +302,10 @@ public class TestDecommission extends AdminStatesBaseTest { // 1. the last DN would have been chosen as excess replica, given its // heartbeat is considered old. // Please refer to BlockPlacementPolicyDefault#chooseReplicaToDelete - // 2. After recommissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 ) + // 2. After recommissionNode finishes, SBN has 3 live replicas (0, 1, 2) // and one excess replica ( 3 ) // After the fix, - // After recommissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 ) + // After recommissionNode finishes, SBN has 4 live replicas (0, 1, 2, 3) Thread.sleep(slowHeartbeatDNwaitTime); putNodeInService(1, decomNodeFromSBN); @@ -561,7 +562,8 @@ public class TestDecommission extends AdminStatesBaseTest { * federated cluster. 
*/ @Test(timeout=360000) - public void testHostsFileFederation() throws IOException, InterruptedException { + public void testHostsFileFederation() + throws IOException, InterruptedException { // Test for 3 namenode federated cluster testHostsFile(3); } @@ -598,7 +600,8 @@ public class TestDecommission extends AdminStatesBaseTest { } @Test(timeout=120000) - public void testDecommissionWithOpenfile() throws IOException, InterruptedException { + public void testDecommissionWithOpenfile() + throws IOException, InterruptedException { LOG.info("Starting test testDecommissionWithOpenfile"); //At most 4 nodes will be decommissioned @@ -742,14 +745,15 @@ public class TestDecommission extends AdminStatesBaseTest { // make sure the two datanodes remain in decomm in progress state BlockManagerTestUtil.recheckDecommissionState(dm); - assertTrackedAndPending(dm.getDecomManager(), 2, 0); + assertTrackedAndPending(dm.getDatanodeAdminManager(), 2, 0); } /** * Tests restart of namenode while datanode hosts are added to exclude file **/ @Test(timeout=360000) - public void testDecommissionWithNamenodeRestart()throws IOException, InterruptedException { + public void testDecommissionWithNamenodeRestart() + throws IOException, InterruptedException { LOG.info("Starting test testDecommissionWithNamenodeRestart"); int numNamenodes = 1; int numDatanodes = 1; @@ -914,7 +918,7 @@ public class TestDecommission extends AdminStatesBaseTest { @Test(timeout=120000) public void testBlocksPerInterval() throws Exception { - org.apache.log4j.Logger.getLogger(DecommissionManager.class) + org.apache.log4j.Logger.getLogger(DatanodeAdminManager.class) .setLevel(Level.TRACE); // Turn the blocks per interval way down getConf().setInt( @@ -927,7 +931,8 @@ public class TestDecommission extends AdminStatesBaseTest { final FileSystem fs = getCluster().getFileSystem(); final DatanodeManager datanodeManager = getCluster().getNamesystem().getBlockManager().getDatanodeManager(); - final DecommissionManager decomManager = datanodeManager.getDecomManager(); + final DatanodeAdminManager decomManager = + datanodeManager.getDatanodeAdminManager(); // Write a 3 block file, so each node has one block. Should scan 3 nodes. 
DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA); @@ -944,7 +949,7 @@ public class TestDecommission extends AdminStatesBaseTest { } private void doDecomCheck(DatanodeManager datanodeManager, - DecommissionManager decomManager, int expectedNumCheckedNodes) + DatanodeAdminManager decomManager, int expectedNumCheckedNodes) throws IOException, ExecutionException, InterruptedException { // Decom all nodes ArrayList decommissionedNodes = Lists.newArrayList(); @@ -965,7 +970,7 @@ public class TestDecommission extends AdminStatesBaseTest { @Test(timeout=120000) public void testPendingNodes() throws Exception { - org.apache.log4j.Logger.getLogger(DecommissionManager.class) + org.apache.log4j.Logger.getLogger(DatanodeAdminManager.class) .setLevel(Level.TRACE); // Only allow one node to be decom'd at a time getConf().setInt( @@ -978,7 +983,8 @@ public class TestDecommission extends AdminStatesBaseTest { final FileSystem fs = getCluster().getFileSystem(); final DatanodeManager datanodeManager = getCluster().getNamesystem().getBlockManager().getDatanodeManager(); - final DecommissionManager decomManager = datanodeManager.getDecomManager(); + final DatanodeAdminManager decomManager = + datanodeManager.getDatanodeAdminManager(); // Keep a file open to prevent decom from progressing HdfsDataOutputStream open1 = @@ -1014,7 +1020,7 @@ public class TestDecommission extends AdminStatesBaseTest { assertTrackedAndPending(decomManager, 1, 0); } - private void assertTrackedAndPending(DecommissionManager decomManager, + private void assertTrackedAndPending(DatanodeAdminManager decomManager, int tracked, int pending) { assertEquals("Unexpected number of tracked nodes", tracked, decomManager.getNumTrackedNodes()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index b35d3747868..95256096e73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -93,6 +93,7 @@ import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.ScriptBasedMapping; import org.apache.hadoop.net.StaticMapping; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DataChecksum; @@ -1561,6 +1562,27 @@ public class TestDistributedFileSystem { fs.removeErasureCodingPolicy(policyName); assertEquals(policyName, ErasureCodingPolicyManager.getInstance(). getRemovedPolicies().get(0).getName()); + + // remove erasure coding policy as a user without privilege + UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting( + "ProbablyNotARealUserName", new String[] {"ShangriLa"}); + final MiniDFSCluster finalCluster = cluster; + fakeUGI.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + DistributedFileSystem fs = finalCluster.getFileSystem(); + try { + fs.removeErasureCodingPolicy(policyName); + fail(); + } catch (AccessControlException ace) { + GenericTestUtils.assertExceptionContains("Access denied for user " + + "ProbablyNotARealUserName. 
Superuser privilege is required", + ace); + } + return null; + } + }); + } finally { if (cluster != null) { cluster.shutdown(); @@ -1609,6 +1631,34 @@ public class TestDistributedFileSystem { GenericTestUtils.assertExceptionContains("does not exists", e); // pass } + + // disable and enable erasure coding policy as a user without privilege + UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting( + "ProbablyNotARealUserName", new String[] {"ShangriLa"}); + final MiniDFSCluster finalCluster = cluster; + fakeUGI.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + DistributedFileSystem fs = finalCluster.getFileSystem(); + try { + fs.disableErasureCodingPolicy(policyName); + fail(); + } catch (AccessControlException ace) { + GenericTestUtils.assertExceptionContains("Access denied for user " + + "ProbablyNotARealUserName. Superuser privilege is required", + ace); + } + try { + fs.enableErasureCodingPolicy(policyName); + fail(); + } catch (AccessControlException ace) { + GenericTestUtils.assertExceptionContains("Access denied for user " + + "ProbablyNotARealUserName. Superuser privilege is required", + ace); + } + return null; + } + }); } finally { if (cluster != null) { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 8eb3b7b369a..bf02db3a093 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -891,7 +891,8 @@ public class TestEncryptionZones { CipherSuite suite, CryptoProtocolVersion version) throws Exception { Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( - (short) 777), "owner", "group", new byte[0], new byte[0], + (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class), + "owner", "group", new byte[0], new byte[0], 1010, 0, new FileEncryptionInfo(suite, version, new byte[suite.getAlgorithmBlockSize()], new byte[suite.getAlgorithmBlockSize()], diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java index f90a2f31289..47cdf235480 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java @@ -50,8 +50,8 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.EnumSet; -import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.junit.Assert.*; @@ -209,9 +209,9 @@ public class TestErasureCodingPolicies { cluster.restartNameNodes(); cluster.waitActive(); - // No policies should be enabled after restart - Assert.assertTrue("No policies should be enabled after restart", - fs.getAllErasureCodingPolicies().isEmpty()); + // Only default policy should be enabled after restart + Assert.assertEquals("Only default policy should be enabled after restart", + 1, fs.getAllErasureCodingPolicies().size()); // Already set directory-level policies should still be in effect Path disabledPolicy = new 
Path(dir1, "afterDisabled"); @@ -359,6 +359,24 @@ public class TestErasureCodingPolicies { } } + @Test + public void testSetDefaultPolicy() + throws IOException { + String src = "/ecDir"; + final Path ecDir = new Path(src); + try { + fs.mkdir(ecDir, FsPermission.getDirDefault()); + fs.getClient().setErasureCodingPolicy(src, null); + String actualECPolicyName = fs.getClient(). + getErasureCodingPolicy(src).getName(); + String expectedECPolicyName = + conf.get(DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY, + DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT); + assertEquals(expectedECPolicyName, actualECPolicyName); + } catch (Exception e) { + } + } + @Test public void testGetAllErasureCodingPolicies() throws Exception { Collection allECPolicies = fs @@ -629,7 +647,7 @@ public class TestErasureCodingPolicies { @Test public void testGetAllErasureCodingCodecs() throws Exception { - HashMap allECCodecs = fs + Map allECCodecs = fs .getAllErasureCodingCodecs(); assertTrue("At least 3 system codecs should be enabled", allECCodecs.size() >= 3); @@ -693,5 +711,106 @@ public class TestErasureCodingPolicies { assertTrue(responses[0].isSucceed()); assertEquals(SystemErasureCodingPolicies.getPolicies().size() + 1, ErasureCodingPolicyManager.getInstance().getPolicies().length); + + // add erasure coding policy as a user without privilege + UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting( + "ProbablyNotARealUserName", new String[] {"ShangriLa"}); + final ErasureCodingPolicy ecPolicy = newPolicy; + fakeUGI.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + DistributedFileSystem fs = cluster.getFileSystem(); + try { + fs.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy}); + fail(); + } catch (AccessControlException ace) { + GenericTestUtils.assertExceptionContains("Access denied for user " + + "ProbablyNotARealUserName. Superuser privilege is required", + ace); + } + return null; + } + }); + } + + @Test + public void testReplicationPolicy() throws Exception { + ErasureCodingPolicy replicaPolicy = + SystemErasureCodingPolicies.getReplicationPolicy(); + + final Path rootDir = new Path("/striped"); + final Path replicaDir = new Path(rootDir, "replica"); + final Path subReplicaDir = new Path(replicaDir, "replica"); + final Path replicaFile = new Path(replicaDir, "file"); + final Path subReplicaFile = new Path(subReplicaDir, "file"); + + fs.mkdirs(rootDir); + fs.setErasureCodingPolicy(rootDir, ecPolicy.getName()); + + // 1. At first, child directory will inherit parent's EC policy + fs.mkdirs(replicaDir); + fs.createFile(replicaFile).build().close(); + HdfsFileStatus fileStatus = (HdfsFileStatus)fs.getFileStatus(replicaFile); + assertEquals("File should inherit EC policy.", ecPolicy, fileStatus + .getErasureCodingPolicy()); + assertEquals("File should be a EC file.", true, fileStatus + .isErasureCoded()); + assertEquals("File should have the same EC policy as its ancestor.", + ecPolicy, fs.getErasureCodingPolicy(replicaFile)); + fs.delete(replicaFile, false); + + // 2. Set replication policy on child directory, then get back the policy + fs.setErasureCodingPolicy(replicaDir, replicaPolicy.getName()); + ErasureCodingPolicy temp = fs.getErasureCodingPolicy(replicaDir); + assertEquals("Directory should hide replication EC policy.", + null, temp); + + // 3. New file will be replication file. 
Please be noted that replication + // policy only set on directory, not on file + fs.createFile(replicaFile).build().close(); + assertEquals("Replication file should have default replication factor.", + fs.getDefaultReplication(), + fs.getFileStatus(replicaFile).getReplication()); + fs.setReplication(replicaFile, (short) 2); + assertEquals("File should have replication factor as expected.", + 2, fs.getFileStatus(replicaFile).getReplication()); + fileStatus = (HdfsFileStatus)fs.getFileStatus(replicaFile); + assertEquals("File should not have EC policy.", null, fileStatus + .getErasureCodingPolicy()); + assertEquals("File should not be a EC file.", false, + fileStatus.isErasureCoded()); + ErasureCodingPolicy ecPolicyOnFile = fs.getErasureCodingPolicy(replicaFile); + assertEquals("File should not have EC policy.", null, ecPolicyOnFile); + fs.delete(replicaFile, false); + + // 4. New directory under replication directory, is also replication + // directory + fs.mkdirs(subReplicaDir); + assertEquals("Directory should inherit hiding replication EC policy.", + null, fs.getErasureCodingPolicy(subReplicaDir)); + fs.createFile(subReplicaFile).build().close(); + assertEquals("File should have default replication factor.", + fs.getDefaultReplication(), + fs.getFileStatus(subReplicaFile).getReplication()); + fileStatus = (HdfsFileStatus)fs.getFileStatus(subReplicaFile); + assertEquals("File should not have EC policy.", null, + fileStatus.getErasureCodingPolicy()); + assertEquals("File should not be a EC file.", false, + fileStatus.isErasureCoded()); + assertEquals("File should not have EC policy.", null, + fs.getErasureCodingPolicy(subReplicaFile)); + fs.delete(subReplicaFile, false); + + // 5. Unset replication policy on directory, new file will be EC file + fs.unsetErasureCodingPolicy(replicaDir); + fs.createFile(subReplicaFile).build().close(); + fileStatus = (HdfsFileStatus)fs.getFileStatus(subReplicaFile); + assertEquals("File should inherit EC policy.", ecPolicy, + fileStatus.getErasureCodingPolicy()); + assertEquals("File should be a EC file.", true, + fileStatus.isErasureCoded()); + assertEquals("File should have the same EC policy as its ancestor", + ecPolicy, fs.getErasureCodingPolicy(subReplicaFile)); + fs.delete(subReplicaFile, false); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java new file mode 100644 index 00000000000..e5d05667f2e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs; + +import java.net.URI; + +import org.apache.hadoop.fs.FSProtos.FileStatusProto; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.DataOutputBuffer; + +import com.google.protobuf.ByteString; + +import org.junit.Test; +import static org.junit.Assert.assertEquals; + +/** + * Verify compatible FileStatus/HdfsFileStatus serialization. + */ +public class TestFileStatusSerialization { + + private static void checkFields(FileStatus expected, FileStatus actual) { + assertEquals(expected.getPath(), actual.getPath()); + assertEquals(expected.isDirectory(), actual.isDirectory()); + assertEquals(expected.getLen(), actual.getLen()); + assertEquals(expected.getPermission(), actual.getPermission()); + assertEquals(expected.getOwner(), actual.getOwner()); + assertEquals(expected.getGroup(), actual.getGroup()); + assertEquals(expected.getModificationTime(), actual.getModificationTime()); + assertEquals(expected.getAccessTime(), actual.getAccessTime()); + assertEquals(expected.getReplication(), actual.getReplication()); + assertEquals(expected.getBlockSize(), actual.getBlockSize()); + } + + /** + * Test API backwards-compatibility with 2.x applications w.r.t. FsPermission. + */ + @Test + @SuppressWarnings("deprecation") + public void testFsPermissionCompatibility() throws Exception { + final int flagmask = 0x8; + // flags compatible with 2.x; fixed as constant in this test to ensure + // compatibility is maintained. New flags are not part of the contract this + // test verifies. 
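The serialization test checks that the new first-class attribute flags agree with the deprecated `FsPermission` extension bits. For callers the migration is mechanical; a sketch of the preferred accessors (the path is an example, and the sketch assumes any reachable FileSystem):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AttributeFlagsSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus status = fs.getFileStatus(new Path("/dingos/zot")); // example
        // Hadoop 3 accessors: the flags travel with FileStatus itself and
        // replace the deprecated FsPermission extension bits
        // (getAclBit() / getEncryptedBit() / getErasureCodedBit()).
        System.out.println("hasAcl         = " + status.hasAcl());
        System.out.println("isEncrypted    = " + status.isEncrypted());
        System.out.println("isErasureCoded = " + status.isErasureCoded());
      }
    }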
+ for (int i = 0; i < flagmask; ++i) { + FsPermission perm = FsPermission.createImmutable((short) 0013); + HdfsFileStatusProto.Builder hspb = HdfsFileStatusProto.newBuilder() + .setFileType(FileType.IS_FILE) + .setPath(ByteString.copyFromUtf8("hdfs://foobar/dingos/zot")) + .setLength(4344) + .setPermission(PBHelperClient.convert(perm)) + .setOwner("hadoop") + .setGroup("unqbbc") + .setModificationTime(12345678L) + .setAccessTime(87654321L) + .setBlockReplication(10) + .setBlocksize(1L << 33) + .setFlags(i); + HdfsFileStatus stat = PBHelperClient.convert(hspb.build()); + stat.makeQualified(new URI("hdfs://foobar"), new Path("/dingos")); + assertEquals(new Path("hdfs://foobar/dingos/zot"), stat.getPath()); + + // verify deprecated FsPermissionExtension methods + FsPermission sp = stat.getPermission(); + assertEquals(sp.getAclBit(), stat.hasAcl()); + assertEquals(sp.getEncryptedBit(), stat.isEncrypted()); + assertEquals(sp.getErasureCodedBit(), stat.isErasureCoded()); + + // verify Writable contract + DataOutputBuffer dob = new DataOutputBuffer(); + stat.write(dob); + DataInputBuffer dib = new DataInputBuffer(); + dib.reset(dob.getData(), 0, dob.getLength()); + FileStatus fstat = new FileStatus(); + fstat.readFields(dib); + checkFields(stat, fstat); + + // FsPermisisonExtension used for HdfsFileStatus, not FileStatus, + // attribute flags should still be preserved + assertEquals(sp.getAclBit(), fstat.hasAcl()); + assertEquals(sp.getEncryptedBit(), fstat.isEncrypted()); + assertEquals(sp.getErasureCodedBit(), fstat.isErasureCoded()); + } + } + // param for LocatedFileStatus, HttpFileStatus + + @Test + public void testCrossSerializationProto() throws Exception { + FsPermission perm = FsPermission.getFileDefault(); + for (FileType t : FileType.values()) { + HdfsFileStatusProto.Builder hspb = HdfsFileStatusProto.newBuilder() + .setFileType(t) + .setPath(ByteString.copyFromUtf8("hdfs://foobar/dingos")) + .setLength(4344) + .setPermission(PBHelperClient.convert(perm)) + .setOwner("hadoop") + .setGroup("unqbbc") + .setModificationTime(12345678L) + .setAccessTime(87654321L) + .setBlockReplication(10) + .setBlocksize(1L << 33); + if (FileType.IS_SYMLINK.equals(t)) { + hspb.setSymlink(ByteString.copyFromUtf8("hdfs://yaks/dingos")); + } + if (FileType.IS_FILE.equals(t)) { + hspb.setFileId(4544); + } + HdfsFileStatusProto hsp = hspb.build(); + byte[] src = hsp.toByteArray(); + FileStatusProto fsp = FileStatusProto.parseFrom(src); + assertEquals(hsp.getPath().toStringUtf8(), fsp.getPath()); + assertEquals(hsp.getLength(), fsp.getLength()); + assertEquals(hsp.getPermission().getPerm(), + fsp.getPermission().getPerm()); + assertEquals(hsp.getOwner(), fsp.getOwner()); + assertEquals(hsp.getGroup(), fsp.getGroup()); + assertEquals(hsp.getModificationTime(), fsp.getModificationTime()); + assertEquals(hsp.getAccessTime(), fsp.getAccessTime()); + assertEquals(hsp.getSymlink().toStringUtf8(), fsp.getSymlink()); + assertEquals(hsp.getBlockReplication(), fsp.getBlockReplication()); + assertEquals(hsp.getBlocksize(), fsp.getBlockSize()); + assertEquals(hsp.getFileType().ordinal(), fsp.getFileType().ordinal()); + + // verify unknown fields preserved + byte[] dst = fsp.toByteArray(); + HdfsFileStatusProto hsp2 = HdfsFileStatusProto.parseFrom(dst); + assertEquals(hsp, hsp2); + checkFields(PBHelperClient.convert(hsp), PBHelperClient.convert(hsp2)); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index 16cdf9b322d..161e2277ce5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -30,6 +30,7 @@ import java.io.DataOutputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.security.PrivilegedExceptionAction; +import java.util.EnumSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -353,12 +354,14 @@ public class TestLease { Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( - (short) 777), "owner", "group", new byte[0], new byte[0], + (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class), + "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString()); Mockito .doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( - (short) 777), "owner", "group", new byte[0], new byte[0], + (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class), + "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)) .when(mcp) .create(anyString(), (FsPermission) anyObject(), anyString(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java index 85fc97bc151..0834d30d384 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -59,6 +59,8 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import com.google.common.base.Supplier; +import org.slf4j.LoggerFactory; +import org.slf4j.Logger; /** * This class tests the DFS positional read functionality in a single node @@ -72,6 +74,9 @@ public class TestPread { boolean simulatedStorage; boolean isHedgedRead; + private static final Logger LOG = + LoggerFactory.getLogger(TestPread.class.getName()); + @Before public void setup() { simulatedStorage = false; @@ -551,6 +556,64 @@ public class TestPread { } } + @Test(timeout=30000) + public void testHedgedReadFromAllDNFailed() throws IOException { + Configuration conf = new Configuration(); + int numHedgedReadPoolThreads = 5; + final int hedgedReadTimeoutMillis = 50; + + conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY, + numHedgedReadPoolThreads); + conf.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY, + hedgedReadTimeoutMillis); + conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 0); + // Set up the InjectionHandler + DFSClientFaultInjector.set(Mockito.mock(DFSClientFaultInjector.class)); + DFSClientFaultInjector injector = DFSClientFaultInjector.get(); + Mockito.doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + if (true) { + LOG.info("-------------- throw Checksum Exception"); + throw new ChecksumException("ChecksumException test", 100); + } + return null; + } + }).when(injector).fetchFromDatanodeException(); + + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) + .format(true).build(); + DistributedFileSystem fileSys = cluster.getFileSystem(); + DFSClient dfsClient = fileSys.getClient(); + FSDataOutputStream output = null; + DFSInputStream input = null; + String filename = "/hedgedReadMaxOut.dat"; + 
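The new test drives hedged reads by enabling the client-side thread pool and a low trigger threshold. Enabling the same behaviour in a client looks roughly like this (the values are examples; the constants are the ones used in the test above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class HedgedReadSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // A hedged (second) read is issued when the first replica read has not
        // returned within the threshold; a pool size of 0 disables the feature.
        conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY, 5);
        conf.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY, 50);
        // Any FileSystem/DFSClient created from this conf now hedges
        // positional reads (pread).
      }
    }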
DFSHedgedReadMetrics metrics = dfsClient.getHedgedReadMetrics(); + // Metrics instance is static, so we need to reset counts from prior tests. + metrics.hedgedReadOps.set(0); + try { + Path file = new Path(filename); + output = fileSys.create(file, (short) 2); + byte[] data = new byte[64 * 1024]; + output.write(data); + output.flush(); + output.close(); + byte[] buffer = new byte[64 * 1024]; + input = dfsClient.open(filename); + input.read(0, buffer, 0, 1024); + Assert.fail("Reading the block should have thrown BlockMissingException"); + } catch (BlockMissingException e) { + assertEquals(3, input.getHedgedReadOpsLoopNumForTesting()); + assertTrue(metrics.getHedgedReadOps() == 0); + } finally { + Mockito.reset(injector); + IOUtils.cleanupWithLogger(LOG, input); + IOUtils.cleanupWithLogger(LOG, output); + fileSys.close(); + cluster.shutdown(); + } + } + /** * Scenario: 1. Write a file with RF=2, DN1 and DN2
* 2. Open the stream; consider the locations [DN1, DN2] in the LocatedBlock.
@@ -563,7 +626,7 @@ public class TestPread { */ @Test public void testPreadFailureWithChangedBlockLocations() throws Exception { - doPreadTestWithChangedLocations(); + doPreadTestWithChangedLocations(1); } /** @@ -576,21 +639,36 @@ public class TestPread { * 7. Consider next calls to getBlockLocations() always returns DN3 as last * location.
*/ - @Test + @Test(timeout = 60000) public void testPreadHedgedFailureWithChangedBlockLocations() throws Exception { isHedgedRead = true; - doPreadTestWithChangedLocations(); + DFSClientFaultInjector old = DFSClientFaultInjector.get(); + try { + DFSClientFaultInjector.set(new DFSClientFaultInjector() { + public void sleepBeforeHedgedGet() { + try { + Thread.sleep(500); + } catch (InterruptedException e) { + } + } + }); + doPreadTestWithChangedLocations(2); + } finally { + DFSClientFaultInjector.set(old); + } } - private void doPreadTestWithChangedLocations() + private void doPreadTestWithChangedLocations(int maxFailures) throws IOException, TimeoutException, InterruptedException { GenericTestUtils.setLogLevel(DFSClient.LOG, Level.DEBUG); Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); if (isHedgedRead) { + conf.setInt(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY, 100); conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY, 2); + conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 1000); } try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build()) { @@ -684,6 +762,9 @@ public class TestPread { int n = din.read(0, buf, 0, data.length()); assertEquals(data.length(), n); assertEquals("Data should be read", data, new String(buf, 0, n)); + assertTrue("Read should complete with maximum " + maxFailures + + " failures, but completed with " + din.failures, + din.failures <= maxFailures); DFSClient.LOG.info("Read completed"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 29e4028f559..7cd34c2acd7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.BitSet; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -44,6 +45,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo; import org.apache.hadoop.hdfs.util.StripedBlockUtil; @@ -81,6 +83,7 @@ public class TestReconstructStripedFile { Any } + private Configuration conf; private MiniDFSCluster cluster; private DistributedFileSystem fs; // Map: DatanodeID -> datanode index in cluster @@ -89,7 +92,7 @@ public class TestReconstructStripedFile { @Before public void setup() throws IOException { - final Configuration conf = new Configuration(); + conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt( DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY, @@ -263,6 +266,14 @@ public class TestReconstructStripedFile { return stoppedDNs; } + private static void writeFile(DistributedFileSystem fs, String 
fileName, + int fileLen) throws Exception { + final byte[] data = new byte[fileLen]; + Arrays.fill(data, (byte) 1); + DFSTestUtil.writeFile(fs, new Path(fileName), data); + StripedFileTestUtil.waitBlockGroupsReported(fs, fileName); + } + /** * Test the file blocks reconstruction. * 1. Check the replica is reconstructed in the target datanode, @@ -278,10 +289,7 @@ public class TestReconstructStripedFile { Path file = new Path(fileName); - final byte[] data = new byte[fileLen]; - Arrays.fill(data, (byte) 1); - DFSTestUtil.writeFile(fs, file, data); - StripedFileTestUtil.waitBlockGroupsReported(fs, fileName); + writeFile(fs, fileName, fileLen); LocatedBlocks locatedBlocks = StripedFileTestUtil.getLocatedBlocks(file, fs); @@ -424,4 +432,60 @@ public class TestReconstructStripedFile { ecTasks.add(invalidECInfo); dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks); } + + // HDFS-12044 + @Test(timeout = 60000) + public void testNNSendsErasureCodingTasks() throws Exception { + testNNSendsErasureCodingTasks(1); + testNNSendsErasureCodingTasks(2); + } + + private void testNNSendsErasureCodingTasks(int deadDN) throws Exception { + cluster.shutdown(); + + final int numDataNodes = dnNum + 1; + conf.setInt( + DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 10); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 20); + conf.setInt(DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_BLK_THREADS_KEY, + 2); + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(numDataNodes).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy(); + fs.getClient().setErasureCodingPolicy("/", policy.getName()); + + final int fileLen = cellSize * ecPolicy.getNumDataUnits() * 2; + for (int i = 0; i < 100; i++) { + writeFile(fs, "/ec-file-" + i, fileLen); + } + + // Inject data loss by tearing down the desired number of DataNodes. + assertTrue(policy.getNumParityUnits() >= deadDN); + List dataNodes = new ArrayList<>(cluster.getDataNodes()); + Collections.shuffle(dataNodes); + for (DataNode dn : dataNodes.subList(0, deadDN)) { + shutdownDataNode(dn); + } + + final FSNamesystem ns = cluster.getNamesystem(); + GenericTestUtils.waitFor(() -> ns.getPendingDeletionBlocks() == 0, + 500, 30000); + + // Make sure that all pending reconstruction tasks can be processed. + while (ns.getPendingReconstructionBlocks() > 0) { + long timeoutPending = ns.getNumTimedOutPendingReconstructions(); + assertTrue(String.format("Found %d timeout pending reconstruction tasks", + timeoutPending), timeoutPending == 0); + Thread.sleep(1000); + } + + // Verify that all DNs reach zero xmitsInProgress. 
+ GenericTestUtils.waitFor(() -> + cluster.getDataNodes().stream().mapToInt( + DataNode::getXmitsInProgress).sum() == 0, + 500, 30000 + ); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index f03b440b456..f25d28f22c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -466,6 +468,29 @@ public class TestSafeMode { // expected } + ECSchema toAddSchema = new ECSchema("testcodec", 3, 2); + ErasureCodingPolicy newPolicy = + new ErasureCodingPolicy(toAddSchema, 128 * 1024); + ErasureCodingPolicy[] policyArray = + new ErasureCodingPolicy[]{newPolicy}; + try { + dfs.addErasureCodingPolicies(policyArray); + fail("AddErasureCodingPolicies should have failed."); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains( + "Cannot add erasure coding policy", ioe); + // expected + } + + try { + dfs.removeErasureCodingPolicy("testECName"); + fail("RemoveErasureCodingPolicy should have failed."); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains( + "Cannot remove erasure coding policy", ioe); + // expected + } + assertFalse("Could not leave SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java index 9dd6846d7fa..28ec7082537 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java @@ -102,7 +102,7 @@ public class TestJournalNode { @Test(timeout=100000) public void testJournal() throws Exception { MetricsRecordBuilder metrics = MetricsAsserts.getMetrics( - journal.getMetricsForTests().getName()); + journal.getMetrics().getName()); MetricsAsserts.assertCounter("BatchesWritten", 0L, metrics); MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics); MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics); @@ -117,7 +117,7 @@ public class TestJournalNode { ch.sendEdits(1L, 1, 1, "hello".getBytes(Charsets.UTF_8)).get(); metrics = MetricsAsserts.getMetrics( - journal.getMetricsForTests().getName()); + 
journal.getMetrics().getName()); MetricsAsserts.assertCounter("BatchesWritten", 1L, metrics); MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics); MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics); @@ -130,7 +130,7 @@ public class TestJournalNode { ch.sendEdits(1L, 2, 1, "goodbye".getBytes(Charsets.UTF_8)).get(); metrics = MetricsAsserts.getMetrics( - journal.getMetricsForTests().getName()); + journal.getMetrics().getName()); MetricsAsserts.assertCounter("BatchesWritten", 2L, metrics); MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 1L, metrics); MetricsAsserts.assertGauge("CurrentLagTxns", 98L, metrics); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java similarity index 58% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java index 8415a6f54e9..2964f05c876 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.qjournal; +package org.apache.hadoop.hdfs.qjournal.server; import com.google.common.base.Supplier; import com.google.common.collect.Lists; @@ -25,17 +25,21 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; +import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import static org.apache.hadoop.hdfs.server.namenode.FileJournalManager .getLogFile; - +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TestName; import java.io.File; import java.io.IOException; @@ -46,6 +50,7 @@ import java.util.Random; * Unit test for Journal Node formatting upon re-installation and syncing. 
*/ public class TestJournalNodeSync { + private Configuration conf; private MiniQJMHACluster qjmhaCluster; private MiniDFSCluster dfsCluster; private MiniJournalCluster jCluster; @@ -54,11 +59,18 @@ public class TestJournalNodeSync { private int editsPerformed = 0; private final String jid = "ns1"; + @Rule + public TestName testName = new TestName(); + @Before public void setUpMiniCluster() throws IOException { - final Configuration conf = new HdfsConfiguration(); + conf = new HdfsConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY, true); conf.setLong(DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_KEY, 1000L); + if (testName.getMethodName().equals( + "testSyncAfterJNdowntimeWithoutQJournalQueue")) { + conf.setInt(DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY, 0); + } qjmhaCluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2) .build(); dfsCluster = qjmhaCluster.getDfsCluster(); @@ -214,6 +226,156 @@ public class TestJournalNodeSync { GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000); } + + // Test JournalNode Sync when a JN is down while the NN is actively writing + // logs and comes back up after some time. + @Test (timeout=300_000) + public void testSyncAfterJNdowntime() throws Exception { + File firstJournalDir = jCluster.getJournalDir(0, jid); + File firstJournalCurrentDir = new StorageDirectory(firstJournalDir) + .getCurrentDir(); + File secondJournalDir = jCluster.getJournalDir(1, jid); + File secondJournalCurrentDir = new StorageDirectory(secondJournalDir) + .getCurrentDir(); + + long[] startTxIds = new long[10]; + + startTxIds[0] = generateEditLog(); + startTxIds[1] = generateEditLog(); + + // Stop the first JN + jCluster.getJournalNode(0).stop(0); + + // Roll some more edits while the first JN is down + for (int i = 2; i < 10; i++) { + startTxIds[i] = generateEditLog(5); + } + + // Re-start the first JN + jCluster.restartJournalNode(0); + + // Roll an edit to update the committed tx id of the first JN + generateEditLog(); + + // List the edit logs rolled during JN down time. + List missingLogs = Lists.newArrayList(); + for (int i = 2; i < 10; i++) { + EditLogFile logFile = getLogFile(secondJournalCurrentDir, startTxIds[i], + false); + missingLogs.add(new File(firstJournalCurrentDir, + logFile.getFile().getName())); + } + + // Check that JNSync downloaded the edit logs rolled during JN down time. + GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000); + } + + /** + * Test JournalNode Sync when a JN is down while the NN is actively writing + * logs and comes back up after some time with no edit log queueing. 
+ * Queuing is disabled during the cluster setup {@link #setUpMiniCluster()}. + * @throws Exception + */ + @Test (timeout=300_000) + public void testSyncAfterJNdowntimeWithoutQJournalQueue() throws Exception{ + // Queuing is disabled during the cluster setup {@link #setUpMiniCluster()} + File firstJournalDir = jCluster.getJournalDir(0, jid); + File firstJournalCurrentDir = new StorageDirectory(firstJournalDir) + .getCurrentDir(); + File secondJournalDir = jCluster.getJournalDir(1, jid); + File secondJournalCurrentDir = new StorageDirectory(secondJournalDir) + .getCurrentDir(); + + long[] startTxIds = new long[10]; + + startTxIds[0] = generateEditLog(); + startTxIds[1] = generateEditLog(2); + + // Stop the first JN + jCluster.getJournalNode(0).stop(0); + + // Roll some more edits while the first JN is down + for (int i = 2; i < 10; i++) { + startTxIds[i] = generateEditLog(5); + } + + // Re-start the first JN + jCluster.restartJournalNode(0); + + // After JN restart and before rolling another edit, the missing edit + // logs will not be synced, as the committed tx id of the JN will be + // less than the start tx ids of the missing edit logs and edit log queuing + // has been disabled. + // Roll an edit to update the committed tx id of the first JN + generateEditLog(2); + + // List the edit logs rolled during JN down time. + List missingLogs = Lists.newArrayList(); + for (int i = 2; i < 10; i++) { + EditLogFile logFile = getLogFile(secondJournalCurrentDir, startTxIds[i], + false); + missingLogs.add(new File(firstJournalCurrentDir, + logFile.getFile().getName())); + } + + // Check that JNSync downloaded the edit logs rolled during JN down time. + GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000); + + // Check that all the missing edit logs have been downloaded via + // JournalNodeSyncer alone (as the edit log queueing has been disabled) + long numEditLogsSynced = jCluster.getJournalNode(0).getOrCreateJournal(jid) + .getMetrics().getNumEditLogsSynced().value(); + Assert.assertTrue("Edit logs downloaded outside syncer. Expected 8 or " + + "more downloads, got " + numEditLogsSynced + " downloads instead", + numEditLogsSynced >= 8); + } + + // Test JournalNode Sync when a JN is formatted while NN is actively writing + // logs. + @Test (timeout=300_000) + public void testSyncAfterJNformat() throws Exception{ + File firstJournalDir = jCluster.getJournalDir(0, jid); + File firstJournalCurrentDir = new StorageDirectory(firstJournalDir) + .getCurrentDir(); + File secondJournalDir = jCluster.getJournalDir(1, jid); + File secondJournalCurrentDir = new StorageDirectory(secondJournalDir) + .getCurrentDir(); + + long[] startTxIds = new long[10]; + + startTxIds[0] = generateEditLog(1); + startTxIds[1] = generateEditLog(2); + startTxIds[2] = generateEditLog(4); + startTxIds[3] = generateEditLog(6); + + Journal journal1 = jCluster.getJournalNode(0).getOrCreateJournal(jid); + NamespaceInfo nsInfo = journal1.getStorage().getNamespaceInfo(); + + // Delete contents of current directory of one JN + for (File file : firstJournalCurrentDir.listFiles()) { + file.delete(); + } + + // Format the JN + journal1.format(nsInfo); + + // Roll some more edits + for (int i = 4; i < 10; i++) { + startTxIds[i] = generateEditLog(5); + } + + // List the edit logs rolled during JN down time. 
+ List missingLogs = Lists.newArrayList(); + for (int i = 0; i < 10; i++) { + EditLogFile logFile = getLogFile(secondJournalCurrentDir, startTxIds[i], + false); + missingLogs.add(new File(firstJournalCurrentDir, + logFile.getFile().getName())); + } + + // Check that the formatted JN has all the edit logs. + GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000); + } + private File deleteEditLog(File currentDir, long startTxId) throws IOException { EditLogFile logFile = getLogFile(currentDir, startTxId); @@ -242,8 +404,20 @@ public class TestJournalNodeSync { * @return the startTxId of next segment after rolling edits. */ private long generateEditLog() throws IOException { + return generateEditLog(1); + } + + /** + * Does specified number of edits and rolls the Edit Log. + * + * @param numEdits number of Edits to perform + * @return the startTxId of next segment after rolling edits. + */ + private long generateEditLog(int numEdits) throws IOException { long startTxId = namesystem.getFSImage().getEditLog().getLastWrittenTxId(); - Assert.assertTrue("Failed to do an edit", doAnEdit()); + for (int i = 1; i <= numEdits; i++) { + Assert.assertTrue("Failed to do an edit", doAnEdit()); + } dfsCluster.getNameNode(0).getRpcServer().rollEditLog(); return startTxId; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java index 77e2ffb1dc3..7ee766fb294 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java @@ -328,7 +328,7 @@ public class BlockManagerTestUtil { */ public static void recheckDecommissionState(DatanodeManager dm) throws ExecutionException, InterruptedException { - dm.getDecomManager().runMonitorForTest(); + dm.getDatanodeAdminManager().runMonitorForTest(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 6b1a9795de3..42aeadf272d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -1459,4 +1459,58 @@ public class TestBlockManager { } } + @Test + public void testMetaSaveMissingReplicas() throws Exception { + List origStorages = getStorages(0, 1); + List origNodes = getNodes(origStorages); + BlockInfo block = makeBlockReplicasMissing(0, origNodes); + File file = new File("test.log"); + PrintWriter out = new PrintWriter(file); + bm.metaSave(out); + out.flush(); + FileInputStream fstream = new FileInputStream(file); + DataInputStream in = new DataInputStream(fstream); + BufferedReader reader = new BufferedReader(new InputStreamReader(in)); + StringBuffer buffer = new StringBuffer(); + String line; + try { + while ((line = reader.readLine()) != null) { + buffer.append(line); + } + String output = buffer.toString(); + assertTrue("Metasave output should have reported missing blocks.", + output.contains("Metasave: Blocks currently missing: 1")); + assertTrue("There should be 0 blocks waiting for 
reconstruction", + output.contains("Metasave: Blocks waiting for reconstruction: 0")); + String blockNameGS = block.getBlockName() + "_" + + block.getGenerationStamp(); + assertTrue("Block " + blockNameGS + " should be MISSING.", + output.contains(blockNameGS + " MISSING")); + } finally { + reader.close(); + file.delete(); + } + } + + private BlockInfo makeBlockReplicasMissing(long blockId, + List nodesList) throws IOException { + long inodeId = ++mockINodeId; + final INodeFile bc = TestINodeFile.createINodeFile(inodeId); + + BlockInfo blockInfo = blockOnNodes(blockId, nodesList); + blockInfo.setReplication((short) 3); + blockInfo.setBlockCollectionId(inodeId); + + Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId); + bm.blocksMap.addBlockCollection(blockInfo, bc); + bm.markBlockReplicasAsCorrupt(blockInfo, blockInfo, + blockInfo.getGenerationStamp() + 1, + blockInfo.getNumBytes(), + new DatanodeStorageInfo[]{}); + BlockCollection mockedBc = Mockito.mock(BlockCollection.class); + Mockito.when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo}); + bm.checkRedundancy(mockedBc); + return blockInfo; + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java index de002f48573..286f4a45a31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java @@ -25,6 +25,7 @@ import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -500,46 +501,93 @@ public class TestDatanodeManager { "127.0.0.1:23456", bothAgain.get(1).getInfoAddr()); } - @Test - public void testPendingRecoveryTasks() throws IOException { + /** + * Verify the correctness of pending recovery process. + * + * @param numReplicationBlocks the number of replication blocks in the queue. + * @param numECBlocks number of EC blocks in the queue. + * @param maxTransfers the maxTransfer value. + * @param numReplicationTasks the number of replication tasks polled from + * the queue. + * @param numECTasks the number of EC tasks polled from the queue. 
+ * + * @throws IOException + */ + private void verifyPendingRecoveryTasks( + int numReplicationBlocks, int numECBlocks, + int maxTransfers, int numReplicationTasks, int numECTasks) + throws IOException { FSNamesystem fsn = Mockito.mock(FSNamesystem.class); Mockito.when(fsn.hasWriteLock()).thenReturn(true); Configuration conf = new Configuration(); DatanodeManager dm = Mockito.spy(mockDatanodeManager(fsn, conf)); - int maxTransfers = 20; - int numPendingTasks = 7; - int numECTasks = maxTransfers - numPendingTasks; - DatanodeDescriptor nodeInfo = Mockito.mock(DatanodeDescriptor.class); Mockito.when(nodeInfo.isRegistered()).thenReturn(true); Mockito.when(nodeInfo.getStorageInfos()) .thenReturn(new DatanodeStorageInfo[0]); - List pendingList = - Collections.nCopies(numPendingTasks, new BlockTargetPair(null, null)); - Mockito.when(nodeInfo.getReplicationCommand(maxTransfers)) - .thenReturn(pendingList); - List ecPendingList = - Collections.nCopies(numECTasks, null); + if (numReplicationBlocks > 0) { + Mockito.when(nodeInfo.getNumberOfReplicateBlocks()) + .thenReturn(numReplicationBlocks); + + List tasks = + Collections.nCopies( + Math.min(numReplicationTasks, numReplicationBlocks), + new BlockTargetPair(null, null)); + Mockito.when(nodeInfo.getReplicationCommand(numReplicationTasks)) + .thenReturn(tasks); + } + + if (numECBlocks > 0) { + Mockito.when(nodeInfo.getNumberOfBlocksToBeErasureCoded()) + .thenReturn(numECBlocks); + + List tasks = + Collections.nCopies(numECTasks, null); + Mockito.when(nodeInfo.getErasureCodeCommand(numECTasks)) + .thenReturn(tasks); + } - Mockito.when(nodeInfo.getErasureCodeCommand(numECTasks)) - .thenReturn(ecPendingList); DatanodeRegistration dnReg = Mockito.mock(DatanodeRegistration.class); Mockito.when(dm.getDatanode(dnReg)).thenReturn(nodeInfo); - DatanodeCommand[] cmds = dm.handleHeartbeat( dnReg, new StorageReport[1], "bp-123", 0, 0, 10, maxTransfers, 0, null, SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT); - assertEquals(2, cmds.length); - assertTrue(cmds[0] instanceof BlockCommand); - BlockCommand replicaCmd = (BlockCommand) cmds[0]; - assertEquals(numPendingTasks, replicaCmd.getBlocks().length); - assertEquals(numPendingTasks, replicaCmd.getTargets().length); - assertTrue(cmds[1] instanceof BlockECReconstructionCommand); - BlockECReconstructionCommand ecRecoveryCmd = - (BlockECReconstructionCommand) cmds[1]; - assertEquals(numECTasks, ecRecoveryCmd.getECTasks().size()); + long expectedNumCmds = Arrays.stream( + new int[]{numReplicationTasks, numECTasks}) + .filter(x -> x > 0) + .count(); + assertEquals(expectedNumCmds, cmds.length); + + int idx = 0; + if (numReplicationTasks > 0) { + assertTrue(cmds[idx] instanceof BlockCommand); + BlockCommand cmd = (BlockCommand) cmds[0]; + assertEquals(numReplicationTasks, cmd.getBlocks().length); + assertEquals(numReplicationTasks, cmd.getTargets().length); + idx++; + } + + if (numECTasks > 0) { + assertTrue(cmds[idx] instanceof BlockECReconstructionCommand); + BlockECReconstructionCommand cmd = + (BlockECReconstructionCommand) cmds[idx]; + assertEquals(numECTasks, cmd.getECTasks().size()); + } + + Mockito.verify(nodeInfo).getReplicationCommand(numReplicationTasks); + Mockito.verify(nodeInfo).getErasureCodeCommand(numECTasks); + } + + @Test + public void testPendingRecoveryTasks() throws IOException { + // Tasks are split according to the ratio between queue lengths. 
+ verifyPendingRecoveryTasks(20, 20, 20, 10, 10); + verifyPendingRecoveryTasks(40, 10, 20, 16, 4); + + // Approximately load tasks if the ratio between queue length is large. + verifyPendingRecoveryTasks(400, 1, 20, 20, 1); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java index 4ecfd50a30d..aaa48997eac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java @@ -330,8 +330,9 @@ public class TestReconstructStripedBlocksWithRackAwareness { // start decommissioning h9 boolean satisfied = bm.isPlacementPolicySatisfied(blockInfo); Assert.assertFalse(satisfied); - final DecommissionManager decomManager = - (DecommissionManager) Whitebox.getInternalState(dm, "decomManager"); + final DatanodeAdminManager decomManager = + (DatanodeAdminManager) Whitebox.getInternalState( + dm, "datanodeAdminManager"); cluster.getNamesystem().writeLock(); try { dn9.stopDecommission(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java index bcd8245c53b..fef0b45f39c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java @@ -100,7 +100,7 @@ public class TestReplicationPolicyConsiderLoad // returns false for (int i = 0; i < 3; i++) { DatanodeDescriptor d = dataNodes[i]; - dnManager.getDecomManager().startDecommission(d); + dnManager.getDatanodeAdminManager().startDecommission(d); d.setDecommissioned(); } assertEquals((double)load/3, dnManager.getFSClusterStats() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java new file mode 100644 index 00000000000..bdcbe7fffc7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java @@ -0,0 +1,212 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.net.*; +import org.apache.hadoop.hdfs.protocol.*; +import org.apache.hadoop.hdfs.protocol.datatransfer.*; +import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory; +import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.net.ServerSocketUtil; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.DataChecksum; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.mockito.ArgumentCaptor; + +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.PrintStream; +import java.net.ServerSocket; +import java.net.Socket; + +import static org.junit.Assert.fail; +import static org.mockito.Mockito.*; + +/** + * Mock-based unit test to verify that DataXceiver does not fail when no + * storageId or targetStorageTypes are passed - as is the case in Hadoop 2.x. + */ +public class TestDataXceiverBackwardsCompat { + @Rule + public Timeout timeout = new Timeout(60000); + + private void failWithException(String message, Exception exception) { + ByteArrayOutputStream buffer = new ByteArrayOutputStream(); + exception.printStackTrace(new PrintStream(buffer)); + String stacktrace = buffer.toString(); + + fail(message + ": " + exception + "; " + exception.getMessage() + "\n" + + stacktrace); + } + + /** + * Used for mocking DataNode. Mockito does not provide a way to mock + * properties (like data or saslClient) so we have to manually set up mocks + * of those properties inside our own class. + */ + public class NullDataNode extends DataNode { + public NullDataNode(Configuration conf, OutputStream out, int port) throws + Exception { + super(conf); + data = (FsDatasetSpi)mock(FsDatasetSpi.class); + saslClient = mock(SaslDataTransferClient.class); + + IOStreamPair pair = new IOStreamPair(null, out); + + doReturn(pair).when(saslClient).socketSend( + any(Socket.class), any(OutputStream.class), any(InputStream.class), + any(DataEncryptionKeyFactory.class), any(Token.class), + any(DatanodeID.class)); + doReturn(mock(ReplicaHandler.class)).when(data).createTemporary( + any(StorageType.class), any(String.class), any(ExtendedBlock.class), + anyBoolean()); + + new Thread(new NullServer(port)).start(); + } + + @Override + public DatanodeRegistration getDNRegistrationForBP(String bpid) + throws IOException { + return null; + } + + @Override + public Socket newSocket() throws IOException { + return new Socket(); + } + + /** + * Class for accepting incoming an incoming connection. 
Does not read + * data or repeat in any way: simply allows a single client to connect to + * a local URL. + */ + private class NullServer implements Runnable { + + private ServerSocket serverSocket; + + NullServer(int port) throws IOException { + serverSocket = new ServerSocket(port); + } + + @Override + public void run() { + try { + serverSocket.accept(); + serverSocket.close(); + LOG.info("Client connection accepted by NullServer"); + } catch (Exception e) { + LOG.info("Exception in NullServer: " + e + "; " + e.getMessage()); + } + } + } + } + + @Test + public void testBackwardsCompat() throws Exception { + Peer peer = mock(Peer.class); + doReturn("").when(peer).getRemoteAddressString(); + Configuration conf = new Configuration(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + int port = ServerSocketUtil.getPort(1234, 10); + DataNode dataNode = new NullDataNode(conf, out, port); + DataXceiverServer server = new DataXceiverServer( + mock(PeerServer.class), conf, dataNode); + DataXceiver xceiver = spy(DataXceiver.create(peer, dataNode, server)); + + BlockReceiver mockBlockReceiver = mock(BlockReceiver.class); + doReturn(mock(Replica.class)).when(mockBlockReceiver).getReplica(); + + DatanodeInfo[] targets = {mock(DatanodeInfo.class)}; + doReturn("localhost:" + port).when(targets[0]).getXferAddr(true); + doReturn("127.0.0.1:" + port).when(targets[0]).getXferAddr(false); + StorageType[] storageTypes = {StorageType.RAM_DISK}; + + doReturn(mockBlockReceiver).when(xceiver).getBlockReceiver( + any(ExtendedBlock.class), any(StorageType.class), + any(DataInputStream.class), anyString(), anyString(), + any(BlockConstructionStage.class), anyLong(), anyLong(), anyLong(), + anyString(), any(DatanodeInfo.class), any(DataNode.class), + any(DataChecksum.class), any(CachingStrategy.class), + ArgumentCaptor.forClass(Boolean.class).capture(), + anyBoolean(), any(String.class)); + + Token token = (Token)mock( + Token.class); + doReturn("".getBytes()).when(token).getIdentifier(); + doReturn("".getBytes()).when(token).getPassword(); + doReturn(new Text("")).when(token).getKind(); + doReturn(new Text("")).when(token).getService(); + + DataChecksum checksum = mock(DataChecksum.class); + doReturn(DataChecksum.Type.NULL).when(checksum).getChecksumType(); + + DatanodeInfo datanodeInfo = mock(DatanodeInfo.class); + doReturn("localhost").when(datanodeInfo).getHostName(); + doReturn("127.0.0.1").when(datanodeInfo).getIpAddr(); + doReturn(DatanodeInfo.AdminStates.NORMAL).when(datanodeInfo) + .getAdminState(); + + Exception storedException = null; + try { + xceiver.writeBlock( + new ExtendedBlock("Dummy-pool", 0L), + StorageType.RAM_DISK, + token, + "Dummy-Client", + targets, + storageTypes, + datanodeInfo, + BlockConstructionStage.PIPELINE_SETUP_CREATE, + 0, 0, 0, 0, + checksum, + CachingStrategy.newDefaultStrategy(), + false, + false, new boolean[0], null, new String[0]); + } catch (Exception e) { + // Not enough things have been mocked for this to complete without + // exceptions, but we want to make sure we can at least get as far as + // sending data to the server with null values for storageId and + // targetStorageTypes. 
+ storedException = e; + } + byte[] output = out.toByteArray(); + if (output.length == 0) { + if (storedException == null) { + failWithException("No output written, but no exception either (this " + + "shouldn't happen", storedException); + } else { + failWithException("Exception occurred before anything was written", + storedException); + } + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java index 077997f68a5..764a0dbddd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java @@ -283,7 +283,7 @@ public class TestStorageMover { private void verifyRecursively(final Path parent, final HdfsFileStatus status) throws Exception { - if (status.isDir()) { + if (status.isDirectory()) { Path fullPath = parent == null ? new Path("/") : status.getFullPath(parent); DirectoryListing children = dfs.getClient().listPaths( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java index 52a68584dd6..646e80083dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java @@ -21,6 +21,7 @@ import static org.junit.Assert.*; import java.io.IOException; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; @@ -141,6 +142,11 @@ public final class AclTestHelpers { } } + public static void assertPermission(FileSystem fs, Path pathToCheck, + short perm) throws IOException { + assertPermission(fs, pathToCheck, perm, (perm & (1 << 12)) != 0); + } + /** * Asserts the value of the FsPermission bits on the inode of a specific path. 
* @@ -150,10 +156,11 @@ public final class AclTestHelpers { * @throws IOException thrown if there is an I/O error */ public static void assertPermission(FileSystem fs, Path pathToCheck, - short perm) throws IOException { + short perm, boolean hasAcl) throws IOException { short filteredPerm = (short)(perm & 01777); - FsPermission fsPermission = fs.getFileStatus(pathToCheck).getPermission(); + FileStatus stat = fs.getFileStatus(pathToCheck); + FsPermission fsPermission = stat.getPermission(); assertEquals(filteredPerm, fsPermission.toShort()); - assertEquals(((perm & (1 << 12)) != 0), fsPermission.getAclBit()); + assertEquals(hasAcl, stat.hasAcl()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java index 60b0ab168d2..ee922176262 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java @@ -32,6 +32,7 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; @@ -886,6 +887,11 @@ public abstract class FSAclBaseTest { FsPermission perm = inode.getFsPermission(); assertNotNull(perm); assertEquals(0755, perm.toShort()); + FileStatus stat = fs.getFileStatus(path); + assertFalse(stat.hasAcl()); + assertFalse(stat.isEncrypted()); + assertFalse(stat.isErasureCoded()); + // backwards-compat check assertEquals(0755, perm.toExtendedShort()); assertAclFeature(false); } @@ -903,7 +909,7 @@ public abstract class FSAclBaseTest { assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); - assertPermission(filePath, (short)010640); + assertPermission(filePath, (short)010660); assertAclFeature(filePath, true); } @@ -1003,7 +1009,7 @@ public abstract class FSAclBaseTest { aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE) }, returned); - assertPermission(dirPath, (short)010750); + assertPermission(dirPath, (short)010770); assertAclFeature(dirPath, true); } @@ -1120,7 +1126,7 @@ public abstract class FSAclBaseTest { s = fs.getAclStatus(filePath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission(filePath, (short)010640); + assertPermission(filePath, (short)010660); assertAclFeature(filePath, true); } @@ -1149,7 +1155,7 @@ public abstract class FSAclBaseTest { s = fs.getAclStatus(subdirPath); returned = s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected, returned); - assertPermission(subdirPath, (short)010750); + assertPermission(subdirPath, (short)010770); assertAclFeature(subdirPath, true); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index c5566993285..623c444f714 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -424,9 +424,9 @@ public class TestAddStripedBlocks { cluster.getDataNodes().get(3).getDatanodeId(), reports[0]); BlockManagerTestUtil.updateState(ns.getBlockManager()); // the total number of corrupted block info is still 1 - Assert.assertEquals(1, ns.getCorruptECBlockGroupsStat()); + Assert.assertEquals(1, ns.getCorruptECBlockGroups()); Assert.assertEquals(1, ns.getCorruptReplicaBlocks()); - Assert.assertEquals(0, ns.getCorruptBlocksStat()); + Assert.assertEquals(0, ns.getCorruptReplicatedBlocks()); // 2 internal blocks corrupted Assert.assertEquals(2, bm.getCorruptReplicas(stored).size()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java index 11d74312379..cfebff7bd47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java @@ -50,7 +50,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; -import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminManager; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.tools.DFSAdmin; @@ -100,7 +100,7 @@ public class TestDecommissioningStatus { fileSys = cluster.getFileSystem(); cluster.getNamesystem().getBlockManager().getDatanodeManager() .setHeartbeatExpireInterval(3000); - Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG); + Logger.getLogger(DatanodeAdminManager.class).setLevel(Level.DEBUG); LOG = Logger.getLogger(TestDecommissioningStatus.class); } @@ -344,7 +344,7 @@ public class TestDecommissioningStatus { */ @Test(timeout=120000) public void testDecommissionDeadDN() throws Exception { - Logger log = Logger.getLogger(DecommissionManager.class); + Logger log = Logger.getLogger(DatanodeAdminManager.class); log.setLevel(Level.DEBUG); DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId(); String dnName = dnID.getXferAddr(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java index eab1199d976..205593f597e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java @@ -156,11 +156,11 @@ public class TestDefaultBlockPlacementPolicy { DatanodeDescriptor dnd3 = dnm.getDatanode( cluster.getDataNodes().get(3).getDatanodeId()); assertEquals(dnd3.getNetworkLocation(), clientRack); - dnm.getDecomManager().startDecommission(dnd3); + dnm.getDatanodeAdminManager().startDecommission(dnd3); try { testPlacement(clientMachine, 
clientRack, false); } finally { - dnm.getDecomManager().stopDecommission(dnd3); + dnm.getDatanodeAdminManager().stopDecommission(dnd3); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java index fe95734cb37..d769f8bc6b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java @@ -75,7 +75,7 @@ public class TestEnabledECPolicies { String defaultECPolicies = conf.get( DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT); - expectValidPolicy(defaultECPolicies, 0); + expectValidPolicy(defaultECPolicies, 1); } @Test @@ -98,10 +98,10 @@ public class TestEnabledECPolicies { String ecPolicyName = StripedFileTestUtil.getDefaultECPolicy().getName(); expectValidPolicy(ecPolicyName, 1); expectValidPolicy(ecPolicyName + ", ", 1); - expectValidPolicy(",", 0); + expectValidPolicy(",", 1); expectValidPolicy(", " + ecPolicyName, 1); - expectValidPolicy(" ", 0); - expectValidPolicy(" , ", 0); + expectValidPolicy(" ", 1); + expectValidPolicy(" , ", 1); } @Test @@ -147,7 +147,7 @@ public class TestEnabledECPolicies { Assert.assertTrue("Did not find specified EC policy " + p.getName(), found.contains(p.getName())); } - Assert.assertEquals(enabledPolicies.length, found.size()); + Assert.assertEquals(enabledPolicies.length, found.size()-1); // Check that getEnabledPolicyByName only returns enabled policies for (ErasureCodingPolicy p: SystemErasureCodingPolicies.getPolicies()) { if (found.contains(p.getName())) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index 22c40fb2e1a..9256056b4e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -723,4 +723,91 @@ public class TestFSImage { .getBlockType()); assertEquals(defaultBlockType, BlockType.CONTIGUOUS); } + + /** + * Test if a INodeFile under a replication EC policy directory + * can be saved by FSImageSerialization and loaded by FSImageFormat#Loader. 
+ */ + @Test + public void testSaveAndLoadFileUnderReplicationPolicyDir() + throws IOException { + Configuration conf = new Configuration(); + DFSTestUtil.enableAllECPolicies(conf); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).build(); + cluster.waitActive(); + FSNamesystem fsn = cluster.getNamesystem(); + DistributedFileSystem fs = cluster.getFileSystem(); + ErasureCodingPolicy replicaPolicy = + SystemErasureCodingPolicies.getReplicationPolicy(); + ErasureCodingPolicy defaultEcPolicy = + StripedFileTestUtil.getDefaultECPolicy(); + + final Path ecDir = new Path("/ec"); + final Path replicaDir = new Path(ecDir, "replica"); + final Path replicaFile1 = new Path(replicaDir, "f1"); + final Path replicaFile2 = new Path(replicaDir, "f2"); + + // create root directory + fs.mkdir(ecDir, null); + fs.setErasureCodingPolicy(ecDir, defaultEcPolicy.getName()); + + // create directory, and set replication Policy + fs.mkdir(replicaDir, null); + fs.setErasureCodingPolicy(replicaDir, replicaPolicy.getName()); + + // create an empty file f1 + fs.create(replicaFile1).close(); + + // create an under-construction file f2 + FSDataOutputStream out = fs.create(replicaFile2, (short) 2); + out.writeBytes("hello"); + ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet + .of(SyncFlag.UPDATE_LENGTH)); + + // checkpoint + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + fs.saveNamespace(); + fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + + cluster.restartNameNode(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + + assertTrue(fs.getFileStatus(ecDir).isDirectory()); + assertTrue(fs.getFileStatus(replicaDir).isDirectory()); + assertTrue(fs.exists(replicaFile1)); + assertTrue(fs.exists(replicaFile2)); + + // check directories + assertEquals("Directory should have default EC policy.", + defaultEcPolicy, fs.getErasureCodingPolicy(ecDir)); + assertEquals("Directory should hide replication EC policy.", + null, fs.getErasureCodingPolicy(replicaDir)); + + // check file1 + assertEquals("File should not have EC policy.", null, + fs.getErasureCodingPolicy(replicaFile1)); + // check internals of file2 + INodeFile file2Node = + fsn.dir.getINode4Write(replicaFile2.toString()).asFile(); + assertEquals("hello".length(), file2Node.computeFileSize()); + assertTrue(file2Node.isUnderConstruction()); + BlockInfo[] blks = file2Node.getBlocks(); + assertEquals(1, blks.length); + assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState()); + assertEquals("File should return expected replication factor.", + 2, blks[0].getReplication()); + assertEquals("File should not have EC policy.", null, + fs.getErasureCodingPolicy(replicaFile2)); + // check lease manager + Lease lease = fsn.leaseManager.getLease(file2Node); + Assert.assertNotNull(lease); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java index 48d3dea81c3..d9c24d9c9f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java @@ -138,13 +138,15 @@ public class TestFSImageWithAcl { aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, READ_EXECUTE) }; + short permExpected = (short)010775; + 
AclEntry[] fileReturned = fs.getAclStatus(filePath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(fileExpected, fileReturned); AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)010755); + assertPermission(fs, subdirPath, permExpected); restart(fs, persistNamespace); @@ -154,7 +156,7 @@ public class TestFSImageWithAcl { subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)010755); + assertPermission(fs, subdirPath, permExpected); aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE)); fs.modifyAclEntries(dirPath, aclSpec); @@ -165,7 +167,7 @@ public class TestFSImageWithAcl { subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)010755); + assertPermission(fs, subdirPath, permExpected); restart(fs, persistNamespace); @@ -175,7 +177,7 @@ public class TestFSImageWithAcl { subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)010755); + assertPermission(fs, subdirPath, permExpected); fs.removeAcl(dirPath); @@ -185,7 +187,7 @@ public class TestFSImageWithAcl { subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)010755); + assertPermission(fs, subdirPath, permExpected); restart(fs, persistNamespace); @@ -195,7 +197,7 @@ public class TestFSImageWithAcl { subdirReturned = fs.getAclStatus(subdirPath).getEntries() .toArray(new AclEntry[0]); Assert.assertArrayEquals(subdirExpected, subdirReturned); - assertPermission(fs, subdirPath, (short)010755); + assertPermission(fs, subdirPath, permExpected); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 6c755e78ece..7cdbde21d0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -48,6 +48,7 @@ import java.nio.channels.FileChannel; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -946,7 +947,8 @@ public class TestFsck { DatanodeDescriptor dnDesc0 = dnm.getDatanode( cluster.getDataNodes().get(0).getDatanodeId()); - bm.getDatanodeManager().getDecomManager().startDecommission(dnDesc0); + bm.getDatanodeManager().getDatanodeAdminManager().startDecommission( + dnDesc0); final String dn0Name = dnDesc0.getXferAddr(); // check the replica status while decommissioning @@ -999,7 +1001,7 @@ public class TestFsck { cluster.getDataNodes().get(1).getDatanodeId()); final String dn1Name = dnDesc1.getXferAddr(); - bm.getDatanodeManager().getDecomManager().startMaintenance(dnDesc1, + bm.getDatanodeManager().getDatanodeAdminManager().startMaintenance(dnDesc1, Long.MAX_VALUE); // check the 
replica status while entering maintenance @@ -1355,7 +1357,8 @@ public class TestFsck { byte storagePolicy = 0; HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, - blockSize, modTime, accessTime, perms, owner, group, symlink, + blockSize, modTime, accessTime, perms, + EnumSet.noneOf(HdfsFileStatus.Flags.class), owner, group, symlink, path, fileId, numChildren, null, storagePolicy, null); Result replRes = new ReplicationResult(conf); Result ecRes = new ErasureCodingResult(conf); @@ -1537,7 +1540,7 @@ public class TestFsck { fsn.writeUnlock(); } DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0); - bm.getDatanodeManager().getDecomManager().startDecommission(dn); + bm.getDatanodeManager().getDatanodeAdminManager().startDecommission(dn); String dnName = dn.getXferAddr(); //wait for decommission start @@ -1617,7 +1620,7 @@ public class TestFsck { DatanodeManager dnm = bm.getDatanodeManager(); DatanodeDescriptor dn = dnm.getDatanode(cluster.getDataNodes().get(0) .getDatanodeId()); - bm.getDatanodeManager().getDecomManager().startMaintenance(dn, + bm.getDatanodeManager().getDatanodeAdminManager().startMaintenance(dn, Long.MAX_VALUE); final String dnName = dn.getXferAddr(); @@ -1852,7 +1855,7 @@ public class TestFsck { } DatanodeDescriptor dn = bc.getBlocks()[0] .getDatanode(0); - bm.getDatanodeManager().getDecomManager().startDecommission(dn); + bm.getDatanodeManager().getDatanodeAdminManager().startDecommission(dn); String dnName = dn.getXferAddr(); // wait for decommission start @@ -1931,7 +1934,7 @@ public class TestFsck { DatanodeManager dnm = bm.getDatanodeManager(); DatanodeDescriptor dn = dnm.getDatanode(cluster.getDataNodes().get(0) .getDatanodeId()); - bm.getDatanodeManager().getDecomManager().startMaintenance(dn, + bm.getDatanodeManager().getDatanodeAdminManager().startMaintenance(dn, Long.MAX_VALUE); final String dnName = dn.getXferAddr(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index 0303a5d8d7f..8cc1433cef5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -155,6 +155,8 @@ public class TestMetaSave { line = reader.readLine(); assertTrue(line.equals("Metasave: Blocks waiting for reconstruction: 0")); line = reader.readLine(); + assertTrue(line.equals("Metasave: Blocks currently missing: 0")); + line = reader.readLine(); assertTrue(line.equals("Mis-replicated blocks that have been postponed:")); line = reader.readLine(); assertTrue(line.equals("Metasave: Blocks being reconstructed: 0")); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 32c2a4926aa..937bb61c7ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -370,7 +370,7 @@ public class TestNameNodeMXBean { cluster.getDataNodes().get(0).getDisplayName()); fsn.getBlockManager().getDatanodeManager().refreshNodes(conf); - // Wait for the DecommissionManager to complete refresh nodes 
+ // Wait for the DatanodeAdminManager to complete refresh nodes GenericTestUtils.waitFor(new Supplier() { @Override public Boolean get() { @@ -399,7 +399,7 @@ public class TestNameNodeMXBean { assertEquals(0, fsn.getNumDecomLiveDataNodes()); assertEquals(0, fsn.getNumDecomDeadDataNodes()); - // Wait for the DecommissionManager to complete check + // Wait for the DatanodeAdminManager to complete check GenericTestUtils.waitFor(new Supplier() { @Override public Boolean get() { @@ -501,7 +501,7 @@ public class TestNameNodeMXBean { assertEquals(0, fsn.getNumInMaintenanceDeadDataNodes()); } - // Wait for the DecommissionManager to complete check + // Wait for the DatanodeAdminManager to complete check // and perform state transition while (fsn.getNumInMaintenanceLiveDataNodes() != 1) { Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); @@ -809,10 +809,10 @@ public class TestNameNodeMXBean { long totalMissingBlocks = cluster.getNamesystem().getMissingBlocksCount(); Long replicaMissingBlocks = (Long) mbs.getAttribute(replStateMBeanName, - "MissingBlocksStat"); + "MissingReplicatedBlocks"); Long ecMissingBlocks = (Long) mbs.getAttribute(ecBlkGrpStateMBeanName, - "MissingECBlockGroupsStat"); + "MissingECBlockGroups"); assertEquals("Unexpected total missing blocks!", expectedMissingBlockCount, totalMissingBlocks); assertEquals("Unexpected total missing blocks!", @@ -826,10 +826,10 @@ public class TestNameNodeMXBean { cluster.getNamesystem().getCorruptReplicaBlocks(); Long replicaCorruptBlocks = (Long) mbs.getAttribute(replStateMBeanName, - "CorruptBlocksStat"); + "CorruptReplicatedBlocks"); Long ecCorruptBlocks = (Long) mbs.getAttribute(ecBlkGrpStateMBeanName, - "CorruptECBlockGroupsStat"); + "CorruptECBlockGroups"); assertEquals("Unexpected total corrupt blocks!", expectedCorruptBlockCount, totalCorruptBlocks); assertEquals("Unexpected total corrupt blocks!", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java index 7ee49a96524..92b96a533c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java @@ -138,5 +138,30 @@ public class TestNameNodeOptionParsing { } } } - + + @Test + public void testFormat() { + String[] args = new String[] {"-format"}; + StartupOption opt = NameNode.parseArguments(args); + assertEquals(StartupOption.FORMAT, opt); + assertEquals(true, opt.getInteractiveFormat()); + assertEquals(false, opt.getForceFormat()); + + args = new String[] {"-format", "-nonInteractive"}; + opt = NameNode.parseArguments(args); + assertEquals(StartupOption.FORMAT, opt); + assertEquals(false, opt.getInteractiveFormat()); + assertEquals(false, opt.getForceFormat()); + + args = new String[] {"-format", "-nonInteractive", "-force"}; + opt = NameNode.parseArguments(args); + assertEquals(StartupOption.FORMAT, opt); + assertEquals(false, opt.getInteractiveFormat()); + assertEquals(true, opt.getForceFormat()); + + // test error condition + args = new String[] {"-nonInteractive"}; + opt = NameNode.parseArguments(args); + assertNull(opt); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java index 6b553df117c..c0de63a8f5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java @@ -40,6 +40,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT; public class TestNameNodeReconfigure { @@ -48,10 +49,13 @@ public class TestNameNodeReconfigure { .getLog(TestNameNodeReconfigure.class); private MiniDFSCluster cluster; + private final int customizedBlockInvalidateLimit = 500; @Before public void setUp() throws IOException { Configuration conf = new HdfsConfiguration(); + conf.setInt(DFS_BLOCK_INVALIDATE_LIMIT_KEY, + customizedBlockInvalidateLimit); cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); } @@ -212,6 +216,38 @@ public class TestNameNodeReconfigure { datanodeManager.getHeartbeatRecheckInterval()); } + @Test + public void testBlockInvalidateLimitAfterReconfigured() + throws ReconfigurationException { + final NameNode nameNode = cluster.getNameNode(); + final DatanodeManager datanodeManager = nameNode.namesystem + .getBlockManager().getDatanodeManager(); + + assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set", + customizedBlockInvalidateLimit, + datanodeManager.getBlockInvalidateLimit()); + + nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, + Integer.toString(6)); + + // 20 * 6 = 120 < 500 + // Invalid block limit should stay same as before after reconfiguration. 
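    // The arithmetic in the comments above matches a recomputation of the
    // form sketched here (an assumed reading of the behaviour, not code
    // copied from DatanodeManager):
    //   int derived = 20 * heartbeatIntervalSeconds;
    //   int effectiveLimit = Math.max(derived, configuredBlockInvalidateLimit);
    // A 6 second interval derives 120, which loses to the customized 500,
    // while a 50 second interval derives 1000 and becomes the new limit, as
    // the following assertions expect.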
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + + " is not honored after reconfiguration", + customizedBlockInvalidateLimit, + datanodeManager.getBlockInvalidateLimit()); + + nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, + Integer.toString(50)); + + // 20 * 50 = 1000 > 500 + // Invalid block limit should be reset to 1000 + assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + + " is not reconfigured correctly", + 1000, + datanodeManager.getBlockInvalidateLimit()); + } + @After public void shutDown() throws IOException { if (cluster != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java index c9fe2c3d570..b7f0cfc1f63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java @@ -349,18 +349,18 @@ public class TestNamenodeCapacityReport { private void startDecommissionOrMaintenance(DatanodeManager dnm, DatanodeDescriptor dnd, boolean decomm) { if (decomm) { - dnm.getDecomManager().startDecommission(dnd); + dnm.getDatanodeAdminManager().startDecommission(dnd); } else { - dnm.getDecomManager().startMaintenance(dnd, Long.MAX_VALUE); + dnm.getDatanodeAdminManager().startMaintenance(dnd, Long.MAX_VALUE); } } private void stopDecommissionOrMaintenance(DatanodeManager dnm, DatanodeDescriptor dnd, boolean decomm) { if (decomm) { - dnm.getDecomManager().stopDecommission(dnd); + dnm.getDatanodeAdminManager().stopDecommission(dnd); } else { - dnm.getDecomManager().stopMaintenance(dnd); + dnm.getDatanodeAdminManager().stopMaintenance(dnd); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java index 540ae635031..02075f045d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java @@ -419,7 +419,7 @@ public class TestReconstructStripedBlocks { // Verify low redundancy count matching EC block groups count BlockManagerTestUtil.updateState(bm); - assertEquals(blockGroups, bm.getLowRedundancyECBlockGroupsStat()); + assertEquals(blockGroups, bm.getLowRedundancyECBlockGroups()); DFSTestUtil.verifyClientStats(conf, dfsCluster); @@ -429,7 +429,7 @@ public class TestReconstructStripedBlocks { // Verify pending reconstruction count assertEquals(blockGroups, getNumberOfBlocksToBeErasureCoded(dfsCluster)); - assertEquals(0, bm.getLowRedundancyECBlockGroupsStat()); + assertEquals(0, bm.getLowRedundancyECBlockGroups()); DFSTestUtil.verifyClientStats(conf, dfsCluster); } finally { dfsCluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 29a6064c705..94172bbe696 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -450,7 +450,7 @@ public class TestStartup { namenode.getNamesystem().mkdirs("/test", new PermissionStatus("hairong", null, FsPermission.getDefault()), true); NamenodeProtocols nnRpc = namenode.getRpcServer(); - assertTrue(nnRpc.getFileInfo("/test").isDir()); + assertTrue(nnRpc.getFileInfo("/test").isDirectory()); nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false); nnRpc.saveNamespace(0, 0); namenode.stop(); @@ -481,7 +481,7 @@ public class TestStartup { private void checkNameSpace(Configuration conf) throws IOException { NameNode namenode = new NameNode(conf); NamenodeProtocols nnRpc = namenode.getRpcServer(); - assertTrue(nnRpc.getFileInfo("/test").isDir()); + assertTrue(nnRpc.getFileInfo("/test").isDirectory()); nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false); nnRpc.saveNamespace(0, 0); namenode.stop(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java index 5a2aff90a7b..b3bb3dda201 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java @@ -124,7 +124,7 @@ public class TestEditLogTailer { for (int i = 0; i < DIRS_TO_MAKE / 2; i++) { assertTrue(NameNodeAdapter.getFileInfo(nn2, - getDirPath(i), false).isDir()); + getDirPath(i), false).isDirectory()); } for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) { @@ -137,7 +137,7 @@ public class TestEditLogTailer { for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) { assertTrue(NameNodeAdapter.getFileInfo(nn2, - getDirPath(i), false).isDir()); + getDirPath(i), false).isDirectory()); } } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java index 38c2b2da4a0..93c717c3eb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java @@ -205,7 +205,7 @@ public class TestFailureToReadEdits { TEST_DIR1, false)); // Should have been successfully created. assertTrue(NameNodeAdapter.getFileInfo(nn1, - TEST_DIR2, false).isDir()); + TEST_DIR2, false).isDirectory()); // Null because it hasn't been created yet. assertNull(NameNodeAdapter.getFileInfo(nn1, TEST_DIR3, false)); @@ -219,10 +219,10 @@ public class TestFailureToReadEdits { TEST_DIR1, false)); // Should have been successfully created. assertTrue(NameNodeAdapter.getFileInfo(nn1, - TEST_DIR2, false).isDir()); + TEST_DIR2, false).isDirectory()); // Should now have been successfully created. 
assertTrue(NameNodeAdapter.getFileInfo(nn1, - TEST_DIR3, false).isDir()); + TEST_DIR3, false).isDirectory()); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java index 856ed8fbc86..8eeb853c77d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java @@ -128,7 +128,7 @@ public class TestInitializeSharedEdits { HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0), cluster.getNameNode(1)); assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), - newPath.toString(), false).isDir()); + newPath.toString(), false).isDirectory()); } finally { if (fs != null) { fs.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index f598d8c6adf..b983fd16262 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.metrics; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystemTestHelper; @@ -41,7 +42,9 @@ import java.io.DataInputStream; import java.io.File; import java.io.IOException; import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; import java.util.EnumSet; +import java.util.List; import java.util.Random; import com.google.common.collect.ImmutableList; @@ -69,12 +72,15 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.MockNameNodeResourceChecker; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; import org.apache.hadoop.hdfs.tools.NNHAServiceTarget; +import org.apache.hadoop.hdfs.util.HostsFileWriter; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; @@ -115,6 +121,15 @@ public class TestNameNodeMetrics { CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1); CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, DFS_REDUNDANCY_INTERVAL); + // Set it long enough to essentially disable unless we manually call it + // Used for decommissioning DataNode metrics + CONF.setTimeDuration( + MiniDFSCluster.DFS_NAMENODE_DECOMMISSION_INTERVAL_TESTING_KEY, 999, + 
TimeUnit.DAYS); + // Next two configs used for checking failed volume metrics + CONF.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, + 10, TimeUnit.MILLISECONDS); + CONF.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1); CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, DFS_REDUNDANCY_INTERVAL); CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, @@ -133,6 +148,7 @@ public class TestNameNodeMetrics { private DistributedFileSystem fs; private final Random rand = new Random(); private FSNamesystem namesystem; + private HostsFileWriter hostsFileWriter; private BlockManager bm; private Path ecDir; @@ -142,6 +158,8 @@ public class TestNameNodeMetrics { @Before public void setUp() throws Exception { + hostsFileWriter = new HostsFileWriter(); + hostsFileWriter.initialize(CONF, "temp/decommission"); cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(DATANODE_COUNT) .build(); cluster.waitActive(); @@ -161,6 +179,10 @@ public class TestNameNodeMetrics { MetricsRecordBuilder rb = getMetrics(source); assertQuantileGauges("GetGroups1s", rb); } + if (hostsFileWriter != null) { + hostsFileWriter.cleanup(); + hostsFileWriter = null; + } if (cluster != null) { cluster.shutdown(); cluster = null; @@ -235,6 +257,96 @@ public class TestNameNodeMetrics { .getBlockManager()); assertGauge("StaleDataNodes", 0, getMetrics(NS_METRICS)); } + + /** + * Test metrics associated with volume failures. + */ + @Test + public void testVolumeFailures() throws Exception { + assertGauge("VolumeFailuresTotal", 0, getMetrics(NS_METRICS)); + assertGauge("EstimatedCapacityLostTotal", 0L, getMetrics(NS_METRICS)); + DataNode dn = cluster.getDataNodes().get(0); + FsDatasetSpi.FsVolumeReferences volumeReferences = + DataNodeTestUtils.getFSDataset(dn).getFsVolumeReferences(); + FsVolumeImpl fsVolume = (FsVolumeImpl) volumeReferences.get(0); + File dataDir = new File(fsVolume.getBaseURI()); + long capacity = fsVolume.getCapacity(); + volumeReferences.close(); + DataNodeTestUtils.injectDataDirFailure(dataDir); + DataNodeTestUtils.waitForDiskError(dn, fsVolume); + DataNodeTestUtils.triggerHeartbeat(dn); + BlockManagerTestUtil.checkHeartbeat(bm); + assertGauge("VolumeFailuresTotal", 1, getMetrics(NS_METRICS)); + assertGauge("EstimatedCapacityLostTotal", capacity, getMetrics(NS_METRICS)); + } + + /** + * Test metrics associated with liveness and decommission status of DataNodes. 
+ */ + @Test + public void testDataNodeLivenessAndDecom() throws Exception { + List dataNodes = cluster.getDataNodes(); + DatanodeDescriptor[] dnDescriptors = new DatanodeDescriptor[DATANODE_COUNT]; + String[] dnAddresses = new String[DATANODE_COUNT]; + for (int i = 0; i < DATANODE_COUNT; i++) { + dnDescriptors[i] = bm.getDatanodeManager() + .getDatanode(dataNodes.get(i).getDatanodeId()); + dnAddresses[i] = dnDescriptors[i].getXferAddr(); + } + // First put all DNs into include + hostsFileWriter.initIncludeHosts(dnAddresses); + bm.getDatanodeManager().refreshNodes(CONF); + assertGauge("NumDecomLiveDataNodes", 0, getMetrics(NS_METRICS)); + assertGauge("NumLiveDataNodes", DATANODE_COUNT, getMetrics(NS_METRICS)); + + // Now decommission one DN + hostsFileWriter.initExcludeHost(dnAddresses[0]); + bm.getDatanodeManager().refreshNodes(CONF); + assertGauge("NumDecommissioningDataNodes", 1, getMetrics(NS_METRICS)); + BlockManagerTestUtil.recheckDecommissionState(bm.getDatanodeManager()); + assertGauge("NumDecommissioningDataNodes", 0, getMetrics(NS_METRICS)); + assertGauge("NumDecomLiveDataNodes", 1, getMetrics(NS_METRICS)); + assertGauge("NumLiveDataNodes", DATANODE_COUNT, getMetrics(NS_METRICS)); + + // Now kill all DNs by expiring their heartbeats + for (int i = 0; i < DATANODE_COUNT; i++) { + DataNodeTestUtils.setHeartbeatsDisabledForTests(dataNodes.get(i), true); + long expireInterval = CONF.getLong( + DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, + DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT) * 2L + + CONF.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, + DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 10 * 1000L; + DFSTestUtil.resetLastUpdatesWithOffset(dnDescriptors[i], + -(expireInterval + 1)); + } + BlockManagerTestUtil.checkHeartbeat(bm); + assertGauge("NumDecomLiveDataNodes", 0, getMetrics(NS_METRICS)); + assertGauge("NumDecomDeadDataNodes", 1, getMetrics(NS_METRICS)); + assertGauge("NumLiveDataNodes", 0, getMetrics(NS_METRICS)); + assertGauge("NumDeadDataNodes", DATANODE_COUNT, getMetrics(NS_METRICS)); + + // Now remove the decommissioned DN altogether + String[] includeHosts = new String[dnAddresses.length - 1]; + for (int i = 0; i < includeHosts.length; i++) { + includeHosts[i] = dnAddresses[i + 1]; + } + hostsFileWriter.initIncludeHosts(includeHosts); + hostsFileWriter.initExcludeHosts(new ArrayList<>()); + bm.getDatanodeManager().refreshNodes(CONF); + assertGauge("NumDecomLiveDataNodes", 0, getMetrics(NS_METRICS)); + assertGauge("NumDecomDeadDataNodes", 0, getMetrics(NS_METRICS)); + assertGauge("NumLiveDataNodes", 0, getMetrics(NS_METRICS)); + assertGauge("NumDeadDataNodes", DATANODE_COUNT - 1, getMetrics(NS_METRICS)); + + // Finally mark the remaining DNs as live again + for (int i = 1; i < dataNodes.size(); i++) { + DataNodeTestUtils.setHeartbeatsDisabledForTests(dataNodes.get(i), false); + DFSTestUtil.resetLastUpdatesWithOffset(dnDescriptors[i], 0); + } + BlockManagerTestUtil.checkHeartbeat(bm); + assertGauge("NumLiveDataNodes", DATANODE_COUNT - 1, getMetrics(NS_METRICS)); + assertGauge("NumDeadDataNodes", 0, getMetrics(NS_METRICS)); + } /** Test metrics associated with addition of a file */ @Test @@ -317,27 +429,27 @@ public class TestNameNodeMetrics { namesystem.getUnderReplicatedBlocks()); assertEquals("Low redundancy metrics not matching!", namesystem.getLowRedundancyBlocks(), - namesystem.getLowRedundancyBlocksStat() + - namesystem.getLowRedundancyECBlockGroupsStat()); + namesystem.getLowRedundancyReplicatedBlocks() + + 
namesystem.getLowRedundancyECBlockGroups()); assertEquals("Corrupt blocks metrics not matching!", namesystem.getCorruptReplicaBlocks(), - namesystem.getCorruptBlocksStat() + - namesystem.getCorruptECBlockGroupsStat()); + namesystem.getCorruptReplicatedBlocks() + + namesystem.getCorruptECBlockGroups()); assertEquals("Missing blocks metrics not matching!", namesystem.getMissingBlocksCount(), - namesystem.getMissingBlocksStat() + - namesystem.getMissingECBlockGroupsStat()); + namesystem.getMissingReplicatedBlocks() + + namesystem.getMissingECBlockGroups()); assertEquals("Missing blocks with replication factor one not matching!", namesystem.getMissingReplOneBlocksCount(), - namesystem.getMissingReplicationOneBlocksStat()); + namesystem.getMissingReplicationOneBlocks()); assertEquals("Bytes in future blocks metrics not matching!", namesystem.getBytesInFuture(), - namesystem.getBlocksBytesInFutureStat() + - namesystem.getECBlocksBytesInFutureStat()); + namesystem.getBytesInFutureReplicatedBlocks() + + namesystem.getBytesInFutureECBlockGroups()); assertEquals("Pending deletion blocks metrics not matching!", namesystem.getPendingDeletionBlocks(), - namesystem.getPendingDeletionBlocksStat() + - namesystem.getPendingDeletionECBlockGroupsStat()); + namesystem.getPendingDeletionReplicatedBlocks() + + namesystem.getPendingDeletionECBlockGroups()); } /** Corrupt a block and ensure metrics reflects it */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java index 7aaadf85d0b..537612ca29a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java @@ -23,9 +23,14 @@ import java.util.EnumSet; import java.util.HashSet; import java.util.Random; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSOutputStream; @@ -38,12 +43,15 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.util.Time; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; public class TestOpenFilesWithSnapshot { + private static final Log LOG = + LogFactory.getLog(TestOpenFilesWithSnapshot.class.getName()); private final Configuration conf = new Configuration(); MiniDFSCluster cluster = null; DistributedFileSystem fs = null; @@ -496,6 +504,345 @@ public class TestOpenFilesWithSnapshot { flumeOutputStream.close(); } + /** + * Test snapshot capturing open files when an open file with active lease + * is deleted by the client. 
+ */ + @Test (timeout = 120000) + public void testSnapshotsForOpenFilesAndDeletion() throws Exception { + // Construct the directory tree + final Path snapRootDir = new Path("/level_0_A"); + final String flumeFileName = "flume.log"; + final String hbaseFileName = "hbase.log"; + final String snap1Name = "snap_1"; + final String snap2Name = "snap_2"; + final String snap3Name = "snap_3"; + + // Create files and open streams + final Path flumeFile = new Path(snapRootDir, flumeFileName); + createFile(flumeFile); + final Path hbaseFile = new Path(snapRootDir, hbaseFileName); + createFile(hbaseFile); + FSDataOutputStream flumeOutputStream = fs.append(flumeFile); + FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile); + + // Create Snapshot S1 + final Path snap1Dir = SnapshotTestHelper.createSnapshot( + fs, snapRootDir, snap1Name); + final Path flumeS1Path = new Path(snap1Dir, flumeFileName); + final long flumeFileLengthAfterS1 = fs.getFileStatus(flumeFile).getLen(); + final Path hbaseS1Path = new Path(snap1Dir, hbaseFileName); + final long hbaseFileLengthAfterS1 = fs.getFileStatus(hbaseFile).getLen(); + + // Verify if Snap S1 file length is same as the the current versions + Assert.assertEquals(flumeFileLengthAfterS1, + fs.getFileStatus(flumeS1Path).getLen()); + Assert.assertEquals(hbaseFileLengthAfterS1, + fs.getFileStatus(hbaseS1Path).getLen()); + + long flumeFileWrittenDataLength = flumeFileLengthAfterS1; + long hbaseFileWrittenDataLength = hbaseFileLengthAfterS1; + int newWriteLength = (int) (BLOCKSIZE * 1.5); + byte[] buf = new byte[newWriteLength]; + Random random = new Random(); + random.nextBytes(buf); + + // Write more data to files + flumeFileWrittenDataLength += writeToStream(flumeOutputStream, buf); + hbaseFileWrittenDataLength += writeToStream(hbaseOutputStream, buf); + + // Create Snapshot S2 + final Path snap2Dir = SnapshotTestHelper.createSnapshot( + fs, snapRootDir, snap2Name); + final Path flumeS2Path = new Path(snap2Dir, flumeFileName); + final Path hbaseS2Path = new Path(snap2Dir, hbaseFileName); + + // Verify current files length are same as all data written till now + final long flumeFileLengthAfterS2 = fs.getFileStatus(flumeFile).getLen(); + Assert.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2); + final long hbaseFileLengthAfterS2 = fs.getFileStatus(hbaseFile).getLen(); + Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS2); + + // Verify if Snap S2 file length is same as the current versions + Assert.assertEquals(flumeFileLengthAfterS2, + fs.getFileStatus(flumeS2Path).getLen()); + Assert.assertEquals(hbaseFileLengthAfterS2, + fs.getFileStatus(hbaseS2Path).getLen()); + + // Write more data to open files + writeToStream(flumeOutputStream, buf); + hbaseFileWrittenDataLength += writeToStream(hbaseOutputStream, buf); + + // Verify old snapshots have point-in-time/frozen file + // lengths even after the current versions have moved forward. + Assert.assertEquals(flumeFileLengthAfterS1, + fs.getFileStatus(flumeS1Path).getLen()); + Assert.assertEquals(flumeFileLengthAfterS2, + fs.getFileStatus(flumeS2Path).getLen()); + Assert.assertEquals(hbaseFileLengthAfterS1, + fs.getFileStatus(hbaseS1Path).getLen()); + Assert.assertEquals(hbaseFileLengthAfterS2, + fs.getFileStatus(hbaseS2Path).getLen()); + + // Delete flume current file. Snapshots should + // still have references to flume file. 
+ boolean flumeFileDeleted = fs.delete(flumeFile, true); + Assert.assertTrue(flumeFileDeleted); + Assert.assertFalse(fs.exists(flumeFile)); + Assert.assertTrue(fs.exists(flumeS1Path)); + Assert.assertTrue(fs.exists(flumeS2Path)); + + SnapshotTestHelper.createSnapshot(fs, snapRootDir, "tmp_snap"); + fs.deleteSnapshot(snapRootDir, "tmp_snap"); + + // Delete snap_2. snap_1 still has reference to + // the flume file. + fs.deleteSnapshot(snapRootDir, snap2Name); + Assert.assertFalse(fs.exists(flumeS2Path)); + Assert.assertTrue(fs.exists(flumeS1Path)); + + // Delete snap_1. Now all traces of flume file + // is gone. + fs.deleteSnapshot(snapRootDir, snap1Name); + Assert.assertFalse(fs.exists(flumeS2Path)); + Assert.assertFalse(fs.exists(flumeS1Path)); + + // Create Snapshot S3 + final Path snap3Dir = SnapshotTestHelper.createSnapshot( + fs, snapRootDir, snap3Name); + final Path hbaseS3Path = new Path(snap3Dir, hbaseFileName); + + // Verify live files length is same as all data written till now + final long hbaseFileLengthAfterS3 = fs.getFileStatus(hbaseFile).getLen(); + Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS3); + + // Write more data to open files + hbaseFileWrittenDataLength += writeToStream(hbaseOutputStream, buf); + + // Verify old snapshots have point-in-time/frozen file + // lengths even after the flume open file is deleted and + // the hbase live file has moved forward. + Assert.assertEquals(hbaseFileLengthAfterS3, + fs.getFileStatus(hbaseS3Path).getLen()); + Assert.assertEquals(hbaseFileWrittenDataLength, + fs.getFileStatus(hbaseFile).getLen()); + + hbaseOutputStream.close(); + } + + /** + * Test client writing to open files are not interrupted when snapshots + * that captured open files get deleted. + */ + @Test (timeout = 240000) + public void testOpenFileWritingAcrossSnapDeletion() throws Exception { + final Path snapRootDir = new Path("/level_0_A"); + final String flumeFileName = "flume.log"; + final String hbaseFileName = "hbase.log"; + final String snap1Name = "snap_1"; + final String snap2Name = "snap_2"; + final String snap3Name = "snap_3"; + + // Create files and open streams + final Path flumeFile = new Path(snapRootDir, flumeFileName); + FSDataOutputStream flumeOut = fs.create(flumeFile, false, + 8000, (short)3, 1048576); + flumeOut.close(); + final Path hbaseFile = new Path(snapRootDir, hbaseFileName); + FSDataOutputStream hbaseOut = fs.create(hbaseFile, false, + 8000, (short)3, 1048576); + hbaseOut.close(); + + final AtomicBoolean writerError = new AtomicBoolean(false); + final CountDownLatch startLatch = new CountDownLatch(1); + final CountDownLatch deleteLatch = new CountDownLatch(1); + Thread t = new Thread(new Runnable() { + @Override + public void run() { + try { + FSDataOutputStream flumeOutputStream = fs.append(flumeFile, 8000); + FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile, 8000); + byte[] bytes = new byte[(int) (1024 * 0.2)]; + Random r = new Random(Time.now()); + + for (int i = 0; i < 200000; i++) { + r.nextBytes(bytes); + flumeOutputStream.write(bytes); + if (hbaseOutputStream != null) { + hbaseOutputStream.write(bytes); + } + if (i == 50000) { + startLatch.countDown(); + } else if (i == 100000) { + deleteLatch.countDown(); + } else if (i == 150000) { + hbaseOutputStream.hsync(); + fs.delete(hbaseFile, true); + try { + hbaseOutputStream.close(); + } catch (Exception e) { + // since the file is deleted before the open stream close, + // it might throw FileNotFoundException. Ignore the + // expected exception. 
+ } + hbaseOutputStream = null; + } else if (i % 5000 == 0) { + LOG.info("Write pos: " + flumeOutputStream.getPos() + + ", size: " + fs.getFileStatus(flumeFile).getLen() + + ", loop: " + (i + 1)); + } + } + } catch (Exception e) { + LOG.warn("Writer error: " + e); + writerError.set(true); + } + } + }); + t.start(); + + startLatch.await(); + final Path snap1Dir = SnapshotTestHelper.createSnapshot( + fs, snapRootDir, snap1Name); + final Path flumeS1Path = new Path(snap1Dir, flumeFileName); + LOG.info("Snap1 file status: " + fs.getFileStatus(flumeS1Path)); + LOG.info("Current file status: " + fs.getFileStatus(flumeFile)); + + deleteLatch.await(); + LOG.info("Snap1 file status: " + fs.getFileStatus(flumeS1Path)); + LOG.info("Current file status: " + fs.getFileStatus(flumeFile)); + + // Verify deletion of snapshot which had the under construction file + // captured is not truncating the under construction file and the thread + // writing to the same file not crashing on newer block allocations. + LOG.info("Deleting " + snap1Name); + fs.deleteSnapshot(snapRootDir, snap1Name); + + // Verify creation and deletion of snapshot newer than the oldest + // snapshot is not crashing the thread writing to under construction file. + SnapshotTestHelper.createSnapshot(fs, snapRootDir, snap2Name); + SnapshotTestHelper.createSnapshot(fs, snapRootDir, snap3Name); + fs.deleteSnapshot(snapRootDir, snap3Name); + fs.deleteSnapshot(snapRootDir, snap2Name); + SnapshotTestHelper.createSnapshot(fs, snapRootDir, "test"); + + t.join(); + Assert.assertFalse("Client encountered writing error!", writerError.get()); + + restartNameNode(); + cluster.waitActive(); + } + + /** + * Verify snapshots with open files captured are safe even when the + * 'current' version of the file is truncated and appended later. 
+ */ + @Test (timeout = 120000) + public void testOpenFilesSnapChecksumWithTrunkAndAppend() throws Exception { + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES, + true); + // Construct the directory tree + final Path dir = new Path("/A/B/C"); + fs.mkdirs(dir); + + // String constants + final Path hbaseSnapRootDir = dir; + final String hbaseFileName = "hbase.wal"; + final String hbaseSnap1Name = "hbase_snap_s1"; + final String hbaseSnap2Name = "hbase_snap_s2"; + final String hbaseSnap3Name = "hbase_snap_s3"; + final String hbaseSnap4Name = "hbase_snap_s4"; + + // Create files and open a stream + final Path hbaseFile = new Path(dir, hbaseFileName); + createFile(hbaseFile); + final FileChecksum hbaseWALFileCksum0 = + fs.getFileChecksum(hbaseFile); + FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile); + + // Create Snapshot S1 + final Path hbaseS1Dir = SnapshotTestHelper.createSnapshot( + fs, hbaseSnapRootDir, hbaseSnap1Name); + final Path hbaseS1Path = new Path(hbaseS1Dir, hbaseFileName); + final FileChecksum hbaseFileCksumS1 = fs.getFileChecksum(hbaseS1Path); + + // Verify if Snap S1 checksum is same as the current version one + Assert.assertEquals("Live and snap1 file checksum doesn't match!", + hbaseWALFileCksum0, fs.getFileChecksum(hbaseS1Path)); + + int newWriteLength = (int) (BLOCKSIZE * 1.5); + byte[] buf = new byte[newWriteLength]; + Random random = new Random(); + random.nextBytes(buf); + writeToStream(hbaseOutputStream, buf); + + // Create Snapshot S2 + final Path hbaseS2Dir = SnapshotTestHelper.createSnapshot( + fs, hbaseSnapRootDir, hbaseSnap2Name); + final Path hbaseS2Path = new Path(hbaseS2Dir, hbaseFileName); + final FileChecksum hbaseFileCksumS2 = fs.getFileChecksum(hbaseS2Path); + + // Verify if the s1 checksum is still the same + Assert.assertEquals("Snap file checksum has changed!", + hbaseFileCksumS1, fs.getFileChecksum(hbaseS1Path)); + // Verify if the s2 checksum is different from the s1 checksum + Assert.assertNotEquals("Snap1 and snap2 file checksum should differ!", + hbaseFileCksumS1, hbaseFileCksumS2); + + newWriteLength = (int) (BLOCKSIZE * 2.5); + buf = new byte[newWriteLength]; + random.nextBytes(buf); + writeToStream(hbaseOutputStream, buf); + + // Create Snapshot S3 + final Path hbaseS3Dir = SnapshotTestHelper.createSnapshot( + fs, hbaseSnapRootDir, hbaseSnap3Name); + final Path hbaseS3Path = new Path(hbaseS3Dir, hbaseFileName); + FileChecksum hbaseFileCksumS3 = fs.getFileChecksum(hbaseS3Path); + + // Record the checksum for the before truncate current file + hbaseOutputStream.close(); + final FileChecksum hbaseFileCksumBeforeTruncate = + fs.getFileChecksum(hbaseFile); + Assert.assertEquals("Snap3 and before truncate file checksum should match!", + hbaseFileCksumBeforeTruncate, hbaseFileCksumS3); + + // Truncate the current file and record the after truncate checksum + long currentFileLen = fs.getFileStatus(hbaseFile).getLen(); + boolean fileTruncated = fs.truncate(hbaseFile, currentFileLen / 2); + Assert.assertTrue("File truncation failed!", fileTruncated); + final FileChecksum hbaseFileCksumAfterTruncate = + fs.getFileChecksum(hbaseFile); + + Assert.assertNotEquals("Snap3 and after truncate checksum shouldn't match!", + hbaseFileCksumS3, hbaseFileCksumAfterTruncate); + + // Append more data to the current file + hbaseOutputStream = fs.append(hbaseFile); + newWriteLength = (int) (BLOCKSIZE * 5.5); + buf = new byte[newWriteLength]; + random.nextBytes(buf); + writeToStream(hbaseOutputStream, buf); + + // Create Snapshot S4 + final 
Path hbaseS4Dir = SnapshotTestHelper.createSnapshot( + fs, hbaseSnapRootDir, hbaseSnap4Name); + final Path hbaseS4Path = new Path(hbaseS4Dir, hbaseFileName); + final FileChecksum hbaseFileCksumS4 = fs.getFileChecksum(hbaseS4Path); + + // Record the checksum for the current file after append + hbaseOutputStream.close(); + final FileChecksum hbaseFileCksumAfterAppend = + fs.getFileChecksum(hbaseFile); + + Assert.assertEquals("Snap4 and after append file checksum should match!", + hbaseFileCksumAfterAppend, hbaseFileCksumS4); + + // Recompute checksum for S3 path and verify it has not changed + hbaseFileCksumS3 = fs.getFileChecksum(hbaseS3Path); + Assert.assertEquals("Snap3 and before truncate file checksum should match!", + hbaseFileCksumBeforeTruncate, hbaseFileCksumS3); + } + private void restartNameNode() throws Exception { cluster.triggerBlockReports(); NameNode nameNode = cluster.getNameNode(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java index 5a3d451bef0..edb79d3bb4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.fs.permission.FsAction.*; import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; import java.io.IOException; +import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -37,6 +38,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -51,10 +53,12 @@ import com.google.common.collect.Lists; public class TestJsonUtil { static FileStatus toFileStatus(HdfsFileStatus f, String parent) { - return new FileStatus(f.getLen(), f.isDir(), f.getReplication(), + return new FileStatus(f.getLen(), f.isDirectory(), f.getReplication(), f.getBlockSize(), f.getModificationTime(), f.getAccessTime(), f.getPermission(), f.getOwner(), f.getGroup(), - f.isSymlink() ? new Path(f.getSymlink()) : null, + f.isSymlink() + ? 
new Path(DFSUtilClient.bytes2String(f.getSymlinkInBytes())) + : null, new Path(f.getFullName(parent))); } @@ -63,7 +67,8 @@ public class TestJsonUtil { final long now = Time.now(); final String parent = "/dir"; final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26, - now, now + 10, new FsPermission((short) 0644), "user", "group", + now, now + 10, new FsPermission((short) 0644), + EnumSet.noneOf(HdfsFileStatus.Flags.class), "user", "group", DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"), HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null); final FileStatus fstatus = toFileStatus(status, parent); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index a32cd98c49f..e271cb574bd 100644 Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml index 201db9eb7da..f9011450555 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml @@ -1,6 +1,6 @@ - + - -63 + -64 OP_START_LOG_SEGMENT @@ -63,8 +63,8 @@ 1422406380369 1422406380345 512 - - + + false xyao @@ -96,8 +96,8 @@ 1422406380376 1422406380345 512 - - + + false xyao @@ -238,8 +238,8 @@ 1422406380425 1422406380423 512 - - + + false xyao @@ -360,7 +360,7 @@ 0 1001 - + -2 @@ -393,7 +393,7 @@ 0 1002 - + -2 @@ -426,7 +426,7 @@ 0 1003 - + -2 @@ -441,8 +441,8 @@ 1422406380534 1422406380446 512 - - + + false 1073741825 @@ -513,7 +513,7 @@ 0 1004 - + -2 @@ -546,7 +546,7 @@ 0 1005 - + -2 @@ -579,7 +579,7 @@ 0 1006 - + -2 @@ -594,8 +594,8 @@ 1422406380558 1422406380537 512 - - + + false 1073741828 @@ -666,7 +666,7 @@ 0 1007 - + -2 @@ -699,7 +699,7 @@ 0 1008 - + -2 @@ -732,7 +732,7 @@ 0 1009 - + -2 @@ -747,8 +747,8 @@ 1422406380579 1422406380560 512 - - + + false 1073741831 @@ -834,7 +834,7 @@ 0 1010 - + -2 @@ -867,7 +867,7 @@ 0 1011 - + -2 @@ -882,8 +882,8 @@ 1422406380599 1422406380586 512 - - + + false 1073741834 @@ -979,7 +979,7 @@ 0 1012 - + -2 @@ -993,7 +993,7 @@ 11 1012 - + -2 @@ -1024,8 +1024,8 @@ 1422406383261 1422406380608 512 - - + + false 1073741836 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml index 127effc616b..1baf355dd29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml @@ -101,7 +101,7 @@ SubstringComparator - [-setPolicy -path <path> -policy <policy>] + [-setPolicy -path <path> [-policy <policy>] [-replicate]] @@ -237,6 +237,29 @@ + + setPolicy : set replication policy on a directory + + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir + -fs NAMENODE -mkdir /ecdir/replica + -fs NAMENODE -setPolicy -replicate -path /ecdir/replica + -fs NAMENODE -touchz /ecdir/replica/file + -fs NAMENODE -getPolicy -path /ecdir/replica/file + + + -fs NAMENODE -rm /ecdir/replica/file + -fs NAMENODE -rmdir /ecdir/replica + -fs NAMENODE -rmdir /ecdir + + + + SubstringComparator + is unspecified + + + + unsetPolicy : unset policy and get @@ -453,7 +476,7 @@ - setPolicy : illegal parameters - path is missing + setPolicy : 
illegal parameters - path option is missing -fs NAMENODE -mkdir /ecdir -fs NAMENODE -setPolicy @@ -470,7 +493,7 @@ - setPolicy : illegal parameters - policy name is missing + setPolicy : illegal parameters - path name is missing -fs NAMENODE -mkdir /ecdir -fs NAMENODE -setPolicy -path @@ -487,7 +510,7 @@ - setPolicy : illegal parameters - too many arguments + setPolicy : illegal parameters - too many arguments case 1 -fs NAMENODE -mkdir /ecdir -fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-64k /ecdir2 @@ -503,6 +526,23 @@ + + setPolicy : illegal parameters - too many arguments case 2 + + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-64k -replicate /ecdir2 + + + -fs NAMENODE -rmdir /ecdir + + + + SubstringComparator + -setPolicy: Too many arguments + + + + setPolicy : illegal parameters - invalidpolicy @@ -552,6 +592,71 @@ + + setPolicy : illegal parameters - wrong spelling replicate + + -fs NAMENODE -setPolicy -path /ecdir -replica + + + + + + SubstringComparator + -setPolicy: Too many arguments + + + + + + setPolicy : illegal parameters - replicate and policy coexist + + -fs NAMENODE -setPolicy -path /ecdir -policy RS-3-2-64k -replicate + + + + + + SubstringComparator + -replicate and -policy cannot been used at the same time + + + + + + setPolicy : set erasure coding policy without given a specific policy name + + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -path /ecdir + + + -fs NAMENODE -rmdir /ecdir + + + + SubstringComparator + Set default erasure coding policy on /ecdir + + + + + + getPolicy: get the default policy after setPolicy without given a specific policy name + + -fs NAMENODE -mkdir /ecdir + -fs NAMENODE -setPolicy -path /ecdir + -fs NAMENODE -getPolicy -path /ecdir + + + -fs NAMENODE -rmdir /ecdir + + + + SubstringComparator + RS-6-3-64k + + + + getPolicy : illegal parameters - path is missing diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred index cf17aea75a8..f66f563aaef 100755 --- a/hadoop-mapreduce-project/bin/mapred +++ b/hadoop-mapreduce-project/bin/mapred @@ -24,15 +24,15 @@ HADOOP_SHELL_EXECNAME="${MYNAME##*/}" ## @replaceable no function hadoop_usage { - hadoop_add_subcommand "classpath" "prints the class path needed for running mapreduce subcommands" - hadoop_add_subcommand "envvars" "display computed Hadoop environment variables" - hadoop_add_subcommand "historyserver" "run job history servers as a standalone daemon" - hadoop_add_subcommand "hsadmin" "job history server admin interface" - hadoop_add_subcommand "job" "manipulate MapReduce jobs" - hadoop_add_subcommand "pipes" "run a Pipes job" - hadoop_add_subcommand "queue" "get information regarding JobQueues" - hadoop_add_subcommand "sampler" "sampler" - hadoop_add_subcommand "version" "print the version" + hadoop_add_subcommand "classpath" client "prints the class path needed for running mapreduce subcommands" + hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" + hadoop_add_subcommand "historyserver" daemon "run job history servers as a standalone daemon" + hadoop_add_subcommand "hsadmin" admin "job history server admin interface" + hadoop_add_subcommand "job" client "manipulate MapReduce jobs" + hadoop_add_subcommand "pipes" client "run a Pipes job" + hadoop_add_subcommand "queue" client "get information regarding JobQueues" + hadoop_add_subcommand "sampler" client "sampler" + hadoop_add_subcommand "version" client "print the version" hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" 
true } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index 285d36e3186..53fe055f75e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -63,6 +63,7 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; +import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; @@ -1404,7 +1405,12 @@ public class JobHistoryEventHandler extends AbstractService qualifiedDoneFile = doneDirFS.makeQualified(new Path(doneDirPrefixPath, doneJobHistoryFileName)); - moveToDoneNow(qualifiedLogFile, qualifiedDoneFile); + if(moveToDoneNow(qualifiedLogFile, qualifiedDoneFile)) { + String historyUrl = MRWebAppUtil.getApplicationWebURLOnJHSWithScheme( + getConfig(), context.getApplicationID()); + context.setHistoryUrl(historyUrl); + LOG.info("Set historyUrl to " + historyUrl); + } } // Move confFile to Done Folder @@ -1610,7 +1616,7 @@ public class JobHistoryEventHandler extends AbstractService } } - private void moveTmpToDone(Path tmpPath) throws IOException { + protected void moveTmpToDone(Path tmpPath) throws IOException { if (tmpPath != null) { String tmpFileName = tmpPath.getName(); String fileName = getFileNameFromTmpFN(tmpFileName); @@ -1622,7 +1628,9 @@ public class JobHistoryEventHandler extends AbstractService // TODO If the FS objects are the same, this should be a rename instead of a // copy. 
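  // Returning a boolean from moveToDoneNow (see the new signature just below)
  // lets processDoneFiles publish the job history URL only when the history
  // file has actually been copied into the done directory: the caller wraps
  // the call in if (moveToDoneNow(...)) { context.setHistoryUrl(historyUrl); }
  // and RMCommunicator now reads that URL from the AppContext when
  // unregistering instead of recomputing it.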
- private void moveToDoneNow(Path fromPath, Path toPath) throws IOException { + protected boolean moveToDoneNow(Path fromPath, Path toPath) + throws IOException { + boolean success = false; // check if path exists, in case of retries it may not exist if (stagingDirFS.exists(fromPath)) { LOG.info("Copying " + fromPath.toString() + " to " + toPath.toString()); @@ -1631,13 +1639,18 @@ public class JobHistoryEventHandler extends AbstractService boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath, false, getConfig()); - if (copied) - LOG.info("Copied to done location: " + toPath); - else - LOG.info("copy failed"); doneDirFS.setPermission(toPath, new FsPermission( JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS)); + if (copied) { + LOG.info("Copied from: " + fromPath.toString() + + " to done location: " + toPath.toString()); + success = true; + } else { + LOG.info("Copy failed from: " + fromPath.toString() + + " to done location: " + toPath.toString()); + } } + return success; } private String getTempFileName(String srcFile) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java index ddf4fa7949d..4a21396de0a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java @@ -69,4 +69,8 @@ public interface AppContext { String getNMHostname(); TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor(); + + String getHistoryUrl(); + + void setHistoryUrl(String historyUrl); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 1445481f705..f511f1917f2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -62,7 +62,6 @@ import org.apache.hadoop.mapreduce.OutputFormat; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TypeConverter; -import org.apache.hadoop.mapreduce.counters.Limits; import org.apache.hadoop.mapreduce.jobhistory.AMStartedEvent; import org.apache.hadoop.mapreduce.jobhistory.EventReader; import org.apache.hadoop.mapreduce.jobhistory.EventType; @@ -1079,6 +1078,7 @@ public class MRAppMaster extends CompositeService { private final ClientToAMTokenSecretManager clientToAMTokenSecretManager; private TimelineClient timelineClient = null; private TimelineV2Client timelineV2Client = null; + private String historyUrl = null; private final TaskAttemptFinishingMonitor taskAttemptFinishingMonitor; @@ -1198,6 +1198,16 @@ public class MRAppMaster extends CompositeService { public TimelineV2Client getTimelineV2Client() { return timelineV2Client; } + + @Override + public String getHistoryUrl() { + return 
historyUrl; + } + + @Override + public void setHistoryUrl(String historyUrl) { + this.historyUrl = historyUrl; + } } @SuppressWarnings("unchecked") @@ -1281,8 +1291,6 @@ public class MRAppMaster extends CompositeService { // finally set the job classloader MRApps.setClassLoader(jobClassLoader, getConfig()); - // set job classloader if configured - Limits.init(getConfig()); if (initFailed) { JobEvent initFailedEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT_FAILED); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index 4d155d0daba..6880b6c2e6f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -644,6 +644,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, private float reduceProgress; private float cleanupProgress; private boolean isUber = false; + private boolean finishJobWhenReducersDone; + private boolean completingJob = false; private Credentials jobCredentials; private Token jobToken; @@ -717,6 +719,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, this.maxFetchFailuresNotifications = conf.getInt( MRJobConfig.MAX_FETCH_FAILURES_NOTIFICATIONS, MRJobConfig.DEFAULT_MAX_FETCH_FAILURES_NOTIFICATIONS); + this.finishJobWhenReducersDone = conf.getBoolean( + MRJobConfig.FINISH_JOB_WHEN_REDUCERS_DONE, + MRJobConfig.DEFAULT_FINISH_JOB_WHEN_REDUCERS_DONE); } protected StateMachine getStateMachine() { @@ -2021,7 +2026,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, TimeUnit.MILLISECONDS); return JobStateInternal.FAIL_WAIT; } - + + checkReadyForCompletionWhenAllReducersDone(job); + return job.checkReadyForCommit(); } @@ -2052,6 +2059,32 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, } job.metrics.killedTask(task); } + + /** Improvement: if all reducers have finished, we check if we have + restarted mappers that are still running. This can happen in a + situation when a node becomes UNHEALTHY and mappers are rescheduled. 
+ See MAPREDUCE-6870 for details */ + private void checkReadyForCompletionWhenAllReducersDone(JobImpl job) { + if (job.finishJobWhenReducersDone) { + int totalReduces = job.getTotalReduces(); + int completedReduces = job.getCompletedReduces(); + + if (totalReduces > 0 && totalReduces == completedReduces + && !job.completingJob) { + + for (TaskId mapTaskId : job.mapTasks) { + MapTaskImpl task = (MapTaskImpl) job.tasks.get(mapTaskId); + if (!task.isFinished()) { + LOG.info("Killing map task " + task.getID()); + job.eventHandler.handle( + new TaskEvent(task.getID(), TaskEventType.T_KILL)); + } + } + + job.completingJob = true; + } + } + } } // Transition class for handling jobs with no tasks diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index 6cec2f3abfc..a7058e05846 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -215,9 +215,7 @@ public abstract class RMCommunicator extends AbstractService } LOG.info("Setting job diagnostics to " + sb.toString()); - String historyUrl = - MRWebAppUtil.getApplicationWebURLOnJHSWithScheme(getConfig(), - context.getApplicationID()); + String historyUrl = context.getHistoryUrl(); LOG.info("History url is " + historyUrl); FinishApplicationMasterRequest request = FinishApplicationMasterRequest.newInstance(finishState, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java index 1c90cb9d172..6db1274a924 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java @@ -84,11 +84,11 @@ public class AppController extends Controller implements AMParams { public void info() { AppInfo info = new AppInfo(app, app.context); info("Application Master Overview"). - _("Application ID:", info.getId()). - _("Application Name:", info.getName()). - _("User:", info.getUser()). - _("Started on:", Times.format(info.getStartTime())). - _("Elasped: ", org.apache.hadoop.util.StringUtils.formatTime( + __("Application ID:", info.getId()). + __("Application Name:", info.getName()). + __("User:", info.getUser()). + __("Started on:", Times.format(info.getStartTime())). 
+ __("Elasped: ", org.apache.hadoop.util.StringUtils.formatTime( info.getElapsedTime() )); render(InfoPage.class); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java index 7fde95b592a..7a3e2b357e3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java @@ -25,14 +25,14 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; public class AppView extends TwoColumnLayout { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); set(DATATABLES_ID, "jobs"); set(initID(DATATABLES, "jobs"), jobsTableInit()); setTableStyles(html, "jobs"); } - protected void commonPreHead(Page.HTML<_> html) { + protected void commonPreHead(Page.HTML<__> html) { set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}"); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java index 98a2ce1d0cc..76ef6bdcb65 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java @@ -30,10 +30,10 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; -import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.InputType; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -56,21 +56,21 @@ public class ConfBlock extends HtmlBlock { String jid = $(JOB_ID); if (jid.isEmpty()) { html. - p()._("Sorry, can't do anything without a JobID.")._(); + p().__("Sorry, can't do anything without a JobID.").__(); return; } JobId jobID = MRApps.toJobID(jid); Job job = appContext.getJob(jobID); if (job == null) { html. - p()._("Sorry, ", jid, " not found.")._(); + p().__("Sorry, ", jid, " not found.").__(); return; } Path confPath = job.getConfFile(); try { ConfInfo info = new ConfInfo(job); - html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString())._(); + html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString()).__(); TBODY> tbody = html. // Tasks table table("#conf"). 
@@ -79,8 +79,8 @@ public class ConfBlock extends HtmlBlock { th(_TH, "key"). th(_TH, "value"). th(_TH, "source chain"). - _(). - _(). + __(). + __(). tbody(); for (ConfEntryInfo entry : info.getProperties()) { StringBuffer buffer = new StringBuffer(); @@ -100,20 +100,20 @@ public class ConfBlock extends HtmlBlock { td(entry.getName()). td(entry.getValue()). td(buffer.toString()). - _(); + __(); } - tbody._(). + tbody.__(). tfoot(). tr(). - th().input("search_init").$type(InputType.text).$name("key").$value("key")._()._(). - th().input("search_init").$type(InputType.text).$name("value").$value("value")._()._(). - th().input("search_init").$type(InputType.text).$name("source chain").$value("source chain")._()._(). - _(). - _(). - _(); + th().input("search_init").$type(InputType.text).$name("key").$value("key").__().__(). + th().input("search_init").$type(InputType.text).$name("value").$value("value").__().__(). + th().input("search_init").$type(InputType.text).$name("source chain").$value("source chain").__().__(). + __(). + __(). + __(); } catch(IOException e) { LOG.error("Error while reading "+confPath, e); - html.p()._("Sorry got an error while reading conf file. ",confPath); + html.p().__("Sorry got an error while reading conf file. ", confPath); } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java index 568658e1587..4b9e6f41d07 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java @@ -34,13 +34,13 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.util.MRApps; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TD; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.THEAD; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -60,12 +60,12 @@ public class CountersBlock extends HtmlBlock { @Override protected void render(Block html) { if (job == null) { html. - p()._("Sorry, no counters for nonexistent", $(JOB_ID, "job"))._(); + p().__("Sorry, no counters for nonexistent", $(JOB_ID, "job")).__(); return; } if (!$(TASK_ID).isEmpty() && task == null) { html. 
- p()._("Sorry, no counters for nonexistent", $(TASK_ID, "task"))._(); + p().__("Sorry, no counters for nonexistent", $(TASK_ID, "task")).__(); return; } @@ -75,7 +75,7 @@ public class CountersBlock extends HtmlBlock { type = $(JOB_ID, "the job"); } html. - p()._("Sorry it looks like ",type," has no counters.")._(); + p().__("Sorry it looks like ", type, " has no counters.").__(); return; } @@ -97,7 +97,7 @@ public class CountersBlock extends HtmlBlock { thead(). tr(). th(".group.ui-state-default", "Counter Group"). - th(".ui-state-default", "Counters")._()._(). + th(".ui-state-default", "Counters").__().__(). tbody(); for (CounterGroup g : total) { CounterGroup mg = map == null ? null : map.getGroup(g.getName()); @@ -109,7 +109,7 @@ public class CountersBlock extends HtmlBlock { TR>>>>>>> groupHeadRow = tbody. tr(). th().$title(g.getName()).$class("ui-state-default"). - _(fixGroupDisplayName(g.getDisplayName()))._(). + __(fixGroupDisplayName(g.getDisplayName())).__(). td().$class(C_TABLE). table(".dt-counters").$id(job.getID()+"."+g.getName()). thead(). @@ -120,20 +120,20 @@ public class CountersBlock extends HtmlBlock { } // Ditto TBODY>>>>>> group = groupHeadRow. - th(map == null ? "Value" : "Total")._()._(). + th(map == null ? "Value" : "Total").__().__(). tbody(); for (Counter counter : g) { // Ditto TR>>>>>>> groupRow = group. tr(); if (task == null && mg == null && rg == null) { - groupRow.td().$title(counter.getName())._(counter.getDisplayName()). - _(); + groupRow.td().$title(counter.getName()).__(counter.getDisplayName()). + __(); } else { groupRow.td().$title(counter.getName()). a(url(urlBase,urlId,g.getName(), counter.getName()), counter.getDisplayName()). - _(); + __(); } if (map != null) { Counter mc = mg == null ? null : mg.findCounter(counter.getName()); @@ -142,11 +142,11 @@ public class CountersBlock extends HtmlBlock { td(mc == null ? "0" : String.format("%,d", mc.getValue())). td(rc == null ? "0" : String.format("%,d", rc.getValue())); } - groupRow.td(String.format("%,d", counter.getValue()))._(); + groupRow.td(String.format("%,d", counter.getValue())).__(); } - group._()._()._()._(); + group.__().__().__().__(); } - tbody._()._()._(); + tbody.__().__().__(); } private void getCounters(AppContext ctx) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java index d7afcd84677..e7809075b73 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java @@ -25,7 +25,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; public class CountersPage extends AppView { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); String tid = $(TASK_ID); @@ -39,7 +39,7 @@ public class CountersPage extends AppView { "{bJQueryUI:true, sDom:'t', iDisplayLength:-1}"); } - @Override protected void postHead(Page.HTML<_> html) { + @Override protected void postHead(Page.HTML<__> html) { html. 
style("#counters, .dt-counters { table-layout: fixed }", "#counters th { overflow: hidden; vertical-align: middle }", diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java index 5163a011e9b..3dd64f58bf1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java @@ -23,7 +23,7 @@ import org.apache.hadoop.yarn.webapp.view.InfoBlock; public class InfoPage extends AppView { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); setTitle("About the Application Master"); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java index a599870b451..77ea55e03e3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java @@ -30,7 +30,6 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH; import java.util.Date; import java.util.List; -import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.app.AppContext; @@ -41,9 +40,9 @@ import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -60,14 +59,14 @@ public class JobBlock extends HtmlBlock { String jid = $(JOB_ID); if (jid.isEmpty()) { html. - p()._("Sorry, can't do anything without a JobID.")._(); + p().__("Sorry, can't do anything without a JobID.").__(); return; } JobId jobID = MRApps.toJobID(jid); Job job = appContext.getJob(jobID); if (job == null) { html. - p()._("Sorry, ", jid, " not found.")._(); + p().__("Sorry, ", jid, " not found.").__(); return; } @@ -77,15 +76,15 @@ public class JobBlock extends HtmlBlock { JobInfo jinfo = new JobInfo(job, true); info("Job Overview"). - _("Job Name:", jinfo.getName()). - _("User Name:", jinfo.getUserName()). - _("Queue Name:", jinfo.getQueueName()). - _("State:", jinfo.getState()). - _("Uberized:", jinfo.isUberized()). - _("Started:", new Date(jinfo.getStartTime())). 
- _("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime())); + __("Job Name:", jinfo.getName()). + __("User Name:", jinfo.getUserName()). + __("Queue Name:", jinfo.getQueueName()). + __("State:", jinfo.getState()). + __("Uberized:", jinfo.isUberized()). + __("Started:", new Date(jinfo.getStartTime())). + __("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime())); DIV div = html. - _(InfoBlock.class). + __(InfoBlock.class). div(_INFO_WRAP); // MRAppMasters Table @@ -93,13 +92,13 @@ public class JobBlock extends HtmlBlock { table. tr(). th(amString). - _(). + __(). tr(). th(_TH, "Attempt Number"). th(_TH, "Start Time"). th(_TH, "Node"). th(_TH, "Logs"). - _(); + __(); for (AMInfo amInfo : amInfos) { AMAttemptInfo attempt = new AMAttemptInfo(amInfo, jinfo.getId(), jinfo.getUserName()); @@ -109,14 +108,14 @@ public class JobBlock extends HtmlBlock { td(new Date(attempt.getStartTime()).toString()). td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(), attempt.getNodeHttpAddress()), - attempt.getNodeHttpAddress())._(). + attempt.getNodeHttpAddress()).__(). td().a(".logslink", url(attempt.getLogsLink()), - "logs")._(). - _(); + "logs").__(). + __(); } - table._(); - div._(); + table.__(); + div.__(); html.div(_INFO_WRAP). // Tasks table @@ -127,30 +126,30 @@ public class JobBlock extends HtmlBlock { th(_TH, "Total"). th(_TH, "Pending"). th(_TH, "Running"). - th(_TH, "Complete")._(). + th(_TH, "Complete").__(). tr(_ODD). th("Map"). td(). div(_PROGRESSBAR). $title(join(jinfo.getMapProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). - $style(join("width:", jinfo.getMapProgressPercent(), '%'))._()._()._(). - td().a(url("tasks", jid, "m", "ALL"),String.valueOf(jinfo.getMapsTotal()))._(). - td().a(url("tasks", jid, "m", "PENDING"),String.valueOf(jinfo.getMapsPending()))._(). - td().a(url("tasks", jid, "m", "RUNNING"),String.valueOf(jinfo.getMapsRunning()))._(). - td().a(url("tasks", jid, "m", "COMPLETED"),String.valueOf(jinfo.getMapsCompleted()))._()._(). + $style(join("width:", jinfo.getMapProgressPercent(), '%')).__().__().__(). + td().a(url("tasks", jid, "m", "ALL"), String.valueOf(jinfo.getMapsTotal())).__(). + td().a(url("tasks", jid, "m", "PENDING"), String.valueOf(jinfo.getMapsPending())).__(). + td().a(url("tasks", jid, "m", "RUNNING"), String.valueOf(jinfo.getMapsRunning())).__(). + td().a(url("tasks", jid, "m", "COMPLETED"), String.valueOf(jinfo.getMapsCompleted())).__().__(). tr(_EVEN). th("Reduce"). td(). div(_PROGRESSBAR). $title(join(jinfo.getReduceProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). - $style(join("width:", jinfo.getReduceProgressPercent(), '%'))._()._()._(). - td().a(url("tasks", jid, "r", "ALL"),String.valueOf(jinfo.getReducesTotal()))._(). - td().a(url("tasks", jid, "r", "PENDING"),String.valueOf(jinfo.getReducesPending()))._(). - td().a(url("tasks", jid, "r", "RUNNING"),String.valueOf(jinfo.getReducesRunning()))._(). - td().a(url("tasks", jid, "r", "COMPLETED"),String.valueOf(jinfo.getReducesCompleted()))._()._() - ._(). + $style(join("width:", jinfo.getReduceProgressPercent(), '%')).__().__().__(). + td().a(url("tasks", jid, "r", "ALL"), String.valueOf(jinfo.getReducesTotal())).__(). + td().a(url("tasks", jid, "r", "PENDING"), String.valueOf(jinfo.getReducesPending())).__(). + td().a(url("tasks", jid, "r", "RUNNING"), String.valueOf(jinfo.getReducesRunning())).__(). + td().a(url("tasks", jid, "r", "COMPLETED"), String.valueOf(jinfo.getReducesCompleted())).__().__() + .__(). // Attempts table table("#job"). tr(). 
@@ -159,45 +158,45 @@ public class JobBlock extends HtmlBlock { th(_TH, "Running"). th(_TH, "Failed"). th(_TH, "Killed"). - th(_TH, "Successful")._(). + th(_TH, "Successful").__(). tr(_ODD). th("Maps"). td().a(url("attempts", jid, "m", TaskAttemptStateUI.NEW.toString()), - String.valueOf(jinfo.getNewMapAttempts()))._(). + String.valueOf(jinfo.getNewMapAttempts())).__(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.RUNNING.toString()), - String.valueOf(jinfo.getRunningMapAttempts()))._(). + String.valueOf(jinfo.getRunningMapAttempts())).__(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.FAILED.toString()), - String.valueOf(jinfo.getFailedMapAttempts()))._(). + String.valueOf(jinfo.getFailedMapAttempts())).__(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.KILLED.toString()), - String.valueOf(jinfo.getKilledMapAttempts()))._(). + String.valueOf(jinfo.getKilledMapAttempts())).__(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.SUCCESSFUL.toString()), - String.valueOf(jinfo.getSuccessfulMapAttempts()))._(). - _(). + String.valueOf(jinfo.getSuccessfulMapAttempts())).__(). + __(). tr(_EVEN). th("Reduces"). td().a(url("attempts", jid, "r", TaskAttemptStateUI.NEW.toString()), - String.valueOf(jinfo.getNewReduceAttempts()))._(). + String.valueOf(jinfo.getNewReduceAttempts())).__(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.RUNNING.toString()), - String.valueOf(jinfo.getRunningReduceAttempts()))._(). + String.valueOf(jinfo.getRunningReduceAttempts())).__(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.FAILED.toString()), - String.valueOf(jinfo.getFailedReduceAttempts()))._(). + String.valueOf(jinfo.getFailedReduceAttempts())).__(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.KILLED.toString()), - String.valueOf(jinfo.getKilledReduceAttempts()))._(). + String.valueOf(jinfo.getKilledReduceAttempts())).__(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.SUCCESSFUL.toString()), - String.valueOf(jinfo.getSuccessfulReduceAttempts()))._(). - _(). - _(). - _(); + String.valueOf(jinfo.getSuccessfulReduceAttempts())).__(). + __(). + __(). + __(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java index 983859e7d67..4d6a3e2b634 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java @@ -27,7 +27,6 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.postInitID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; -import org.apache.hadoop.mapreduce.v2.app.webapp.ConfBlock; import org.apache.hadoop.yarn.webapp.SubView; /** @@ -39,7 +38,7 @@ public class JobConfPage extends AppView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { String jobID = $(JOB_ID); set(TITLE, jobID.isEmpty() ? 
"Bad request: missing job ID" : join("Configuration for MapReduce Job ", $(JOB_ID))); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java index 00f4750fd34..6508fb88adb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java @@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.webapp.SubView; public class JobPage extends AppView { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { String jobID = $(JOB_ID); set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID" : join("MapReduce Job ", $(JOB_ID))); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java index 720219ece15..ff4bc00fe22 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java @@ -25,9 +25,9 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -53,34 +53,34 @@ public class JobsBlock extends HtmlBlock { th("Maps Completed"). th("Reduce Progress"). th("Reduces Total"). - th("Reduces Completed")._()._(). + th("Reduces Completed").__().__(). tbody(); for (Job j : appContext.getAllJobs().values()) { JobInfo job = new JobInfo(j, false); tbody. tr(). td(). - span().$title(String.valueOf(job.getId()))._(). // for sorting - a(url("job", job.getId()), job.getId())._(). + span().$title(String.valueOf(job.getId())).__(). // for sorting + a(url("job", job.getId()), job.getId()).__(). td(job.getName()). td(job.getState()). td(). - span().$title(job.getMapProgressPercent())._(). // for sorting + span().$title(job.getMapProgressPercent()).__(). // for sorting div(_PROGRESSBAR). $title(join(job.getMapProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). - $style(join("width:", job.getMapProgressPercent(), '%'))._()._()._(). + $style(join("width:", job.getMapProgressPercent(), '%')).__().__().__(). td(String.valueOf(job.getMapsTotal())). td(String.valueOf(job.getMapsCompleted())). td(). - span().$title(job.getReduceProgressPercent())._(). 
// for sorting + span().$title(job.getReduceProgressPercent()).__(). // for sorting div(_PROGRESSBAR). $title(join(job.getReduceProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). - $style(join("width:", job.getReduceProgressPercent(), '%'))._()._()._(). + $style(join("width:", job.getReduceProgressPercent(), '%')).__().__().__(). td(String.valueOf(job.getReducesTotal())). - td(String.valueOf(job.getReducesCompleted()))._(); + td(String.valueOf(job.getReducesCompleted())).__(); } - tbody._()._(); + tbody.__().__(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java index 4eed7e36600..58e1a4308bc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java @@ -25,8 +25,8 @@ import java.util.List; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -42,13 +42,13 @@ public class NavBlock extends HtmlBlock { div("#nav"). h3("Cluster"). ul(). - li().a(url(rmweb, "cluster", "cluster"), "About")._(). - li().a(url(rmweb, "cluster", "apps"), "Applications")._(). - li().a(url(rmweb, "cluster", "scheduler"), "Scheduler")._()._(). + li().a(url(rmweb, "cluster", "cluster"), "About").__(). + li().a(url(rmweb, "cluster", "apps"), "Applications").__(). + li().a(url(rmweb, "cluster", "scheduler"), "Scheduler").__().__(). h3("Application"). ul(). - li().a(url("app/info"), "About")._(). - li().a(url("app"), "Jobs")._()._(); + li().a(url("app/info"), "About").__(). + li().a(url("app"), "Jobs").__().__(); if (app.getJob() != null) { String jobid = MRApps.toString(app.getJob().getID()); List amInfos = app.getJob().getAMInfos(); @@ -58,31 +58,31 @@ public class NavBlock extends HtmlBlock { nav. h3("Job"). ul(). - li().a(url("job", jobid), "Overview")._(). - li().a(url("jobcounters", jobid), "Counters")._(). - li().a(url("conf", jobid), "Configuration")._(). - li().a(url("tasks", jobid, "m"), "Map tasks")._(). - li().a(url("tasks", jobid, "r"), "Reduce tasks")._(). + li().a(url("job", jobid), "Overview").__(). + li().a(url("jobcounters", jobid), "Counters").__(). + li().a(url("conf", jobid), "Configuration").__(). + li().a(url("tasks", jobid, "m"), "Map tasks").__(). + li().a(url("tasks", jobid, "r"), "Reduce tasks").__(). li().a(".logslink", url(MRWebAppUtil.getYARNWebappScheme(), nodeHttpAddress, "node", "containerlogs", thisAmInfo.getContainerId().toString(), app.getJob().getUserName()), - "AM Logs")._()._(); + "AM Logs").__().__(); if (app.getTask() != null) { String taskid = MRApps.toString(app.getTask().getID()); nav. h3("Task"). ul(). - li().a(url("task", taskid), "Task Overview")._(). 
- li().a(url("taskcounters", taskid), "Counters")._()._(); + li().a(url("task", taskid), "Task Overview").__(). + li().a(url("taskcounters", taskid), "Counters").__().__(); } } nav. h3("Tools"). ul(). - li().a("/conf", "Configuration")._(). - li().a("/logs", "Local logs")._(). - li().a("/stacks", "Server stacks")._(). - li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._(); + li().a("/conf", "Configuration").__(). + li().a("/logs", "Local logs").__(). + li().a("/stacks", "Server stacks").__(). + li().a("/jmx?qry=Hadoop:*", "Server metrics").__().__().__(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java index c4311e9ac32..02fb22603cb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java @@ -39,11 +39,11 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.util.MRApps; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -62,12 +62,12 @@ public class SingleCounterBlock extends HtmlBlock { @Override protected void render(Block html) { if (job == null) { html. - p()._("Sorry, no counters for nonexistent", $(JOB_ID, "job"))._(); + p().__("Sorry, no counters for nonexistent", $(JOB_ID, "job")).__(); return; } if (!$(TASK_ID).isEmpty() && task == null) { html. - p()._("Sorry, no counters for nonexistent", $(TASK_ID, "task"))._(); + p().__("Sorry, no counters for nonexistent", $(TASK_ID, "task")).__(); return; } @@ -79,7 +79,7 @@ public class SingleCounterBlock extends HtmlBlock { thead(). tr(). th(".ui-state-default", columnType). - th(".ui-state-default", "Value")._()._(). + th(".ui-state-default", "Value").__().__(). 
tbody(); for (Map.Entry entry : values.entrySet()) { TR>>> row = tbody.tr(); @@ -87,16 +87,16 @@ public class SingleCounterBlock extends HtmlBlock { String val = entry.getValue().toString(); if(task != null) { row.td(id); - row.td().br().$title(val)._()._(val)._(); + row.td().br().$title(val).__().__(val).__(); } else { row.td().a(url("singletaskcounter",entry.getKey(), - $(COUNTER_GROUP), $(COUNTER_NAME)), id)._(); - row.td().br().$title(val)._().a(url("singletaskcounter",entry.getKey(), - $(COUNTER_GROUP), $(COUNTER_NAME)), val)._(); + $(COUNTER_GROUP), $(COUNTER_NAME)), id).__(); + row.td().br().$title(val).__().a(url("singletaskcounter", entry.getKey(), + $(COUNTER_GROUP), $(COUNTER_NAME)), val).__(); } - row._(); + row.__(); } - tbody._()._()._(); + tbody.__().__().__(); } private void populateMembers(AppContext ctx) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java index 729b5a8c49c..6fc1f82ce7a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java @@ -21,7 +21,6 @@ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; -import org.apache.hadoop.mapreduce.v2.app.webapp.SingleCounterBlock; import org.apache.hadoop.yarn.webapp.SubView; /** @@ -33,7 +32,7 @@ public class SingleCounterPage extends AppView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); String tid = $(TASK_ID); String activeNav = "3"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java index 01c5b0d7b97..bd7f7a9707e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java @@ -38,11 +38,11 @@ import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import 
org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.THEAD; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -100,7 +100,7 @@ public class TaskPage extends AppView { .append(" }\n") .append("}\n"); - html.script().$type("text/javascript")._(script.toString())._(); + html.script().$type("text/javascript").__(script.toString()).__(); } TR>> tr = html.table("#attempts").thead().tr(); @@ -118,7 +118,7 @@ public class TaskPage extends AppView { tr.th(".actions", "Actions"); } - TBODY> tbody = tr._()._().tbody(); + TBODY> tbody = tr.__().__().tbody(); // Write all the data into a JavaScript array of arrays for JQuery // DataTables to display StringBuilder attemptsTableData = new StringBuilder("[\n"); @@ -178,9 +178,9 @@ public class TaskPage extends AppView { } attemptsTableData.append("]"); html.script().$type("text/javascript"). - _("var attemptsTableData=" + attemptsTableData)._(); + __("var attemptsTableData=" + attemptsTableData).__(); - tbody._()._(); + tbody.__().__(); } @@ -197,7 +197,7 @@ public class TaskPage extends AppView { } } - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:3}"); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java index 78338ec4d61..8d92dd32ead 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java @@ -30,9 +30,9 @@ import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -65,7 +65,7 @@ public class TasksBlock extends HtmlBlock { th("State"). th("Start Time"). th("Finish Time"). - th("Elapsed Time")._()._(). + th("Elapsed Time").__().__(). tbody(); StringBuilder tasksTableData = new StringBuilder("[\n"); @@ -117,8 +117,8 @@ public class TasksBlock extends HtmlBlock { } tasksTableData.append("]"); html.script().$type("text/javascript"). 
- _("var tasksTableData=" + tasksTableData)._(); + __("var tasksTableData=" + tasksTableData).__(); - tbody._()._(); + tbody.__().__(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java index e2f12dc4d16..8fce395163b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java @@ -24,7 +24,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; public class TasksPage extends AppView { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); set(DATATABLES_ID, "tasks"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}"); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java index 6c5e6043888..caf8c6718a9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java @@ -21,6 +21,9 @@ package org.apache.hadoop.mapreduce.jobhistory; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -62,6 +65,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; +import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -920,6 +924,104 @@ public class TestJobHistoryEventHandler { jheh.lastEventHandled.getHistoryEvent() instanceof JobUnsuccessfulCompletionEvent); } + + @Test (timeout=50000) + public void testSetTrackingURLAfterHistoryIsWritten() throws Exception { + TestParams t = new TestParams(true); + Configuration conf = new Configuration(); + + JHEvenHandlerForTest realJheh = + new JHEvenHandlerForTest(t.mockAppContext, 0, false); + JHEvenHandlerForTest jheh = spy(realJheh); + jheh.init(conf); + + try { + jheh.start(); + handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent( + t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1))); + verify(jheh, times(0)).processDoneFiles(any(JobId.class)); + 
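The tests added in this hunk pin down the new contract around the job-history copy: the JHS tracking URL is published on the AppContext only after moveToDoneNow() reports a successful copy to the done directory, and RMCommunicator later reads that value back when the AM unregisters. A condensed, hypothetical sketch of that ordering, assuming it sits inside JobHistoryEventHandler where moveToDoneNow() and getConfig() are available; it is not the literal patch body:

    private void publishHistoryUrlIfCopied(AppContext context, Path tmpPath,
        Path donePath) throws IOException {
      // moveToDoneNow() now returns whether the history file reached the done dir.
      boolean copied = moveToDoneNow(tmpPath, donePath);
      if (copied) {
        // Advertise the JobHistoryServer URL only once the file is really there;
        // RMCommunicator picks it up via context.getHistoryUrl() at unregistration.
        context.setHistoryUrl(MRWebAppUtil.getApplicationWebURLOnJHSWithScheme(
            getConfig(), context.getApplicationID()));
      }
    }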
verify(t.mockAppContext, times(0)).setHistoryUrl(any(String.class)); + + // Job finishes and successfully writes history + handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent( + TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), + new Counters(), new Counters()))); + + verify(jheh, times(1)).processDoneFiles(any(JobId.class)); + String historyUrl = MRWebAppUtil.getApplicationWebURLOnJHSWithScheme( + conf, t.mockAppContext.getApplicationID()); + verify(t.mockAppContext, times(1)).setHistoryUrl(historyUrl); + } finally { + jheh.stop(); + } + } + + @Test (timeout=50000) + public void testDontSetTrackingURLIfHistoryWriteFailed() throws Exception { + TestParams t = new TestParams(true); + Configuration conf = new Configuration(); + + JHEvenHandlerForTest realJheh = + new JHEvenHandlerForTest(t.mockAppContext, 0, false); + JHEvenHandlerForTest jheh = spy(realJheh); + jheh.init(conf); + + try { + jheh.start(); + doReturn(false).when(jheh).moveToDoneNow(any(Path.class), + any(Path.class)); + doNothing().when(jheh).moveTmpToDone(any(Path.class)); + handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent( + t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1))); + verify(jheh, times(0)).processDoneFiles(any(JobId.class)); + verify(t.mockAppContext, times(0)).setHistoryUrl(any(String.class)); + + // Job finishes, but doesn't successfully write history + handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent( + TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), + new Counters(), new Counters()))); + verify(jheh, times(1)).processDoneFiles(any(JobId.class)); + verify(t.mockAppContext, times(0)).setHistoryUrl(any(String.class)); + + } finally { + jheh.stop(); + } + } + @Test (timeout=50000) + public void testDontSetTrackingURLIfHistoryWriteThrows() throws Exception { + TestParams t = new TestParams(true); + Configuration conf = new Configuration(); + + JHEvenHandlerForTest realJheh = + new JHEvenHandlerForTest(t.mockAppContext, 0, false); + JHEvenHandlerForTest jheh = spy(realJheh); + jheh.init(conf); + + try { + jheh.start(); + doThrow(new YarnRuntimeException(new IOException())) + .when(jheh).processDoneFiles(any(JobId.class)); + handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent( + t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1))); + verify(jheh, times(0)).processDoneFiles(any(JobId.class)); + verify(t.mockAppContext, times(0)).setHistoryUrl(any(String.class)); + + // Job finishes, but doesn't successfully write history + try { + handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent( + TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), + new Counters(), new Counters()))); + throw new RuntimeException( + "processDoneFiles didn't throw, but should have"); + } catch (YarnRuntimeException yre) { + // Exception expected, do nothing + } + verify(jheh, times(1)).processDoneFiles(any(JobId.class)); + verify(t.mockAppContext, times(0)).setHistoryUrl(any(String.class)); + } finally { + jheh.stop(); + } + } } class JHEvenHandlerForTest extends JobHistoryEventHandler { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java index 4e31b63da53..06866337263 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java @@ -154,4 +154,14 @@ public class MockAppContext implements AppContext { return null; } + @Override + public String getHistoryUrl() { + return null; + } + + @Override + public void setHistoryUrl(String historyUrl) { + return; + } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java index d122a9b2469..5af79d6f73b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java @@ -124,20 +124,20 @@ public class TestJobEndNotifier extends JobEndNotifier { proxyToUse.type() == Proxy.Type.DIRECT); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost:1000"); setConf(conf); - Assert.assertTrue("Proxy should have been set but wasn't ", - proxyToUse.toString().equals("HTTP @ somehost:1000")); + Assert.assertEquals("Proxy should have been set but wasn't ", + "HTTP @ somehost:1000", proxyToUse.toString()); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "socks@somehost:1000"); setConf(conf); - Assert.assertTrue("Proxy should have been socks but wasn't ", - proxyToUse.toString().equals("SOCKS @ somehost:1000")); + Assert.assertEquals("Proxy should have been socks but wasn't ", + "SOCKS @ somehost:1000", proxyToUse.toString()); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "SOCKS@somehost:1000"); setConf(conf); - Assert.assertTrue("Proxy should have been socks but wasn't ", - proxyToUse.toString().equals("SOCKS @ somehost:1000")); + Assert.assertEquals("Proxy should have been socks but wasn't ", + "SOCKS @ somehost:1000", proxyToUse.toString()); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "sfafn@somehost:1000"); setConf(conf); - Assert.assertTrue("Proxy should have been http but wasn't ", - proxyToUse.toString().equals("HTTP @ somehost:1000")); + Assert.assertEquals("Proxy should have been http but wasn't ", + "HTTP @ somehost:1000", proxyToUse.toString()); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index 8c7f0db4a9a..301d498891b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -896,5 +896,15 @@ public class TestRuntimeEstimators { public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() { return null; } + + @Override + public String getHistoryUrl() { + return null; + } + + @Override + public void 
setHistoryUrl(String historyUrl) { + return; + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java index 2147ec1b052..1827ce4d511 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java @@ -564,33 +564,13 @@ public class TestJobImpl { dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler); // replace the tasks with spied versions to return the right attempts - Map spiedTasks = new HashMap(); - List nodeReports = new ArrayList(); - Map nodeReportsToTaskIds = - new HashMap(); - for (Map.Entry e: job.tasks.entrySet()) { - TaskId taskId = e.getKey(); - Task task = e.getValue(); - if (taskId.getTaskType() == TaskType.MAP) { - // add an attempt to the task to simulate nodes - NodeId nodeId = mock(NodeId.class); - TaskAttempt attempt = mock(TaskAttempt.class); - when(attempt.getNodeId()).thenReturn(nodeId); - TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0); - when(attempt.getID()).thenReturn(attemptId); - // create a spied task - Task spied = spy(task); - doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class)); - spiedTasks.put(taskId, spied); + Map spiedTasks = new HashMap<>(); + List nodeReports = new ArrayList<>(); + Map nodeReportsToTaskIds = new HashMap<>(); + + createSpiedMapTasks(nodeReportsToTaskIds, spiedTasks, job, + NodeState.UNHEALTHY, nodeReports); - // create a NodeReport based on the node id - NodeReport report = mock(NodeReport.class); - when(report.getNodeState()).thenReturn(NodeState.UNHEALTHY); - when(report.getNodeId()).thenReturn(nodeId); - nodeReports.add(report); - nodeReportsToTaskIds.put(report, taskId); - } - } // replace the tasks with the spied tasks job.tasks.putAll(spiedTasks); @@ -641,6 +621,82 @@ public class TestJobImpl { commitHandler.stop(); } + @Test + public void testJobNCompletedWhenAllReducersAreFinished() + throws Exception { + testJobCompletionWhenReducersAreFinished(true); + } + + @Test + public void testJobNotCompletedWhenAllReducersAreFinished() + throws Exception { + testJobCompletionWhenReducersAreFinished(false); + } + + private void testJobCompletionWhenReducersAreFinished(boolean killMappers) + throws InterruptedException, BrokenBarrierException { + Configuration conf = new Configuration(); + conf.setBoolean(MRJobConfig.FINISH_JOB_WHEN_REDUCERS_DONE, killMappers); + conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir); + conf.setInt(MRJobConfig.NUM_REDUCES, 1); + DrainDispatcher dispatcher = new DrainDispatcher(); + dispatcher.init(conf); + final List killedEvents = + Collections.synchronizedList(new ArrayList()); + dispatcher.register(TaskEventType.class, new EventHandler() { + @Override + public void handle(TaskEvent event) { + if (event.getType() == TaskEventType.T_KILL) { + killedEvents.add(event); + } + } + }); + dispatcher.start(); + CyclicBarrier syncBarrier = new CyclicBarrier(2); + OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true); + CommitterEventHandler commitHandler = + createCommitterEventHandler(dispatcher, committer); + 
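For orientation while reading this test: the production change it drives is the checkReadyForCompletionWhenAllReducersDone() transition added to JobImpl earlier in this patch. Re-sketched below in condensed form with the patch's own fields and event types; the helper name is illustrative and this is not the literal hunk:

    private static void killLingeringMappersIfReducersDone(JobImpl job) {
      // Guarded by the new MRJobConfig.FINISH_JOB_WHEN_REDUCERS_DONE flag.
      if (job.finishJobWhenReducersDone
          && job.getTotalReduces() > 0
          && job.getTotalReduces() == job.getCompletedReduces()
          && !job.completingJob) {
        for (TaskId mapTaskId : job.mapTasks) {
          Task task = job.tasks.get(mapTaskId);
          if (!task.isFinished()) {
            // A mapper that was rescheduled (e.g. after its node went UNHEALTHY)
            // no longer holds the job open once every reducer has succeeded.
            job.eventHandler.handle(
                new TaskEvent(task.getID(), TaskEventType.T_KILL));
          }
        }
        job.completingJob = true;
      }
    }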
commitHandler.init(conf); + commitHandler.start(); + + final JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null); + + // replace the tasks with spied versions to return the right attempts + Map spiedTasks = new HashMap<>(); + List nodeReports = new ArrayList<>(); + Map nodeReportsToTaskIds = new HashMap<>(); + + createSpiedMapTasks(nodeReportsToTaskIds, spiedTasks, job, + NodeState.RUNNING, nodeReports); + + // replace the tasks with the spied tasks + job.tasks.putAll(spiedTasks); + + // finish reducer + for (TaskId taskId: job.tasks.keySet()) { + if (taskId.getTaskType() == TaskType.REDUCE) { + job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED)); + } + } + + dispatcher.await(); + + /* + * StubbedJob cannot finish in this test - we'd have to generate the + * necessary events in this test manually, but that wouldn't add too + * much value. Instead, we validate the T_KILL events. + */ + if (killMappers) { + Assert.assertEquals("Number of killed events", 2, killedEvents.size()); + Assert.assertEquals("AttemptID", "task_1234567890000_0001_m_000000", + killedEvents.get(0).getTaskID().toString()); + Assert.assertEquals("AttemptID", "task_1234567890000_0001_m_000001", + killedEvents.get(1).getTaskID().toString()); + } else { + Assert.assertEquals("Number of killed events", 0, killedEvents.size()); + } + } + public static void main(String[] args) throws Exception { TestJobImpl t = new TestJobImpl(); t.testJobNoTasks(); @@ -1021,6 +1077,37 @@ public class TestJobImpl { Assert.assertEquals(state, job.getInternalState()); } + private void createSpiedMapTasks(Map + nodeReportsToTaskIds, Map spiedTasks, JobImpl job, + NodeState nodeState, List nodeReports) { + for (Map.Entry e: job.tasks.entrySet()) { + TaskId taskId = e.getKey(); + Task task = e.getValue(); + if (taskId.getTaskType() == TaskType.MAP) { + // add an attempt to the task to simulate nodes + NodeId nodeId = mock(NodeId.class); + TaskAttempt attempt = mock(TaskAttempt.class); + when(attempt.getNodeId()).thenReturn(nodeId); + TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0); + when(attempt.getID()).thenReturn(attemptId); + // create a spied task + Task spied = spy(task); + Map attemptMap = new HashMap<>(); + attemptMap.put(attemptId, attempt); + when(spied.getAttempts()).thenReturn(attemptMap); + doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class)); + spiedTasks.put(taskId, spied); + + // create a NodeReport based on the node id + NodeReport report = mock(NodeReport.class); + when(report.getNodeState()).thenReturn(nodeState); + when(report.getNodeId()).thenReturn(nodeId); + nodeReports.add(report); + nodeReportsToTaskIds.put(report, taskId); + } + } + } + private static class JobSubmittedEventHandler implements EventHandler { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java index 1520929325e..a93bf88df21 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java @@ -31,6 +31,8 @@ import java.util.concurrent.ThreadPoolExecutor; 
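The two container-launcher tests whose hunks follow implement ContainerManagementProtocol directly, so they gain the newly introduced updateContainer() call and mark their increaseContainersResource() overrides as deprecated. The shape of the stubs the following hunks add, gathered here in one place (both are no-ops returning null, as in the tests):

    @Override
    @Deprecated
    public IncreaseContainersResourceResponse increaseContainersResource(
        IncreaseContainersResourceRequest request) throws YarnException, IOException {
      return null; // superseded by updateContainer()
    }

    @Override
    public ContainerUpdateResponse updateContainer(ContainerUpdateRequest request)
        throws YarnException, IOException {
      return null; // no-op stub; these tests do not exercise container updates
    }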
import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest; @@ -462,6 +464,7 @@ public class TestContainerLauncher { } @Override + @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( IncreaseContainersResourceRequest request) throws IOException, IOException { @@ -506,5 +509,11 @@ public class TestContainerLauncher { throws YarnException, IOException { return null; } + + @Override + public ContainerUpdateResponse updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { + return null; + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java index 225570ca8b5..53af631f9db 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java @@ -47,6 +47,8 @@ import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher.EventType; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; @@ -465,6 +467,7 @@ public class TestContainerLauncherImpl { } @Override + @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( IncreaseContainersResourceRequest request) throws YarnException, IOException { @@ -511,6 +514,12 @@ public class TestContainerLauncherImpl { throws YarnException, IOException { return null; } + + @Override + public ContainerUpdateResponse updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { + return null; + } } @SuppressWarnings("serial") diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java index bc05c623e2f..6c51626af9d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java @@ -329,8 +329,7 @@ public class TestRMContainerAllocator { for(TaskAttemptContainerAssignedEvent event : assigned) { if(event.getTaskAttemptID().equals(event3.getAttemptID())) { assigned.remove(event); - Assert.assertTrue( - event.getContainer().getNodeId().getHost().equals("h3")); + Assert.assertEquals("h3", event.getContainer().getNodeId().getHost()); break; } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml index db8ae49d9bd..b88b01270ff 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml @@ -46,10 +46,6 @@ org.apache.hadoop hadoop-mapreduce-client-core - - org.apache.hadoop - hadoop-yarn-server-common - diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java index fbf6806bdbd..4245daf6ace 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java @@ -213,15 +213,15 @@ public class Cluster { public Job getJob(JobID jobId) throws IOException, InterruptedException { JobStatus status = client.getJobStatus(jobId); if (status != null) { - final JobConf conf = new JobConf(); - final Path jobPath = new Path(client.getFilesystemName(), - status.getJobFile()); - final FileSystem fs = FileSystem.get(jobPath.toUri(), getConf()); + JobConf conf; try { - conf.addResource(fs.open(jobPath), jobPath.toString()); - } catch (FileNotFoundException fnf) { - if (LOG.isWarnEnabled()) { - LOG.warn("Job conf missing on cluster", fnf); + conf = new JobConf(status.getJobFile()); + } catch (RuntimeException ex) { + // If job file doesn't exist it means we can't find the job + if (ex.getCause() instanceof FileNotFoundException) { + return null; + } else { + throw ex; } } return Job.getInstance(this, status, conf); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index 6ade376f01d..e5ff26d93d8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -52,7 +52,6 @@ import org.apache.hadoop.mapred.QueueACL; import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName; -import org.apache.hadoop.mapreduce.counters.Limits; import org.apache.hadoop.mapreduce.filecache.DistributedCache; import org.apache.hadoop.mapreduce.protocol.ClientProtocol; import org.apache.hadoop.mapreduce.security.TokenCache; @@ -246,7 +245,6 @@ class JobSubmitter { // Write job 
file to submit dir writeConf(conf, submitJobFile); - Limits.reset(conf); // // Now, actually submit the job (using the submit name) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java index cfc1bcc72a6..2023ba3b1d2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java @@ -431,7 +431,7 @@ public interface MRJobConfig { public static final String JOB_ACL_MODIFY_JOB = "mapreduce.job.acl-modify-job"; public static final String DEFAULT_JOB_ACL_MODIFY_JOB = " "; - + public static final String JOB_RUNNING_MAP_LIMIT = "mapreduce.job.running.map.limit"; public static final int DEFAULT_JOB_RUNNING_MAP_LIMIT = 0; @@ -1033,4 +1033,8 @@ public interface MRJobConfig { String MR_JOB_REDACTED_PROPERTIES = "mapreduce.job.redacted-properties"; String MR_JOB_SEND_TOKEN_CONF = "mapreduce.job.send-token-conf"; + + String FINISH_JOB_WHEN_REDUCERS_DONE = + "mapreduce.job.finish-when-all-reducers-done"; + boolean DEFAULT_FINISH_JOB_WHEN_REDUCERS_DONE = true; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java index e6e74da1190..4ab7e8990c6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java @@ -307,10 +307,6 @@ public abstract class AbstractCounters 0) { limits.checkGroups(groups.size() + 1); G group = groupFactory.newGenericGroup( diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java index 9546c8d7632..34b0fae6e82 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java @@ -123,16 +123,4 @@ public class Limits { public synchronized LimitExceededException violation() { return firstViolation; } - - // This allows initialization of global settings and not for an instance - public static synchronized void reset(Configuration conf) { - isInited = false; - init(conf); - } - - // This allows resetting of an instance to allow reuse - public synchronized void reset() { - totalCounters = 0; - firstViolation = null; - } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java index 9f8edb5df0d..ada14db944c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/filecache/ClientDistributedCacheManager.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.security.TokenCache; import org.apache.hadoop.security.Credentials; @@ -293,7 +292,6 @@ public class ClientDistributedCacheManager { private static boolean checkPermissionOfOther(FileSystem fs, Path path, FsAction action, Map statCache) throws IOException { FileStatus status = getFileStatus(fs, path.toUri(), statCache); - FsPermission perms = status.getPermission(); // Encrypted files are always treated as private. This stance has two // important side effects. The first is that the encrypted files will be @@ -302,8 +300,8 @@ public class ClientDistributedCacheManager { // world readable permissions that is stored in an encryption zone from // being localized as a publicly shared file with world readable // permissions. - if (!perms.getEncryptedBit()) { - FsAction otherAction = perms.getOtherAction(); + if (!status.isEncrypted()) { + FsAction otherAction = status.getPermission().getOtherAction(); if (otherAction.implies(action)) { return true; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java index e5db2e5a1c2..5f10fdfa97c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.mapreduce.jobhistory; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintStream; import java.util.HashMap; @@ -25,8 +24,6 @@ import java.util.Map; import java.util.Set; import java.util.TreeSet; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -36,7 +33,6 @@ import org.apache.hadoop.mapred.TaskStatus; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskID; import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.counters.Limits; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo; import org.apache.hadoop.mapreduce.util.HostUtil; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; @@ -49,7 +45,6 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils; @InterfaceAudience.Private 
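[Editor's note, not part of the patch] The ClientDistributedCacheManager change above switches the public-visibility test from the raw permission bits to FileStatus.isEncrypted(). A small, hedged sketch of that rule, using only the FileSystem/FileStatus calls shown in the hunk; the class name and the command-line path argument are placeholders:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;

    public final class WorldReadableCheck {
      // Encrypted files are always treated as private; otherwise the
      // "other" permission decides whether the file may be shared publicly.
      static boolean isWorldReadable(FileSystem fs, Path path) throws IOException {
        FileStatus status = fs.getFileStatus(path);
        if (status.isEncrypted()) {
          return false;
        }
        FsAction otherAction = status.getPermission().getOtherAction();
        return otherAction.implies(FsAction.READ);
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        System.out.println(isWorldReadable(fs, new Path(args[0])));
      }
    }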
@InterfaceStability.Unstable public class HistoryViewer { - private static final Log LOG = LogFactory.getLog(HistoryViewer.class); private FileSystem fs; private JobInfo job; private HistoryViewerPrinter jhvp; @@ -89,17 +84,6 @@ public class HistoryViewer { System.err.println("Ignore unrecognized file: " + jobFile.getName()); throw new IOException(errorMsg); } - final Path jobConfPath = new Path(jobFile.getParent(), jobDetails[0] - + "_" + jobDetails[1] + "_" + jobDetails[2] + "_conf.xml"); - final Configuration jobConf = new Configuration(conf); - try { - jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString()); - Limits.reset(jobConf); - } catch (FileNotFoundException fnf) { - if (LOG.isWarnEnabled()) { - LOG.warn("Missing job conf in history", fnf); - } - } JobHistoryParser parser = new JobHistoryParser(fs, jobFile); job = parser.parse(); String scheme = WebAppUtils.getHttpSchemePrefix(fs.getConf()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java index 1732d9130a8..175296725ff 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java @@ -60,7 +60,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent { int[] physMemKbytes; private static final Counters EMPTY_COUNTERS = new Counters(); - /** + /** * Create an event to record the unsuccessful completion of attempts * @param id Attempt ID * @param taskType Type of the task @@ -74,7 +74,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent { * @param allSplits the "splits", or a pixelated graph of various * measurable worker node state variables against progress. * Currently there are four; wallclock time, CPU time, - * virtual memory and physical memory. + * virtual memory and physical memory. */ public TaskAttemptUnsuccessfulCompletionEvent (TaskAttemptID id, TaskType taskType, @@ -101,7 +101,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent { ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits); } - /** + /** * @deprecated please use the constructor with an additional * argument, an array of splits arrays instead. 
See * {@link org.apache.hadoop.mapred.ProgressSplitsBlock} @@ -117,19 +117,19 @@ public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent { */ public TaskAttemptUnsuccessfulCompletionEvent (TaskAttemptID id, TaskType taskType, - String status, long finishTime, + String status, long finishTime, String hostname, String error) { this(id, taskType, status, finishTime, hostname, -1, "", error, EMPTY_COUNTERS, null); } - + public TaskAttemptUnsuccessfulCompletionEvent (TaskAttemptID id, TaskType taskType, String status, long finishTime, String hostname, int port, String rackName, String error, int[][] allSplits) { this(id, taskType, status, finishTime, hostname, port, - rackName, error, EMPTY_COUNTERS, null); + rackName, error, EMPTY_COUNTERS, allSplits); } TaskAttemptUnsuccessfulCompletionEvent() {} @@ -162,9 +162,9 @@ public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent { } return datum; } - - - + + + public void setDatum(Object odatum) { this.datum = (TaskAttemptUnsuccessfulCompletion)odatum; @@ -208,12 +208,12 @@ public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent { public String getHostname() { return hostname; } /** Get the rpc port for the host where the attempt executed */ public int getPort() { return port; } - + /** Get the rack name of the node where the attempt ran */ public String getRackName() { return rackName == null ? null : rackName.toString(); } - + /** Get the error string */ public String getError() { return error.toString(); } /** Get the task status */ @@ -224,12 +224,12 @@ public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent { Counters getCounters() { return counters; } /** Get the event type */ public EventType getEventType() { - // Note that the task type can be setup/map/reduce/cleanup but the + // Note that the task type can be setup/map/reduce/cleanup but the // attempt-type can only be map/reduce. // find out if the task failed or got killed boolean failed = TaskStatus.State.FAILED.toString().equals(getTaskStatus()); - return getTaskId().getTaskType() == TaskType.MAP - ? (failed + return getTaskId().getTaskType() == TaskType.MAP + ? (failed ? EventType.MAP_ATTEMPT_FAILED : EventType.MAP_ATTEMPT_KILLED) : (failed diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index 101aa07e681..ee9b906faa9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -1125,6 +1125,14 @@ + + mapreduce.job.finish-when-all-reducers-done + true + Specifies whether the job should complete once all reducers + have finished, regardless of whether there are still running mappers. 
+ + + mapreduce.job.token.tracking.ids.enabled false diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java index fb2d5e0cd1d..796bbee2920 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestQueue.java @@ -74,13 +74,13 @@ public class TestQueue { assertTrue(root.getChildren().size() == 2); Iterator iterator = root.getChildren().iterator(); Queue firstSubQueue = iterator.next(); - assertTrue(firstSubQueue.getName().equals("first")); + assertEquals("first", firstSubQueue.getName()); assertEquals( firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job") .toString(), "Users [user1, user2] and members of the groups [group1, group2] are allowed"); Queue secondSubQueue = iterator.next(); - assertTrue(secondSubQueue.getName().equals("second")); + assertEquals("second", secondSubQueue.getName()); assertEquals(secondSubQueue.getProperties().getProperty("key"), "value"); assertEquals(secondSubQueue.getProperties().getProperty("key1"), "value1"); // test status @@ -207,13 +207,13 @@ public class TestQueue { assertTrue(root.getChildren().size() == 2); Iterator iterator = root.getChildren().iterator(); Queue firstSubQueue = iterator.next(); - assertTrue(firstSubQueue.getName().equals("first")); + assertEquals("first", firstSubQueue.getName()); assertEquals( firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job") .toString(), "Users [user1, user2] and members of the groups [group1, group2] are allowed"); Queue secondSubQueue = iterator.next(); - assertTrue(secondSubQueue.getName().equals("second")); + assertEquals("second", secondSubQueue.getName()); assertEquals(firstSubQueue.getState().getStateName(), "running"); assertEquals(secondSubQueue.getState().getStateName(), "stopped"); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java index 4deb9ae2f0d..bbb126dff83 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java @@ -18,7 +18,6 @@ package org.apache.hadoop.mapreduce.v2.hs; -import java.io.FileNotFoundException; import java.io.IOException; import java.net.UnknownHostException; import java.util.ArrayList; @@ -35,7 +34,6 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.JobACLsManager; import org.apache.hadoop.mapred.TaskCompletionEvent; @@ -43,7 +41,6 @@ import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.TaskID; import org.apache.hadoop.mapreduce.TypeConverter; 
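[Editor's note, not part of the patch] A hedged usage sketch for the mapreduce.job.finish-when-all-reducers-done switch introduced above (MRJobConfig.FINISH_JOB_WHEN_REDUCERS_DONE, default true). The job name and the omitted mapper/reducer/path setup are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class KeepMappersRunningSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Opt out of the new default: do not declare the job finished (and
        // kill the remaining mappers) as soon as all reducers have succeeded.
        conf.setBoolean(MRJobConfig.FINISH_JOB_WHEN_REDUCERS_DONE, false);
        Job job = Job.getInstance(conf, "keep-mappers-running");
        // ... configure mapper, reducer, input and output paths here ...
      }
    }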
-import org.apache.hadoop.mapreduce.counters.Limits; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; @@ -353,19 +350,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job if (historyFileAbsolute != null) { JobHistoryParser parser = null; try { - final FileSystem fs = historyFileAbsolute.getFileSystem(conf); parser = createJobHistoryParser(historyFileAbsolute); - final Path jobConfPath = new Path(historyFileAbsolute.getParent(), - JobHistoryUtils.getIntermediateConfFileName(jobId)); - final Configuration conf = new Configuration(); - try { - conf.addResource(fs.open(jobConfPath), jobConfPath.toString()); - Limits.reset(conf); - } catch (FileNotFoundException fnf) { - if (LOG.isWarnEnabled()) { - LOG.warn("Missing job conf in history", fnf); - } - } this.jobInfo = parser.parse(); } catch (IOException e) { throw new YarnRuntimeException("Could not load history file " diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java index c5a40b26e3c..2671df407ff 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java @@ -407,4 +407,14 @@ public class JobHistory extends AbstractService implements HistoryContext { public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() { return null; } + + @Override + public String getHistoryUrl() { + return null; + } + + @Override + public void setHistoryUrl(String historyUrl) { + return; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java index f607599a922..d544c6ba7cd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java @@ -21,7 +21,6 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; -import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer; import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.SubView; @@ -36,7 +35,7 @@ public class HsAboutPage extends HsView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); //override the nav config from commonPReHead set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); @@ -49,9 +48,9 @@ public 
class HsAboutPage extends HsView { @Override protected Class content() { HistoryInfo info = new HistoryInfo(); info("History Server"). - _("BuildVersion", info.getHadoopBuildVersion() + __("BuildVersion", info.getHadoopBuildVersion() + " on " + info.getHadoopVersionBuiltOn()). - _("History Server started on", Times.format(info.getStartedOn())); + __("History Server started on", Times.format(info.getStartedOn())); return InfoBlock.class; } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java index 8431e2209b1..c08ee5c2f59 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsConfPage.java @@ -39,7 +39,7 @@ public class HsConfPage extends HsView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { String jobID = $(JOB_ID); set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID" : join("Configuration for MapReduce Job ", $(JOB_ID))); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java index e70a668be0d..1632a97ba86 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsCountersPage.java @@ -32,7 +32,7 @@ public class HsCountersPage extends HsView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); setActiveNavColumnForTask(); set(DATATABLES_SELECTOR, "#counters .dt-counters"); @@ -44,7 +44,7 @@ public class HsCountersPage extends HsView { * (non-Javadoc) * @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#postHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void postHead(Page.HTML<_> html) { + @Override protected void postHead(Page.HTML<__> html) { html. 
style("#counters, .dt-counters { table-layout: fixed }", "#counters th { overflow: hidden; vertical-align: middle }", diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java index 0d5b03a3ac0..18040f00440 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java @@ -43,9 +43,9 @@ import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.ResponseInfo; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -69,38 +69,38 @@ public class HsJobBlock extends HtmlBlock { String jid = $(JOB_ID); if (jid.isEmpty()) { html. - p()._("Sorry, can't do anything without a JobID.")._(); + p().__("Sorry, can't do anything without a JobID.").__(); return; } JobId jobID = MRApps.toJobID(jid); Job j = appContext.getJob(jobID); if (j == null) { - html.p()._("Sorry, ", jid, " not found.")._(); + html.p().__("Sorry, ", jid, " not found.").__(); return; } if(j instanceof UnparsedJob) { final int taskCount = j.getTotalMaps() + j.getTotalReduces(); UnparsedJob oversizedJob = (UnparsedJob) j; - html.p()._("The job has a total of " + taskCount + " tasks. ") - ._("Any job larger than " + oversizedJob.getMaxTasksAllowed() + - " will not be loaded.")._(); - html.p()._("You can either use the CLI tool: 'mapred job -history'" + html.p().__("The job has a total of " + taskCount + " tasks. ") + .__("Any job larger than " + oversizedJob.getMaxTasksAllowed() + + " will not be loaded.").__(); + html.p().__("You can either use the CLI tool: 'mapred job -history'" + " to view large jobs or adjust the property " + - JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX + ".")._(); + JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX + ".").__(); return; } List amInfos = j.getAMInfos(); JobInfo job = new JobInfo(j); ResponseInfo infoBlock = info("Job Overview"). - _("Job Name:", job.getName()). - _("User Name:", job.getUserName()). - _("Queue:", job.getQueueName()). - _("State:", job.getState()). - _("Uberized:", job.isUber()). - _("Submitted:", new Date(job.getSubmitTime())). - _("Started:", job.getStartTimeStr()). - _("Finished:", new Date(job.getFinishTime())). - _("Elapsed:", StringUtils.formatTime( + __("Job Name:", job.getName()). + __("User Name:", job.getUserName()). + __("Queue:", job.getQueueName()). + __("State:", job.getState()). + __("Uberized:", job.isUber()). + __("Submitted:", new Date(job.getSubmitTime())). + __("Started:", job.getStartTimeStr()). + __("Finished:", new Date(job.getFinishTime())). 
+ __("Elapsed:", StringUtils.formatTime( Times.elapsed(job.getStartTime(), job.getFinishTime(), false))); String amString = @@ -117,19 +117,19 @@ public class HsJobBlock extends HtmlBlock { } if(job.getNumMaps() > 0) { - infoBlock._("Average Map Time", StringUtils.formatTime(job.getAvgMapTime())); + infoBlock.__("Average Map Time", StringUtils.formatTime(job.getAvgMapTime())); } if(job.getNumReduces() > 0) { - infoBlock._("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime())); - infoBlock._("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime())); - infoBlock._("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime())); + infoBlock.__("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime())); + infoBlock.__("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime())); + infoBlock.__("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime())); } for (ConfEntryInfo entry : job.getAcls()) { - infoBlock._("ACL "+entry.getName()+":", entry.getValue()); + infoBlock.__("ACL "+entry.getName()+":", entry.getValue()); } DIV div = html. - _(InfoBlock.class). + __(InfoBlock.class). div(_INFO_WRAP); // MRAppMasters Table @@ -137,13 +137,13 @@ public class HsJobBlock extends HtmlBlock { table. tr(). th(amString). - _(). + __(). tr(). th(_TH, "Attempt Number"). th(_TH, "Start Time"). th(_TH, "Node"). th(_TH, "Logs"). - _(); + __(); boolean odd = false; for (AMInfo amInfo : amInfos) { AMAttemptInfo attempt = new AMAttemptInfo(amInfo, @@ -153,13 +153,13 @@ public class HsJobBlock extends HtmlBlock { td(new Date(attempt.getStartTime()).toString()). td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(), attempt.getNodeHttpAddress()), - attempt.getNodeHttpAddress())._(). + attempt.getNodeHttpAddress()).__(). td().a(".logslink", url(attempt.getLogsLink()), - "logs")._(). - _(); + "logs").__(). + __(); } - table._(); - div._(); + table.__(); + div.__(); html.div(_INFO_WRAP). @@ -169,18 +169,18 @@ public class HsJobBlock extends HtmlBlock { tr(). th(_TH, "Task Type"). th(_TH, "Total"). - th(_TH, "Complete")._(). + th(_TH, "Complete").__(). tr(_ODD). th(). - a(url("tasks", jid, "m"), "Map")._(). + a(url("tasks", jid, "m"), "Map").__(). td(String.valueOf(String.valueOf(job.getMapsTotal()))). - td(String.valueOf(String.valueOf(job.getMapsCompleted())))._(). + td(String.valueOf(String.valueOf(job.getMapsCompleted()))).__(). tr(_EVEN). th(). - a(url("tasks", jid, "r"), "Reduce")._(). + a(url("tasks", jid, "r"), "Reduce").__(). td(String.valueOf(String.valueOf(job.getReducesTotal()))). - td(String.valueOf(String.valueOf(job.getReducesCompleted())))._() - ._(). + td(String.valueOf(String.valueOf(job.getReducesCompleted()))).__() + .__(). // Attempts table table("#job"). @@ -188,33 +188,33 @@ public class HsJobBlock extends HtmlBlock { th(_TH, "Attempt Type"). th(_TH, "Failed"). th(_TH, "Killed"). - th(_TH, "Successful")._(). + th(_TH, "Successful").__(). tr(_ODD). th("Maps"). td().a(url("attempts", jid, "m", TaskAttemptStateUI.FAILED.toString()), - String.valueOf(job.getFailedMapAttempts()))._(). + String.valueOf(job.getFailedMapAttempts())).__(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.KILLED.toString()), - String.valueOf(job.getKilledMapAttempts()))._(). + String.valueOf(job.getKilledMapAttempts())).__(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.SUCCESSFUL.toString()), - String.valueOf(job.getSuccessfulMapAttempts()))._(). - _(). + String.valueOf(job.getSuccessfulMapAttempts())).__(). + __(). tr(_EVEN). 
th("Reduces"). td().a(url("attempts", jid, "r", TaskAttemptStateUI.FAILED.toString()), - String.valueOf(job.getFailedReduceAttempts()))._(). + String.valueOf(job.getFailedReduceAttempts())).__(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.KILLED.toString()), - String.valueOf(job.getKilledReduceAttempts()))._(). + String.valueOf(job.getKilledReduceAttempts())).__(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.SUCCESSFUL.toString()), - String.valueOf(job.getSuccessfulReduceAttempts()))._(). - _(). - _(). - _(); + String.valueOf(job.getSuccessfulReduceAttempts())).__(). + __(). + __(). + __(); } static String addTaskLinks(String text) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java index 4c81a139895..f40c878336e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobPage.java @@ -34,7 +34,7 @@ public class HsJobPage extends HsView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { String jobID = $(JOB_ID); set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID" : join("MapReduce Job ", $(JOB_ID))); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java index b234ca3df03..ef563f6c9f7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java @@ -27,10 +27,10 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.util.Times; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; -import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.InputType; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -69,7 +69,7 @@ public class HsJobsBlock extends HtmlBlock { th("Maps Completed"). th("Reduces Total"). th("Reduces Completed"). - th("Elapsed Time")._()._(). + th("Elapsed Time").__().__(). 
tbody(); LOG.info("Getting list of all Jobs."); // Write all the data into a JavaScript array of arrays for JQuery @@ -105,38 +105,38 @@ public class HsJobsBlock extends HtmlBlock { } jobsTableData.append("]"); html.script().$type("text/javascript"). - _("var jobsTableData=" + jobsTableData)._(); - tbody._(). + __("var jobsTableData=" + jobsTableData).__(); + tbody.__(). tfoot(). tr(). th().input("search_init").$type(InputType.text) - .$name("submit_time").$value("Submit Time")._()._(). + .$name("submit_time").$value("Submit Time").__().__(). th().input("search_init").$type(InputType.text) - .$name("start_time").$value("Start Time")._()._(). + .$name("start_time").$value("Start Time").__().__(). th().input("search_init").$type(InputType.text) - .$name("finish_time").$value("Finish Time")._()._(). + .$name("finish_time").$value("Finish Time").__().__(). th().input("search_init").$type(InputType.text) - .$name("job_id").$value("Job ID")._()._(). + .$name("job_id").$value("Job ID").__().__(). th().input("search_init").$type(InputType.text) - .$name("name").$value("Name")._()._(). + .$name("name").$value("Name").__().__(). th().input("search_init").$type(InputType.text) - .$name("user").$value("User")._()._(). + .$name("user").$value("User").__().__(). th().input("search_init").$type(InputType.text) - .$name("queue").$value("Queue")._()._(). + .$name("queue").$value("Queue").__().__(). th().input("search_init").$type(InputType.text) - .$name("state").$value("State")._()._(). + .$name("state").$value("State").__().__(). th().input("search_init").$type(InputType.text) - .$name("maps_total").$value("Maps Total")._()._(). + .$name("maps_total").$value("Maps Total").__().__(). th().input("search_init").$type(InputType.text). - $name("maps_completed").$value("Maps Completed")._()._(). + $name("maps_completed").$value("Maps Completed").__().__(). th().input("search_init").$type(InputType.text). - $name("reduces_total").$value("Reduces Total")._()._(). + $name("reduces_total").$value("Reduces Total").__().__(). th().input("search_init").$type(InputType.text). - $name("reduces_completed").$value("Reduces Completed")._()._(). + $name("reduces_completed").$value("Reduces Completed").__().__(). th().input("search_init").$type(InputType.text). - $name("elapsed_time").$value("Elapsed Time")._()._(). - _(). - _(). - _(); + $name("elapsed_time").$value("Elapsed Time").__().__(). + __(). + __(). 
+ __(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java index f483dc96b04..2bee3ba9971 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsLogsPage.java @@ -26,7 +26,7 @@ public class HsLogsPage extends HsView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); setActiveNavColumnForTask(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java index 7e49d520e7f..9ef5a0f8cff 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsNavBlock.java @@ -20,8 +20,8 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp; import org.apache.hadoop.mapreduce.v2.app.webapp.App; import org.apache.hadoop.mapreduce.v2.util.MRApps; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -43,33 +43,33 @@ public class HsNavBlock extends HtmlBlock { div("#nav"). h3("Application"). ul(). - li().a(url("about"), "About")._(). - li().a(url("app"), "Jobs")._()._(); + li().a(url("about"), "About").__(). + li().a(url("app"), "Jobs").__().__(); if (app.getJob() != null) { String jobid = MRApps.toString(app.getJob().getID()); nav. h3("Job"). ul(). - li().a(url("job", jobid), "Overview")._(). - li().a(url("jobcounters", jobid), "Counters")._(). - li().a(url("conf", jobid), "Configuration")._(). - li().a(url("tasks", jobid, "m"), "Map tasks")._(). - li().a(url("tasks", jobid, "r"), "Reduce tasks")._()._(); + li().a(url("job", jobid), "Overview").__(). + li().a(url("jobcounters", jobid), "Counters").__(). + li().a(url("conf", jobid), "Configuration").__(). + li().a(url("tasks", jobid, "m"), "Map tasks").__(). + li().a(url("tasks", jobid, "r"), "Reduce tasks").__().__(); if (app.getTask() != null) { String taskid = MRApps.toString(app.getTask().getID()); nav. h3("Task"). ul(). - li().a(url("task", taskid), "Task Overview")._(). - li().a(url("taskcounters", taskid), "Counters")._()._(); + li().a(url("task", taskid), "Task Overview").__(). + li().a(url("taskcounters", taskid), "Counters").__().__(); } } nav. h3("Tools"). ul(). - li().a("/conf", "Configuration")._(). - li().a("/logs", "Local logs")._(). - li().a("/stacks", "Server stacks")._(). 
- li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._(); + li().a("/conf", "Configuration").__(). + li().a("/logs", "Local logs").__(). + li().a("/stacks", "Server stacks").__(). + li().a("/jmx?qry=Hadoop:*", "Server metrics").__().__().__(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java index 5f97b8fdb20..bc2c2c857ac 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsSingleCounterPage.java @@ -32,7 +32,7 @@ public class HsSingleCounterPage extends HsView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); setActiveNavColumnForTask(); set(DATATABLES_ID, "singleCounter"); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java index 6403e3bced6..c5117edb9e0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java @@ -39,16 +39,15 @@ import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TFOOT; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; -import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TFOOT; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.THEAD; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR; +import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.InputType; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; -import com.google.common.base.Joiner; import com.google.inject.Inject; /** @@ -110,7 +109,7 @@ public class HsTaskPage extends HsView { headRow.th("Elapsed Time"). 
th(".note", "Note"); - TBODY> tbody = headRow._()._().tbody(); + TBODY> tbody = headRow.__().__().tbody(); // Write all the data into a JavaScript array of arrays for JQuery // DataTables to display StringBuilder attemptsTableData = new StringBuilder("[\n"); @@ -182,55 +181,55 @@ public class HsTaskPage extends HsView { } attemptsTableData.append("]"); html.script().$type("text/javascript"). - _("var attemptsTableData=" + attemptsTableData)._(); + __("var attemptsTableData=" + attemptsTableData).__(); - TR>> footRow = tbody._().tfoot().tr(); + TR>> footRow = tbody.__().tfoot().tr(); footRow. th().input("search_init").$type(InputType.text). - $name("attempt_name").$value("Attempt")._()._(). + $name("attempt_name").$value("Attempt").__().__(). th().input("search_init").$type(InputType.text). - $name("attempt_state").$value("State")._()._(). + $name("attempt_state").$value("State").__().__(). th().input("search_init").$type(InputType.text). - $name("attempt_status").$value("Status")._()._(). + $name("attempt_status").$value("Status").__().__(). th().input("search_init").$type(InputType.text). - $name("attempt_node").$value("Node")._()._(). + $name("attempt_node").$value("Node").__().__(). th().input("search_init").$type(InputType.text). - $name("attempt_node").$value("Logs")._()._(). + $name("attempt_node").$value("Logs").__().__(). th().input("search_init").$type(InputType.text). - $name("attempt_start_time").$value("Start Time")._()._(); + $name("attempt_start_time").$value("Start Time").__().__(); if(type == TaskType.REDUCE) { footRow. th().input("search_init").$type(InputType.text). - $name("shuffle_time").$value("Shuffle Time")._()._(); + $name("shuffle_time").$value("Shuffle Time").__().__(); footRow. th().input("search_init").$type(InputType.text). - $name("merge_time").$value("Merge Time")._()._(); + $name("merge_time").$value("Merge Time").__().__(); } footRow. th().input("search_init").$type(InputType.text). - $name("attempt_finish").$value("Finish Time")._()._(); + $name("attempt_finish").$value("Finish Time").__().__(); if(type == TaskType.REDUCE) { footRow. th().input("search_init").$type(InputType.text). - $name("elapsed_shuffle_time").$value("Elapsed Shuffle Time")._()._(); + $name("elapsed_shuffle_time").$value("Elapsed Shuffle Time").__().__(); footRow. th().input("search_init").$type(InputType.text). - $name("elapsed_merge_time").$value("Elapsed Merge Time")._()._(); + $name("elapsed_merge_time").$value("Elapsed Merge Time").__().__(); footRow. th().input("search_init").$type(InputType.text). - $name("elapsed_reduce_time").$value("Elapsed Reduce Time")._()._(); + $name("elapsed_reduce_time").$value("Elapsed Reduce Time").__().__(); } footRow. th().input("search_init").$type(InputType.text). - $name("attempt_elapsed").$value("Elapsed Time")._()._(). + $name("attempt_elapsed").$value("Elapsed Time").__().__(). th().input("search_init").$type(InputType.text). 
- $name("note").$value("Note")._()._(); + $name("note").$value("Note").__().__(); - footRow._()._()._(); + footRow.__().__().__(); } protected String getAttemptId(TaskId taskId, TaskAttemptInfo ta) { @@ -256,7 +255,7 @@ public class HsTaskPage extends HsView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); //override the nav config from commonPReHead set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}"); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java index 9511c06724d..702c13c01de 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java @@ -28,14 +28,13 @@ import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TFOOT; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; -import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TFOOT; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.THEAD; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR; +import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.InputType; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -74,10 +73,10 @@ public class HsTasksBlock extends HtmlBlock { //Create the spanning row int attemptColSpan = type == TaskType.REDUCE ? 8 : 3; thead.tr(). - th().$colspan(5).$class("ui-state-default")._("Task")._(). + th().$colspan(5).$class("ui-state-default").__("Task").__(). th().$colspan(attemptColSpan).$class("ui-state-default"). - _("Successful Attempt")._(). - _(); + __("Successful Attempt").__(). + __(); TR>> theadRow = thead. tr(). @@ -102,7 +101,7 @@ public class HsTasksBlock extends HtmlBlock { } theadRow.th("Elapsed Time"); //Attempt - TBODY> tbody = theadRow._()._().tbody(); + TBODY> tbody = theadRow.__().__().tbody(); // Write all the data into a JavaScript array of arrays for JQuery // DataTables to display @@ -173,41 +172,41 @@ public class HsTasksBlock extends HtmlBlock { } tasksTableData.append("]"); html.script().$type("text/javascript"). 
- _("var tasksTableData=" + tasksTableData)._(); + __("var tasksTableData=" + tasksTableData).__(); - TR>> footRow = tbody._().tfoot().tr(); + TR>> footRow = tbody.__().tfoot().tr(); footRow.th().input("search_init").$type(InputType.text).$name("task") - .$value("ID")._()._().th().input("search_init").$type(InputType.text) - .$name("state").$value("State")._()._().th().input("search_init") - .$type(InputType.text).$name("start_time").$value("Start Time")._()._() + .$value("ID").__().__().th().input("search_init").$type(InputType.text) + .$name("state").$value("State").__().__().th().input("search_init") + .$type(InputType.text).$name("start_time").$value("Start Time").__().__() .th().input("search_init").$type(InputType.text).$name("finish_time") - .$value("Finish Time")._()._().th().input("search_init") - .$type(InputType.text).$name("elapsed_time").$value("Elapsed Time")._() - ._().th().input("search_init").$type(InputType.text) - .$name("attempt_start_time").$value("Start Time")._()._(); + .$value("Finish Time").__().__().th().input("search_init") + .$type(InputType.text).$name("elapsed_time").$value("Elapsed Time").__() + .__().th().input("search_init").$type(InputType.text) + .$name("attempt_start_time").$value("Start Time").__().__(); if(type == TaskType.REDUCE) { footRow.th().input("search_init").$type(InputType.text) - .$name("shuffle_time").$value("Shuffle Time")._()._(); + .$name("shuffle_time").$value("Shuffle Time").__().__(); footRow.th().input("search_init").$type(InputType.text) - .$name("merge_time").$value("Merge Time")._()._(); + .$name("merge_time").$value("Merge Time").__().__(); } footRow.th().input("search_init").$type(InputType.text) - .$name("attempt_finish").$value("Finish Time")._()._(); + .$name("attempt_finish").$value("Finish Time").__().__(); if(type == TaskType.REDUCE) { footRow.th().input("search_init").$type(InputType.text) - .$name("elapsed_shuffle_time").$value("Elapsed Shuffle Time")._()._(); + .$name("elapsed_shuffle_time").$value("Elapsed Shuffle Time").__().__(); footRow.th().input("search_init").$type(InputType.text) - .$name("elapsed_merge_time").$value("Elapsed Merge Time")._()._(); + .$name("elapsed_merge_time").$value("Elapsed Merge Time").__().__(); footRow.th().input("search_init").$type(InputType.text) - .$name("elapsed_reduce_time").$value("Elapsed Reduce Time")._()._(); + .$name("elapsed_reduce_time").$value("Elapsed Reduce Time").__().__(); } footRow.th().input("search_init").$type(InputType.text) - .$name("attempt_elapsed").$value("Elapsed Time")._()._(); + .$name("attempt_elapsed").$value("Elapsed Time").__().__(); - footRow._()._()._(); + footRow.__().__().__(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java index 3c3386e6741..d0885918994 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java @@ -41,7 +41,7 @@ public class HsTasksPage extends HsView { * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void 
preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); set(DATATABLES_ID, "tasks"); set(DATATABLES_SELECTOR, ".dt-tasks" ); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java index 8e390872c13..510ece69bf0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsView.java @@ -39,7 +39,7 @@ public class HsView extends TwoColumnLayout { * (non-Javadoc) * @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); set(DATATABLES_ID, "jobs"); set(initID(DATATABLES, "jobs"), jobsTableInit()); @@ -51,7 +51,7 @@ public class HsView extends TwoColumnLayout { * The prehead that should be common to all subclasses. * @param html used to render. */ - protected void commonPreHead(Page.HTML<_> html) { + protected void commonPreHead(Page.HTML<__> html) { set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/FileBench.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/FileBench.java index 0a9d0e93918..ef68cdff513 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/FileBench.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/io/FileBench.java @@ -170,7 +170,7 @@ public class FileBench extends Configured implements Tool { for(int i = 0; i < argv.length; ++i) { try { if ("-dir".equals(argv[i])) { - root = new Path(argv[++i]).makeQualified(fs); + root = fs.makeQualified(new Path(argv[++i])); System.out.println("DIR: " + root.toString()); } else if ("-seed".equals(argv[i])) { job.setLong("filebench.seed", Long.valueOf(argv[++i])); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java index 023da480822..85c534bfb88 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java @@ -50,8 +50,8 @@ public class MiniMRClientClusterFactory { FileSystem fs = FileSystem.get(conf); - Path testRootDir = new Path("target", identifier + "-tmpDir") - .makeQualified(fs); + Path testRootDir = fs.makeQualified( + new Path("target", identifier + "-tmpDir")); Path appJar = new Path(testRootDir, 
"MRAppJar.jar"); // Copy MRAppJar and make it private. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java index ca3c2dfe5b4..de7880dc656 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java @@ -47,9 +47,9 @@ public class TestCombineFileInputFormat { throw new RuntimeException("init failure", e); } } - private static Path workDir = - new Path(new Path(System.getProperty("test.build.data", "/tmp")), - "TestCombineFileInputFormat").makeQualified(localFs); + private static Path workDir = localFs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"), + "TestCombineFileInputFormat")); private static void writeFile(FileSystem fs, Path name, String contents) throws IOException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineSequenceFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineSequenceFileInputFormat.java index 8d0203e925a..8cdaa8024cb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineSequenceFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineSequenceFileInputFormat.java @@ -53,10 +53,9 @@ public class TestCombineSequenceFileInputFormat { } } - @SuppressWarnings("deprecation") - private static Path workDir = - new Path(new Path(System.getProperty("test.build.data", "/tmp")), - "TestCombineSequenceFileInputFormat").makeQualified(localFs); + private static Path workDir = localFs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"), + "TestCombineSequenceFileInputFormat")); @Test(timeout=10000) public void testFormat() throws Exception { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineTextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineTextInputFormat.java index ca86dd55f06..581e62b639b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineTextInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineTextInputFormat.java @@ -60,10 +60,9 @@ public class TestCombineTextInputFormat { } } - @SuppressWarnings("deprecation") - private static Path workDir = - new Path(new Path(System.getProperty("test.build.data", "/tmp")), - "TestCombineTextInputFormat").makeQualified(localFs); + private static Path workDir = localFs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"), + "TestCombineTextInputFormat")); // A reporter that 
does nothing private static final Reporter voidReporter = Reporter.NULL; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java index 22a05c5b41e..15d651d18f1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java @@ -84,9 +84,9 @@ public class TestConcatenatedCompressedInput { public void after() { ZlibFactory.loadNativeZLib(); } - private static Path workDir = - new Path(new Path(System.getProperty("test.build.data", "/tmp")), - "TestConcatenatedCompressedInput").makeQualified(localFs); + private static Path workDir = localFs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"), + "TestConcatenatedCompressedInput")); private static LineReader makeStream(String str) throws IOException { return new LineReader(new ByteArrayInputStream(str.getBytes("UTF-8")), diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java index d60905ed05b..af09e09535e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java @@ -342,8 +342,8 @@ public class TestMapRed extends Configured implements Tool { values.add(m); m = m.replace((char)('A' + i - 1), (char)('A' + i)); } - Path testdir = new Path( - System.getProperty("test.build.data","/tmp")).makeQualified(fs); + Path testdir = fs.makeQualified(new Path( + System.getProperty("test.build.data","/tmp"))); fs.delete(testdir, true); Path inFile = new Path(testdir, "nullin/blah"); SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, inFile, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java index f690118c807..51f0120d73f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java @@ -75,8 +75,8 @@ public class TestMiniMRChildTask { } } - private static Path TEST_ROOT_DIR = new Path("target", - TestMiniMRChildTask.class.getName() + "-tmpDir").makeQualified(localFs); + private static Path TEST_ROOT_DIR = localFs.makeQualified( + new Path("target", TestMiniMRChildTask.class.getName() + "-tmpDir")); static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar"); /** diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextInputFormat.java index 5106c3843f2..67bd497763b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestTextInputFormat.java @@ -61,10 +61,10 @@ public class TestTextInputFormat { throw new RuntimeException("init failure", e); } } - @SuppressWarnings("deprecation") - private static Path workDir = - new Path(new Path(System.getProperty("test.build.data", "/tmp")), - "TestTextInputFormat").makeQualified(localFs); + + private static Path workDir = localFs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"), + "TestTextInputFormat")); @Test (timeout=500000) public void testFormat() throws Exception { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index bd3e524d44b..55ddea6da38 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -871,10 +871,10 @@ public class TestYARNRunner { Configuration confSent = BuilderUtils.parseTokensConf(submissionContext); // configs that match regex should be included - Assert.assertTrue(confSent.get("dfs.namenode.rpc-address.mycluster2.nn1") - .equals("123.0.0.1")); - Assert.assertTrue(confSent.get("dfs.namenode.rpc-address.mycluster2.nn2") - .equals("123.0.0.2")); + Assert.assertEquals("123.0.0.1", + confSent.get("dfs.namenode.rpc-address.mycluster2.nn1")); + Assert.assertEquals("123.0.0.2", + confSent.get("dfs.namenode.rpc-address.mycluster2.nn2")); // configs that aren't matching regex should not be included Assert.assertTrue(confSent.get("hadoop.tmp.dir") == null || !confSent diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/join/TestWrappedRecordReaderClassloader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/join/TestWrappedRecordReaderClassloader.java index ae5572f5dcd..785898d33ed 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/join/TestWrappedRecordReaderClassloader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/join/TestWrappedRecordReaderClassloader.java @@ -50,8 +50,8 @@ public class TestWrappedRecordReaderClassloader { assertTrue(job.getClassLoader() instanceof Fake_ClassLoader); FileSystem fs = FileSystem.get(job); - Path testdir = new Path(System.getProperty("test.build.data", "/tmp")) - .makeQualified(fs); + Path testdir = fs.makeQualified(new Path( + System.getProperty("test.build.data", 
"/tmp"))); Path base = new Path(testdir, "/empty"); Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") }; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java index 0215568c478..83d689c1e9b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java @@ -17,12 +17,8 @@ */ package org.apache.hadoop.mapreduce; -import java.io.IOException; import java.util.Random; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.DataInputBuffer; -import org.apache.hadoop.io.DataOutputBuffer; import org.junit.Test; import static org.junit.Assert.*; @@ -74,40 +70,7 @@ public class TestCounters { testMaxGroups(new Counters()); } } - - @Test public void testResetOnDeserialize() throws IOException { - // Allow only one counterGroup - Configuration conf = new Configuration(); - conf.setInt(MRJobConfig.COUNTER_GROUPS_MAX_KEY, 1); - Limits.init(conf); - - Counters countersWithOneGroup = new Counters(); - countersWithOneGroup.findCounter("firstOf1Allowed", "First group"); - boolean caughtExpectedException = false; - try { - countersWithOneGroup.findCounter("secondIsTooMany", "Second group"); - } - catch (LimitExceededException _) { - caughtExpectedException = true; - } - - assertTrue("Did not throw expected exception", - caughtExpectedException); - - Counters countersWithZeroGroups = new Counters(); - DataOutputBuffer out = new DataOutputBuffer(); - countersWithZeroGroups.write(out); - - DataInputBuffer in = new DataInputBuffer(); - in.reset(out.getData(), out.getLength()); - - countersWithOneGroup.readFields(in); - - // After reset one should be able to add a group - countersWithOneGroup.findCounter("firstGroupAfterReset", "After reset " + - "limit should be set back to zero"); - } - + @Test public void testCountersIncrement() { Counters fCounters = new Counters(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java index 1fca5c982bf..cc97a14d3bb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java @@ -154,7 +154,7 @@ public class TestCombineFileInputFormat { @Override public BlockLocation[] getFileBlockLocations( FileStatus stat, long start, long len) throws IOException { - if (stat.isDir()) { + if (stat.isDirectory()) { return null; } System.out.println("File " + stat.getPath()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMultipleInputs.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMultipleInputs.java index 632c40e3407..a6f8a725f69 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMultipleInputs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestMultipleInputs.java @@ -134,11 +134,11 @@ public class TestMultipleInputs extends HadoopTestCase { BufferedReader output = new BufferedReader(new InputStreamReader(fs .open(new Path(outDir, "part-r-00000")))); // reducer should have counted one key from each file - assertTrue(output.readLine().equals("a 2")); - assertTrue(output.readLine().equals("b 2")); - assertTrue(output.readLine().equals("c 2")); - assertTrue(output.readLine().equals("d 2")); - assertTrue(output.readLine().equals("e 2")); + assertEquals("a 2", output.readLine()); + assertEquals("b 2", output.readLine()); + assertEquals("c 2", output.readLine()); + assertEquals("d 2", output.readLine()); + assertEquals("e 2", output.readLine()); } @Test diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java index 680e246b4e3..e3d7fa05f10 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestWrappedRRClassloader.java @@ -50,8 +50,8 @@ public class TestWrappedRRClassloader { assertTrue(conf.getClassLoader() instanceof Fake_ClassLoader); FileSystem fs = FileSystem.get(conf); - Path testdir = new Path(System.getProperty("test.build.data", "/tmp")) - .makeQualified(fs); + Path testdir = fs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"))); Path base = new Path(testdir, "/empty"); Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") }; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java index d1004b64ddc..5d536639feb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java @@ -29,12 +29,10 @@ import static org.mockito.Mockito.doReturn; import java.net.InetSocketAddress; import java.security.PrivilegedExceptionAction; -import org.apache.commons.logging.*; -import org.apache.commons.logging.impl.Log4JLogger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; +import org.apache.hadoop.test.GenericTestUtils; import 
org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; @@ -49,9 +47,10 @@ import org.apache.hadoop.security.SaslRpcClient; import org.apache.hadoop.security.SaslRpcServer; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; - -import org.apache.log4j.Level; +import org.slf4j.Logger; +import org.slf4j.event.Level; import org.junit.Test; +import static org.slf4j.LoggerFactory.getLogger; /** Unit tests for using Job Token over RPC. * @@ -62,8 +61,7 @@ import org.junit.Test; public class TestUmbilicalProtocolWithJobToken { private static final String ADDRESS = "0.0.0.0"; - public static final Log LOG = LogFactory - .getLog(TestUmbilicalProtocolWithJobToken.class); + public static final Logger LOG = getLogger(TestUmbilicalProtocolWithJobToken.class); private static Configuration conf; static { @@ -73,11 +71,11 @@ public class TestUmbilicalProtocolWithJobToken { } static { - ((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL); + GenericTestUtils.setLogLevel(Client.LOG, Level.TRACE); + GenericTestUtils.setLogLevel(Server.LOG, Level.TRACE); + GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.TRACE); + GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.TRACE); + GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.TRACE); } @Test diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/util/MRAsyncDiskService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/util/MRAsyncDiskService.java index 44467564326..be4638579ea 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/util/MRAsyncDiskService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/util/MRAsyncDiskService.java @@ -330,7 +330,7 @@ public class MRAsyncDiskService { * Returns the normalized path of a path. 
*/ private String normalizePath(String path) { - return (new Path(path)).makeQualified(this.localFileSystem) + return this.localFileSystem.makeQualified(new Path(path)) .toUri().getPath(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java index 7a0c43e14ea..274f405529a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java @@ -55,14 +55,10 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.JobID; -import org.apache.hadoop.mapred.RunningJob; import org.apache.hadoop.mapred.TaskLog; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; @@ -114,7 +110,6 @@ public class TestMRJobs { EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED); private static final int NUM_NODE_MGRS = 3; private static final String TEST_IO_SORT_MB = "11"; - private static final String TEST_GROUP_MAX = "200"; private static final int DEFAULT_REDUCES = 2; protected int numSleepReducers = DEFAULT_REDUCES; @@ -133,8 +128,8 @@ public class TestMRJobs { } } - private static Path TEST_ROOT_DIR = new Path("target", - TestMRJobs.class.getName() + "-tmpDir").makeQualified(localFs); + private static Path TEST_ROOT_DIR = localFs.makeQualified( + new Path("target", TestMRJobs.class.getName() + "-tmpDir")); static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar"); private static final String OUTPUT_ROOT_DIR = "/tmp/" + TestMRJobs.class.getSimpleName(); @@ -466,58 +461,31 @@ public class TestMRJobs { } @Test(timeout = 300000) - public void testConfVerificationWithClassloader() throws Exception { - testConfVerification(true, false, false, false); + public void testJobClassloader() throws IOException, InterruptedException, + ClassNotFoundException { + testJobClassloader(false); } @Test(timeout = 300000) - public void testConfVerificationWithClassloaderCustomClasses() - throws Exception { - testConfVerification(true, true, false, false); + public void testJobClassloaderWithCustomClasses() throws IOException, + InterruptedException, ClassNotFoundException { + testJobClassloader(true); } - @Test(timeout = 300000) - public void testConfVerificationWithOutClassloader() throws Exception { - testConfVerification(false, false, false, false); - } - - @Test(timeout = 300000) - public void testConfVerificationWithJobClient() throws Exception { - testConfVerification(false, false, true, false); - } - - @Test(timeout = 300000) - public void testConfVerificationWithJobClientLocal() throws Exception { - testConfVerification(false, false, true, true); - } - - private void testConfVerification(boolean useJobClassLoader, - boolean useCustomClasses, boolean useJobClientForMonitring, - boolean useLocal) 
throws Exception { - LOG.info("\n\n\nStarting testConfVerification()" - + " jobClassloader=" + useJobClassLoader - + " customClasses=" + useCustomClasses - + " jobClient=" + useJobClientForMonitring - + " localMode=" + useLocal); + private void testJobClassloader(boolean useCustomClasses) throws IOException, + InterruptedException, ClassNotFoundException { + LOG.info("\n\n\nStarting testJobClassloader()" + + " useCustomClasses=" + useCustomClasses); if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } - final Configuration clusterConfig; - if (useLocal) { - clusterConfig = new Configuration(); - conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME); - } else { - clusterConfig = mrCluster.getConfig(); - } - final JobClient jc = new JobClient(clusterConfig); - final Configuration sleepConf = new Configuration(clusterConfig); + final Configuration sleepConf = new Configuration(mrCluster.getConfig()); // set master address to local to test that local mode applied iff framework == local sleepConf.set(MRConfig.MASTER_ADDRESS, "local"); - sleepConf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, - useJobClassLoader); + sleepConf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true); if (useCustomClasses) { // to test AM loading user classes such as output format class, we want // to blacklist them from the system classes (they need to be prepended @@ -535,7 +503,6 @@ public class TestMRJobs { sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString()); sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString()); sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class"); - sleepConf.set(MRJobConfig.COUNTER_GROUPS_MAX_KEY, TEST_GROUP_MAX); final SleepJob sleepJob = new SleepJob(); sleepJob.setConf(sleepConf); final Job job = sleepJob.createJob(1, 1, 10, 1, 10, 1); @@ -553,26 +520,7 @@ public class TestMRJobs { jobConf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true); } job.submit(); - final boolean succeeded; - if (useJobClientForMonitring && !useLocal) { - // We can't use getJobID in useLocal case because JobClient and Job - // point to different instances of LocalJobRunner - // - final JobID mapredJobID = JobID.downgrade(job.getJobID()); - RunningJob runningJob = null; - do { - Thread.sleep(10); - runningJob = jc.getJob(mapredJobID); - } while (runningJob == null); - Assert.assertEquals("Unexpected RunningJob's " - + MRJobConfig.COUNTER_GROUPS_MAX_KEY, - TEST_GROUP_MAX, runningJob.getConfiguration() - .get(MRJobConfig.COUNTER_GROUPS_MAX_KEY)); - runningJob.waitForCompletion(); - succeeded = runningJob.isSuccessful(); - } else { - succeeded = job.waitForCompletion(true); - } + boolean succeeded = job.waitForCompletion(true); Assert.assertTrue("Job status: " + job.getStatus().getFailureInfo(), succeeded); } @@ -1366,14 +1314,5 @@ public class TestMRJobs { + ", actual: " + ioSortMb); } } - - @Override - public void map(IntWritable key, IntWritable value, Context context) throws IOException, InterruptedException { - super.map(key, value, context); - for (int i = 0; i < 100; i++) { - context.getCounter("testCounterGroup-" + i, - "testCounter").increment(1); - } - } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java index f9236a926ae..98a6de2ce0c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java @@ -73,8 +73,8 @@ public class TestMRJobsWithHistoryService { } } - private static Path TEST_ROOT_DIR = new Path("target", - TestMRJobs.class.getName() + "-tmpDir").makeQualified(localFs); + private static Path TEST_ROOT_DIR = localFs.makeQualified( + new Path("target", TestMRJobs.class.getName() + "-tmpDir")); static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar"); @Before diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java index cb9b5e088e8..79045f9555d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java @@ -111,7 +111,10 @@ public class FadvisedFileRegion extends DefaultFileRegion { long trans = actualCount; int readSize; - ByteBuffer byteBuffer = ByteBuffer.allocate(this.shuffleBufferSize); + ByteBuffer byteBuffer = ByteBuffer.allocate( + Math.min( + this.shuffleBufferSize, + trans > Integer.MAX_VALUE ? 
Integer.MAX_VALUE : (int) trans)); while(trans > 0L && (readSize = fileChannel.read(byteBuffer, this.position+position)) > 0) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 4e7a0aed2d7..212078bbd14 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -238,14 +238,6 @@ ${project.build.directory} hadoop-annotations.jar - - xerces - xercesImpl - ${xerces.version.jdiff} - false - ${project.build.directory} - xerces.jar - @@ -283,7 +275,7 @@ sourceFiles="${dev-support.relative.dir}/jdiff/Null.java" maxmemory="${jdiff.javadoc.maxmemory}"> + path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar"> diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml index 98314564fcf..2ff93f70ab5 100644 --- a/hadoop-maven-plugins/pom.xml +++ b/hadoop-maven-plugins/pom.xml @@ -79,6 +79,9 @@ + + maven-compiler-plugin + org.apache.maven.plugins maven-plugin-plugin @@ -86,7 +89,7 @@ default-descriptor - process-classes + compile diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index 6e73c0efd35..cf432186022 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -102,7 +102,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 512m + 768m true false ${maven.compile.source} @@ -194,14 +194,6 @@ ${project.build.directory} hadoop-annotations.jar - - xerces - xercesImpl - ${xerces.jdiff.version} - false - ${project.build.directory} - xerces.jar - @@ -267,7 +259,7 @@ sourceFiles="${basedir}/dev-support/jdiff/Null.java" maxmemory="${jdiff.javadoc.maxmemory}"> + path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar"> diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml old mode 100644 new mode 100755 index 8b9937906ef..e1d22b43b77 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -35,7 +35,7 @@ false true - 9.3.11.v20160721 + 9.3.19.v20170502 _ _ @@ -45,8 +45,6 @@ 1.0.9 - - 2.11.0 0.8.2.1 1.2.6 @@ -100,6 +98,10 @@ 0.1.1-alpha-SNAPSHOT + 1.0-alpha-1 + 3.3.1 + 2.4.12 + 6.2.1.jre7 1.8 @@ -124,7 +126,7 @@ 2.6 2.4.3 2.5 - 2.4 + 3.1.0 2.3 1.2 1.5 @@ -320,6 +322,13 @@ ${project.version} + + org.apache.hadoop + hadoop-yarn-server-common + ${project.version} + test-jar + + org.apache.hadoop hadoop-yarn-server-tests @@ -405,6 +414,12 @@ ${project.version} + + org.apache.hadoop + hadoop-yarn-server-router + ${project.version} + + org.apache.hadoop hadoop-mapreduce-client-jobclient @@ -1177,12 +1192,6 @@ - - xerces - xercesImpl - 2.9.1 - - org.apache.curator curator-recipes @@ -1305,6 +1314,26 @@ kerb-simplekdc 1.0.0 + + org.apache.geronimo.specs + geronimo-jcache_1.0_spec + ${jcache.version} + + + org.ehcache + ehcache + ${ehcache.version} + + + com.zaxxer + HikariCP-java7 + ${hikari.version} + + + com.microsoft.sqlserver + mssql-jdbc + ${mssql.version} + diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index 9b4b082a712..eda8574c7a0 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -145,6 +145,7 @@ +

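The artifacts added to hadoop-project/pom.xml just above (geronimo-jcache_1.0_spec, ehcache, HikariCP-java7 and mssql-jdbc) appear to be declared in the parent's dependencyManagement, presumably in support of the new Federation state store, so their versions are pinned in one place. A minimal sketch of how a consuming module's pom.xml might reference them, assuming the module inherits from hadoop-project as its parent (the consuming module itself is not part of this hunk and its name is hypothetical):

```xml
<!-- Hypothetical consumer fragment: no <version> elements are needed here
     because the versions are inherited from the dependencyManagement
     entries added to hadoop-project/pom.xml above. -->
<dependencies>
  <dependency>
    <groupId>com.microsoft.sqlserver</groupId>
    <artifactId>mssql-jdbc</artifactId>
  </dependency>
  <dependency>
    <groupId>com.zaxxer</groupId>
    <artifactId>HikariCP-java7</artifactId>
  </dependency>
  <dependency>
    <groupId>org.ehcache</groupId>
    <artifactId>ehcache</artifactId>
  </dependency>
</dependencies>
```

Keeping the version properties in hadoop-project means a later upgrade of, say, the SQL Server driver touches only that one pom rather than every consuming module.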
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java index a944fc1794e..a85a739fb3f 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java @@ -129,6 +129,10 @@ public class AliyunOSSFileSystemStore { } String endPoint = conf.getTrimmed(ENDPOINT_KEY, ""); + if (StringUtils.isEmpty(endPoint)) { + throw new IllegalArgumentException("Aliyun OSS endpoint should not be " + + "null or empty. Please set proper endpoint with 'fs.oss.endpoint'."); + } CredentialsProvider provider = AliyunOSSUtils.getCredentialsProvider(conf); ossClient = new OSSClient(endPoint, provider, clientConf); diff --git a/hadoop-tools/hadoop-archive-logs/src/main/shellprofile.d/hadoop-archive-logs.sh b/hadoop-tools/hadoop-archive-logs/src/main/shellprofile.d/hadoop-archive-logs.sh index c889816a124..278a0895f2c 100755 --- a/hadoop-tools/hadoop-archive-logs/src/main/shellprofile.d/hadoop-archive-logs.sh +++ b/hadoop-tools/hadoop-archive-logs/src/main/shellprofile.d/hadoop-archive-logs.sh @@ -18,7 +18,7 @@ if ! declare -f mapred_subcommand_archive-logs >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = mapred ]]; then - hadoop_add_subcommand "archive-logs" "combine aggregated logs into hadoop archives" + hadoop_add_subcommand "archive-logs" client "combine aggregated logs into hadoop archives" fi # this can't be indented otherwise shelldocs won't get it diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java index c2097dc673e..8ad860036c4 100644 --- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java +++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java @@ -473,7 +473,7 @@ public class HadoopArchives implements Tool { conf.setLong(HAR_BLOCKSIZE_LABEL, blockSize); conf.setLong(HAR_PARTSIZE_LABEL, partSize); conf.set(DST_HAR_LABEL, archiveName); - conf.set(SRC_PARENT_LABEL, parentPath.makeQualified(fs).toString()); + conf.set(SRC_PARENT_LABEL, fs.makeQualified(parentPath).toString()); conf.setInt(HAR_REPLICATION_LABEL, repl); Path outputPath = new Path(dest, archiveName); FileOutputFormat.setOutputPath(conf, outputPath); diff --git a/hadoop-tools/hadoop-archives/src/main/shellprofile.d/hadoop-archives.sh b/hadoop-tools/hadoop-archives/src/main/shellprofile.d/hadoop-archives.sh index f74fe5ba8f1..42fc1a093bc 100755 --- a/hadoop-tools/hadoop-archives/src/main/shellprofile.d/hadoop-archives.sh +++ b/hadoop-tools/hadoop-archives/src/main/shellprofile.d/hadoop-archives.sh @@ -18,7 +18,7 @@ if ! declare -f hadoop_subcommand_archive >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then - hadoop_add_subcommand "archive" "create a Hadoop archive" + hadoop_add_subcommand "archive" client "create a Hadoop archive" fi # this can't be indented otherwise shelldocs won't get it @@ -39,7 +39,7 @@ fi if ! 
declare -f mapred_subcommand_archive >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = mapred ]]; then - hadoop_add_subcommand "archive" "create a Hadoop archive" + hadoop_add_subcommand "archive" client "create a Hadoop archive" fi # this can't be indented otherwise shelldocs won't get it diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index c995ca602f6..91e94a6d88a 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -315,7 +315,7 @@ org.apache.hadoop hadoop-common - compile + provided org.apache.hadoop @@ -333,26 +333,6 @@ aws-java-sdk-bundle compile - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.dataformat - jackson-dataformat-cbor - - - joda-time - joda-time - junit junit diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml index 3aed5e11359..47f12df7f0a 100644 --- a/hadoop-tools/hadoop-azure-datalake/pom.xml +++ b/hadoop-tools/hadoop-azure-datalake/pom.xml @@ -110,7 +110,7 @@ com.microsoft.azure azure-data-lake-store-sdk - 2.1.4 + 2.2.1 diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java index 31df22254c2..f77d98100cd 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java +++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java @@ -54,6 +54,14 @@ public final class AdlConfKeys { public static final String TOKEN_PROVIDER_TYPE_CLIENT_CRED = "ClientCredential"; + // MSI Auth Configuration + public static final String MSI_PORT = "fs.adl.oauth2.msi.port"; + public static final String MSI_TENANT_GUID = "fs.adl.oauth2.msi.tenantguid"; + + // DeviceCode Auth configuration + public static final String DEVICE_CODE_CLIENT_APP_ID = + "fs.adl.oauth2.devicecode.clientapp.id"; + public static final String READ_AHEAD_BUFFER_SIZE_KEY = "adl.feature.client.cache.readahead"; diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileStatus.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileStatus.java new file mode 100644 index 00000000000..70c005dbdcc --- /dev/null +++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileStatus.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package org.apache.hadoop.fs.adl; + +import com.microsoft.azure.datalake.store.DirectoryEntry; +import com.microsoft.azure.datalake.store.DirectoryEntryType; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; + +import static org.apache.hadoop.fs.adl.AdlConfKeys.ADL_BLOCK_SIZE; +import static org.apache.hadoop.fs.adl.AdlConfKeys.ADL_REPLICATION_FACTOR; + +/** + * Shim class supporting linking against 2.x clients. + */ +class AdlFileStatus extends FileStatus { + + private static final long serialVersionUID = 0x01fcbe5e; + + private boolean hasAcl = false; + + AdlFileStatus(DirectoryEntry entry, Path path, boolean hasAcl) { + this(entry, path, entry.user, entry.group, hasAcl); + } + + AdlFileStatus(DirectoryEntry entry, Path path, + String owner, String group, boolean hasAcl) { + super(entry.length, DirectoryEntryType.DIRECTORY == entry.type, + ADL_REPLICATION_FACTOR, ADL_BLOCK_SIZE, + entry.lastModifiedTime.getTime(), entry.lastAccessTime.getTime(), + new AdlPermission(hasAcl, Short.parseShort(entry.permission, 8)), + owner, group, null, path); + this.hasAcl = hasAcl; + } + + @Override + public boolean hasAcl() { + return hasAcl; + } + + @Override + public boolean equals(Object o) { + // satisfy findbugs + return super.equals(o); + } + + @Override + public int hashCode() { + // satisfy findbugs + return super.hashCode(); + } + +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java index e63f1152569..a5e31e153c9 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java +++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java @@ -29,12 +29,13 @@ import com.google.common.annotations.VisibleForTesting; import com.microsoft.azure.datalake.store.ADLStoreClient; import com.microsoft.azure.datalake.store.ADLStoreOptions; import com.microsoft.azure.datalake.store.DirectoryEntry; -import com.microsoft.azure.datalake.store.DirectoryEntryType; import com.microsoft.azure.datalake.store.IfExists; import com.microsoft.azure.datalake.store.LatencyTracker; import com.microsoft.azure.datalake.store.UserGroupRepresentation; import com.microsoft.azure.datalake.store.oauth2.AccessTokenProvider; import com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider; +import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider; +import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider; import com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider; import org.apache.commons.lang.StringUtils; @@ -129,6 +130,8 @@ public class AdlFileSystem extends FileSystem { userName = UserGroupInformation.getCurrentUser().getShortUserName(); } catch (IOException e) { userName = "hadoop"; + LOG.warn("Got exception when getting Hadoop user name." 
+ + " Set the user name to '" + userName + "'.", e); } this.setWorkingDirectory(getHomeDirectory()); @@ -253,6 +256,12 @@ public class AdlFileSystem extends FileSystem { case ClientCredential: tokenProvider = getConfCredentialBasedTokenProvider(conf); break; + case MSI: + tokenProvider = getMsiBasedTokenProvider(conf); + break; + case DeviceCode: + tokenProvider = getDeviceCodeTokenProvider(conf); + break; case Custom: default: AzureADTokenProvider azureADTokenProvider = getCustomAccessTokenProvider( @@ -279,6 +288,19 @@ public class AdlFileSystem extends FileSystem { return new RefreshTokenBasedTokenProvider(clientId, refreshToken); } + private AccessTokenProvider getMsiBasedTokenProvider( + Configuration conf) throws IOException { + int port = Integer.parseInt(getNonEmptyVal(conf, MSI_PORT)); + String tenantGuid = getPasswordString(conf, MSI_TENANT_GUID); + return new MsiTokenProvider(port, tenantGuid); + } + + private AccessTokenProvider getDeviceCodeTokenProvider( + Configuration conf) throws IOException { + String clientAppId = getNonEmptyVal(conf, DEVICE_CODE_CLIENT_APP_ID); + return new DeviceCodeTokenProvider(clientAppId); + } + @VisibleForTesting AccessTokenProvider getTokenProvider() { return tokenProvider; @@ -604,30 +626,12 @@ public class AdlFileSystem extends FileSystem { } private FileStatus toFileStatus(final DirectoryEntry entry, final Path f) { - boolean isDirectory = entry.type == DirectoryEntryType.DIRECTORY; - long lastModificationData = entry.lastModifiedTime.getTime(); - long lastAccessTime = entry.lastAccessTime.getTime(); - // set aclBit from ADLS backend response if - // ADL_SUPPORT_ACL_BIT_IN_FSPERMISSION is true. - final boolean aclBit = aclBitStatus ? entry.aclBit : false; - - FsPermission permission = new AdlPermission(aclBit, - Short.valueOf(entry.permission, 8)); - String user = entry.user; - String group = entry.group; - - FileStatus status; + Path p = makeQualified(f); + boolean aclBit = aclBitStatus ? entry.aclBit : false; if (overrideOwner) { - status = new FileStatus(entry.length, isDirectory, ADL_REPLICATION_FACTOR, - ADL_BLOCK_SIZE, lastModificationData, lastAccessTime, permission, - userName, "hdfs", this.makeQualified(f)); - } else { - status = new FileStatus(entry.length, isDirectory, ADL_REPLICATION_FACTOR, - ADL_BLOCK_SIZE, lastModificationData, lastAccessTime, permission, - user, group, this.makeQualified(f)); + return new AdlFileStatus(entry, p, userName, "hdfs", aclBit); } - - return status; + return new AdlFileStatus(entry, p, aclBit); } /** diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/TokenProviderType.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/TokenProviderType.java index 9fd4f4f46b1..1c11d848dc2 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/TokenProviderType.java +++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/TokenProviderType.java @@ -21,5 +21,7 @@ package org.apache.hadoop.fs.adl; enum TokenProviderType { RefreshToken, ClientCredential, + MSI, + DeviceCode, Custom } diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md index d4b7d8ef955..e34da36d566 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md +++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md @@ -111,20 +111,24 @@ service associated with the client id. 
See [*Active Directory Library For Java*] ##### Generating the Service Principal 1. Go to [the portal](https://portal.azure.com) -2. Under "Browse", look for Active Directory and click on it. -3. Create "Web Application". Remember the name you create here - that is what you will add to your ADL account as authorized user. +2. Under services in left nav, look for Azure Active Directory and click it. +3. Using "App Registrations" in the menu, create "Web Application". Remember + the name you create here - that is what you will add to your ADL account + as authorized user. 4. Go through the wizard -5. Once app is created, Go to app configuration, and find the section on "keys" +5. Once app is created, go to "keys" under "settings" for the app 6. Select a key duration and hit save. Save the generated keys. -7. Note down the properties you will need to auth: - - The client ID +7. Go back to the App Registrations page, and click on the "Endpoints" button + at the top + a. Note down the "Token Endpoint" URL +8. Note down the properties you will need to auth: + - The "Application ID" of the Web App you created above - The key you just generated above - - The token endpoint (select "View endpoints" at the bottom of the page and copy/paste the OAuth2 .0 Token Endpoint value) - - Resource: Always https://management.core.windows.net/ , for all customers + - The token endpoint ##### Adding the service principal to your ADL Account 1. Go to the portal again, and open your ADL account -2. Select Users under Settings +2. Select `Access control (IAM)` 3. Add your user name you created in Step 6 above (note that it does not show up in the list, but will be found if you searched for the name) 4. Add "Owner" role @@ -153,6 +157,84 @@ Add the following properties to your `core-site.xml` ``` +#### Using MSI (Managed Service Identity) + +Azure VMs can be provisioned with "service identities" that are managed by the +Identity extension within the VM. The advantage of doing this is that the +credentials are managed by the extension, and do not have to be put into +core-site.xml. + +To use MSI, the following two steps are needed: +1. Modify the VM deployment template to specify the port number of the token + service exposed to localhost by the identity extension in the VM. +2. Get your Azure ActiveDirectory Tenant ID: + 1. Go to [the portal](https://portal.azure.com) + 2. Under services in left nav, look for Azure Active Directory and click on it. + 3. Click on Properties + 4. Note down the GUID shown under "Directory ID" - this is your AAD tenant ID + + +##### Configure core-site.xml +Add the following properties to your `core-site.xml` + +```xml + + fs.adl.oauth2.access.token.provider.type + MSI + + + + fs.adl.oauth2.msi.port + PORT NUMBER FROM STEP 1 ABOVE + + + + fs.adl.oauth2.msi.tenantguid + AAD TENANT ID GUID FROM STEP 2 ABOVE + +``` + +### Using Device Code Auth for interactive login + +**Note:** This auth method is suitable for running interactive tools, but will +not work for jobs submitted to a cluster. + +To use user-based login, Azure ActiveDirectory provides login flow using +device code. + +To use device code flow, user must first create a **Native** app registration +in the Azure portal, and provide the client ID for the app as a config. Here +are the steps: + +1. Go to [the portal](https://portal.azure.com) +2. Under services in left nav, look for Azure Active Directory and click on it. +3. Using "App Registrations" in the menu, create "Native Application". +4. Go through the wizard +5. 
Once app is created, note down the "Application ID" of the app +6. Grant permissions to the app: + 1. Click on "Permissions" for the app, and then add "Azure Data Lake" and + "Windows Azure Service Management API" permissions + 2. Click on "Grant Permissions" to add the permissions to the app + +Add the following properties to your `core-site.xml` + +```xml + + fs.adl.oauth2.devicecode.clientapp.id + APP ID FROM STEP 5 ABOVE + +``` + +It is usually not desirable to add DeviceCode as the default token provider +type. But it can be used when using a local command: +``` + hadoop fs -Dfs.adl.oauth2.access.token.provider.type=DeviceCode -ls ... +``` +Running this will print a URL and device code that can be used to login from +any browser (even on a different machine, outside of the ssh session). Once +the login is done, the command continues. + + #### Protecting the Credentials with Credential Providers In many Hadoop clusters, the `core-site.xml` file is world-readable. To protect diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java index 36498c6696c..929b33a0427 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java +++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java @@ -23,6 +23,8 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider; +import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider; import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.adl.common.CustomMockTokenProvider; @@ -40,6 +42,9 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys .AZURE_AD_TOKEN_PROVIDER_CLASS_KEY; import static org.apache.hadoop.fs.adl.AdlConfKeys .AZURE_AD_TOKEN_PROVIDER_TYPE_KEY; +import static org.apache.hadoop.fs.adl.AdlConfKeys.DEVICE_CODE_CLIENT_APP_ID; +import static org.apache.hadoop.fs.adl.AdlConfKeys.MSI_PORT; +import static org.apache.hadoop.fs.adl.AdlConfKeys.MSI_TENANT_GUID; import static org.apache.hadoop.fs.adl.TokenProviderType.*; import static org.junit.Assert.assertEquals; @@ -97,6 +102,41 @@ public class TestAzureADTokenProvider { Assert.assertTrue(tokenProvider instanceof ClientCredsTokenProvider); } + @Test + public void testMSITokenProvider() + throws IOException, URISyntaxException { + Configuration conf = new Configuration(); + conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, MSI); + conf.set(MSI_PORT, "54321"); + conf.set(MSI_TENANT_GUID, "TENANT_GUID"); + + URI uri = new URI("adl://localhost:8080"); + AdlFileSystem fileSystem = new AdlFileSystem(); + fileSystem.initialize(uri, conf); + AccessTokenProvider tokenProvider = fileSystem.getTokenProvider(); + Assert.assertTrue(tokenProvider instanceof MsiTokenProvider); + } + + @Test + public void testDeviceCodeTokenProvider() + throws IOException, URISyntaxException { + boolean runTest = false; + if (runTest) { + // Device code auth method causes an interactive prompt, so run this only + // when running the test interactively at a local terminal. Disabling + // test by default, to not break any automation. 
+ Configuration conf = new Configuration(); + conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, DeviceCode); + conf.set(DEVICE_CODE_CLIENT_APP_ID, "CLIENT_APP_ID_GUID"); + + URI uri = new URI("adl://localhost:8080"); + AdlFileSystem fileSystem = new AdlFileSystem(); + fileSystem.initialize(uri, conf); + AccessTokenProvider tokenProvider = fileSystem.getTokenProvider(); + Assert.assertTrue(tokenProvider instanceof DeviceCodeTokenProvider); + } + } + @Test public void testCustomCredTokenProvider() throws URISyntaxException, IOException { diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java index 0ea4b868c1d..95c23639564 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java +++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java @@ -42,8 +42,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.ADL_BLOCK_SIZE; * org.apache.hadoop.fs.adl.live testing package. */ public class TestGetFileStatus extends AdlMockWebServer { - private static final Logger LOG = LoggerFactory - .getLogger(TestGetFileStatus.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestGetFileStatus.class); @Test public void getFileStatusReturnsAsExpected() @@ -72,33 +72,31 @@ public class TestGetFileStatus extends AdlMockWebServer { fileStatus.isErasureCoded()); } - @Test - public void getFileStatusAclBit() - throws URISyntaxException, IOException { - // With ACLBIT set to true - getMockServer().enqueue(new MockResponse().setResponseCode(200) - .setBody(TestADLResponseData.getGetFileStatusJSONResponse(true))); - long startTime = Time.monotonicNow(); - FileStatus fileStatus = getMockAdlFileSystem() - .getFileStatus(new Path("/test1/test2")); - long endTime = Time.monotonicNow(); - LOG.debug("Time : " + (endTime - startTime)); - Assert.assertTrue(fileStatus.isFile()); - Assert.assertEquals(true, fileStatus.getPermission().getAclBit()); - Assert.assertEquals(fileStatus.hasAcl(), - fileStatus.getPermission().getAclBit()); + @Test + public void getFileStatusAclBit() throws URISyntaxException, IOException { + // With ACLBIT set to true + getMockServer().enqueue(new MockResponse().setResponseCode(200) + .setBody(TestADLResponseData.getGetFileStatusJSONResponse(true))); + long startTime = Time.monotonicNow(); + FileStatus fileStatus = getMockAdlFileSystem() + .getFileStatus(new Path("/test1/test2")); + long endTime = Time.monotonicNow(); + LOG.debug("Time : " + (endTime - startTime)); + Assert.assertTrue(fileStatus.isFile()); + Assert.assertTrue(fileStatus.hasAcl()); + Assert.assertTrue(fileStatus.getPermission().getAclBit()); + + // With ACLBIT set to false + getMockServer().enqueue(new MockResponse().setResponseCode(200) + .setBody(TestADLResponseData.getGetFileStatusJSONResponse(false))); + startTime = Time.monotonicNow(); + fileStatus = getMockAdlFileSystem() + .getFileStatus(new Path("/test1/test2")); + endTime = Time.monotonicNow(); + LOG.debug("Time : " + (endTime - startTime)); + Assert.assertTrue(fileStatus.isFile()); + Assert.assertFalse(fileStatus.hasAcl()); + Assert.assertFalse(fileStatus.getPermission().getAclBit()); + } - // With ACLBIT set to false - getMockServer().enqueue(new MockResponse().setResponseCode(200) - .setBody(TestADLResponseData.getGetFileStatusJSONResponse(false))); - startTime = Time.monotonicNow(); - fileStatus = 
getMockAdlFileSystem() - .getFileStatus(new Path("/test1/test2")); - endTime = Time.monotonicNow(); - LOG.debug("Time : " + (endTime - startTime)); - Assert.assertTrue(fileStatus.isFile()); - Assert.assertEquals(false, fileStatus.getPermission().getAclBit()); - Assert.assertEquals(fileStatus.hasAcl(), - fileStatus.getPermission().getAclBit()); - } } diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java index dac8886a4c6..db32476f459 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java +++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java @@ -102,7 +102,7 @@ public class TestListStatus extends AdlMockWebServer { } @Test - public void listStatusAclBit() + public void listStatusAcl() throws URISyntaxException, IOException { // With ACLBIT set to true getMockServer().enqueue(new MockResponse().setResponseCode(200) @@ -115,7 +115,8 @@ public class TestListStatus extends AdlMockWebServer { LOG.debug("Time : " + (endTime - startTime)); for (int i = 0; i < ls.length; i++) { Assert.assertTrue(ls[i].isDirectory()); - Assert.assertEquals(true, ls[i].getPermission().getAclBit()); + Assert.assertTrue(ls[i].hasAcl()); + Assert.assertTrue(ls[i].getPermission().getAclBit()); } // With ACLBIT set to false @@ -129,7 +130,8 @@ public class TestListStatus extends AdlMockWebServer { LOG.debug("Time : " + (endTime - startTime)); for (int i = 0; i < ls.length; i++) { Assert.assertTrue(ls[i].isDirectory()); - Assert.assertEquals(false, ls[i].getPermission().getAclBit()); + Assert.assertFalse(ls[i].hasAcl()); + Assert.assertFalse(ls[i].getPermission().getAclBit()); } } } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java index 7c198af4eca..b0cd701b9e2 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java @@ -158,6 +158,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { private static final String KEY_SELF_THROTTLE_READ_FACTOR = "fs.azure.selfthrottling.read.factor"; private static final String KEY_SELF_THROTTLE_WRITE_FACTOR = "fs.azure.selfthrottling.write.factor"; + private static final String KEY_AUTO_THROTTLE_ENABLE = "fs.azure.autothrottling.enable"; + private static final String KEY_ENABLE_STORAGE_CLIENT_LOGGING = "fs.azure.storage.client.logging"; /** @@ -239,10 +241,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { // Retry parameter defaults. 
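The constants above introduce a new `fs.azure.autothrottling.enable` switch. The sketch below (not part of this patch) shows how a job might opt in; per the initialization logic later in this file it only takes effect while self-throttling is disabled. The `fs.azure.selfthrottling.enable` key is assumed from the existing WASB configuration surface rather than introduced by this change.

```java
import org.apache.hadoop.conf.Configuration;

public class WasbThrottlingConfigExample {
  public static Configuration enableAutoThrottling(Configuration conf) {
    // New in this patch: client-side auto-throttling of WASB reads/writes.
    conf.setBoolean("fs.azure.autothrottling.enable", true);
    // Assumed pre-existing key: auto-throttling is ignored while the older
    // self-throttling feature remains enabled.
    conf.setBoolean("fs.azure.selfthrottling.enable", false);
    return conf;
  }
}
```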
// - private static final int DEFAULT_MIN_BACKOFF_INTERVAL = 1 * 1000; // 1s + private static final int DEFAULT_MIN_BACKOFF_INTERVAL = 3 * 1000; // 1s private static final int DEFAULT_MAX_BACKOFF_INTERVAL = 30 * 1000; // 30s - private static final int DEFAULT_BACKOFF_INTERVAL = 1 * 1000; // 1s - private static final int DEFAULT_MAX_RETRY_ATTEMPTS = 15; + private static final int DEFAULT_BACKOFF_INTERVAL = 3 * 1000; // 1s + private static final int DEFAULT_MAX_RETRY_ATTEMPTS = 30; private static final int DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL = 3 * 1000; private static final int DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL = 90 * 1000; @@ -256,6 +258,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { private static final float DEFAULT_SELF_THROTTLE_READ_FACTOR = 1.0f; private static final float DEFAULT_SELF_THROTTLE_WRITE_FACTOR = 1.0f; + private static final boolean DEFAULT_AUTO_THROTTLE_ENABLE = false; + private static final int STORAGE_CONNECTION_TIMEOUT_DEFAULT = 90; /** @@ -283,7 +287,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { private boolean connectingUsingSAS = false; private AzureFileSystemInstrumentation instrumentation; private BandwidthGaugeUpdater bandwidthGaugeUpdater; - private final static JSON PERMISSION_JSON_SERIALIZER = createPermissionJsonSerializer(); + private static final JSON PERMISSION_JSON_SERIALIZER = createPermissionJsonSerializer(); private boolean suppressRetryPolicy = false; private boolean canCreateOrModifyContainer = false; @@ -308,6 +312,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { private float selfThrottlingReadFactor; private float selfThrottlingWriteFactor; + private boolean autoThrottlingEnabled; + private TestHookOperationContext testHookOperationContext = null; // Set if we're running against a storage emulator.. @@ -481,7 +487,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { "Cannot initialize WASB file system, conf is null"); } - if(!conf.getBoolean( + if (!conf.getBoolean( NativeAzureFileSystem.SKIP_AZURE_METRICS_PROPERTY_NAME, false)) { //If not skip azure metrics, create bandwidthGaugeUpdater this.bandwidthGaugeUpdater = new BandwidthGaugeUpdater(instrumentation); @@ -664,9 +670,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { private String getHTTPScheme() { String sessionScheme = sessionUri.getScheme(); // Check if we're on a secure URI scheme: wasbs or the legacy asvs scheme. - if (sessionScheme != null && - (sessionScheme.equalsIgnoreCase("asvs") || - sessionScheme.equalsIgnoreCase("wasbs"))) { + if (sessionScheme != null + && (sessionScheme.equalsIgnoreCase("asvs") + || sessionScheme.equalsIgnoreCase("wasbs"))) { return HTTPS_SCHEME; } else { // At this point the scheme should be either null or asv or wasb. @@ -766,6 +772,18 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { selfThrottlingWriteFactor = sessionConfiguration.getFloat( KEY_SELF_THROTTLE_WRITE_FACTOR, DEFAULT_SELF_THROTTLE_WRITE_FACTOR); + if (!selfThrottlingEnabled) { + autoThrottlingEnabled = sessionConfiguration.getBoolean( + KEY_AUTO_THROTTLE_ENABLE, + DEFAULT_AUTO_THROTTLE_ENABLE); + if (autoThrottlingEnabled) { + ClientThrottlingIntercept.initializeSingleton(); + } + } else { + // cannot enable both self-throttling and client-throttling + autoThrottlingEnabled = false; + } + OperationContext.setLoggingEnabledByDefault(sessionConfiguration. 
getBoolean(KEY_ENABLE_STORAGE_CLIENT_LOGGING, false)); @@ -839,8 +857,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { CloudStorageAccount.getDevelopmentStorageAccount(); storageInteractionLayer.createBlobClient(account); } else { - blobEndPoint = new URI(getHTTPScheme() + "://" + - accountName); + blobEndPoint = new URI(getHTTPScheme() + "://" + accountName); storageInteractionLayer.createBlobClient(blobEndPoint, credentials); } suppressRetryPolicyInClientIfNeeded(); @@ -951,7 +968,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { * @throws AzureException * @throws IOException */ - private void createAzureStorageSession () + private void createAzureStorageSession() throws AzureException, IOException { // Make sure this object was properly initialized with references to @@ -1128,8 +1145,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { myDir = verifyAndConvertToStandardFormat(currentDir); } catch (URISyntaxException ex) { throw new AzureException(String.format( - "The directory %s specified in the configuration entry %s is not" + - " a valid URI.", + "The directory %s specified in the configuration entry %s is not" + + " a valid URI.", currentDir, configVar)); } if (myDir != null) { @@ -1159,8 +1176,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { public boolean isKeyForDirectorySet(String key, Set dirSet) { String defaultFS = FileSystem.getDefaultUri(sessionConfiguration).toString(); for (String dir : dirSet) { - if (dir.isEmpty() || - key.startsWith(dir + "/")) { + if (dir.isEmpty() || key.startsWith(dir + "/")) { return true; } @@ -1168,7 +1184,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { // system. // try { - URI uriPageBlobDir = new URI (dir); + URI uriPageBlobDir = new URI(dir); if (null == uriPageBlobDir.getAuthority()) { // Concatenate the default file system prefix with the relative // page blob directory path. @@ -1424,7 +1440,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { throws StorageException { if (blob instanceof CloudPageBlobWrapper){ return new PageBlobOutputStream( - (CloudPageBlobWrapper)blob, getInstrumentedContext(), sessionConfiguration); + (CloudPageBlobWrapper) blob, getInstrumentedContext(), sessionConfiguration); } else { // Handle both ClouldBlockBlobWrapperImpl and (only for the test code path) @@ -1739,12 +1755,13 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { private Iterable listRootBlobs(boolean includeMetadata, boolean useFlatBlobListing) throws StorageException, URISyntaxException { return rootDirectory.listBlobs( - null, useFlatBlobListing, - includeMetadata ? - EnumSet.of(BlobListingDetails.METADATA) : - EnumSet.noneOf(BlobListingDetails.class), null, - getInstrumentedContext()); + useFlatBlobListing, + includeMetadata + ? EnumSet.of(BlobListingDetails.METADATA) + : EnumSet.noneOf(BlobListingDetails.class), + null, + getInstrumentedContext()); } /** @@ -1771,11 +1788,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { Iterable list = rootDirectory.listBlobs(aPrefix, useFlatBlobListing, - includeMetadata ? - EnumSet.of(BlobListingDetails.METADATA) : - EnumSet.noneOf(BlobListingDetails.class), - null, - getInstrumentedContext()); + includeMetadata + ? 
EnumSet.of(BlobListingDetails.METADATA) + : EnumSet.noneOf(BlobListingDetails.class), + null, + getInstrumentedContext()); return list; } @@ -1941,9 +1958,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { if (selfThrottlingEnabled) { SelfThrottlingIntercept.hook(operationContext, selfThrottlingReadFactor, selfThrottlingWriteFactor); + } else if (autoThrottlingEnabled) { + ClientThrottlingIntercept.hook(operationContext); } - if(bandwidthGaugeUpdater != null) { + if (bandwidthGaugeUpdater != null) { //bandwidthGaugeUpdater is null when we config to skip azure metrics ResponseReceivedMetricUpdater.hook( operationContext, @@ -2440,16 +2459,19 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { try { blob.delete(operationContext, lease); } catch (StorageException e) { - LOG.error("Encountered Storage Exception for delete on Blob: {}, Exception Details: {} Error Code: {}", - blob.getUri(), e.getMessage(), e.getErrorCode()); + if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) { + LOG.error("Encountered Storage Exception for delete on Blob: {}" + + ", Exception Details: {} Error Code: {}", + blob.getUri(), e.getMessage(), e.getErrorCode()); + } // On exception, check that if: // 1. It's a BlobNotFound exception AND // 2. It got there after one-or-more retries THEN // we swallow the exception. - if (e.getErrorCode() != null && - "BlobNotFound".equals(e.getErrorCode()) && - operationContext.getRequestResults().size() > 1 && - operationContext.getRequestResults().get(0).getException() != null) { + if (e.getErrorCode() != null + && "BlobNotFound".equals(e.getErrorCode()) + && operationContext.getRequestResults().size() > 1 + && operationContext.getRequestResults().get(0).getException() != null) { LOG.debug("Swallowing delete exception on retry: {}", e.getMessage()); return; } else { @@ -2472,17 +2494,17 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { // Container doesn't exist, no need to do anything return true; } - // Get the blob reference and delete it. CloudBlobWrapper blob = getBlobReference(key); - if (blob.exists(getInstrumentedContext())) { - safeDelete(blob, lease); - return true; - } else { + safeDelete(blob, lease); + return true; + } catch (Exception e) { + if (e instanceof StorageException + && NativeAzureFileSystemHelper.isFileNotFoundException( + (StorageException) e)) { + // the file or directory does not exist return false; } - } catch (Exception e) { - // Re-throw as an Azure storage exception. 
throw new AzureException(e); } } @@ -2496,7 +2518,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { return delete(key, null); } catch (IOException e) { Throwable t = e.getCause(); - if(t != null && t instanceof StorageException) { + if (t != null && t instanceof StorageException) { StorageException se = (StorageException) t; if ("LeaseIdMissing".equals(se.getErrorCode())){ SelfRenewingLease lease = null; @@ -2509,7 +2531,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { throw e3; } finally { try { - if(lease != null){ + if (lease != null){ lease.free(); } } catch (Exception e4){ @@ -2561,8 +2583,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { srcBlob = getBlobReference(srcKey); if (!srcBlob.exists(getInstrumentedContext())) { - throw new AzureException ("Source blob " + srcKey + - " does not exist."); + throw new AzureException("Source blob " + srcKey + " does not exist."); } /** @@ -2600,19 +2621,19 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { if (se.getHttpStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) { int copyBlobMinBackoff = sessionConfiguration.getInt( KEY_COPYBLOB_MIN_BACKOFF_INTERVAL, - DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL); + DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL); int copyBlobMaxBackoff = sessionConfiguration.getInt( KEY_COPYBLOB_MAX_BACKOFF_INTERVAL, - DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL); + DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL); int copyBlobDeltaBackoff = sessionConfiguration.getInt( KEY_COPYBLOB_BACKOFF_INTERVAL, - DEFAULT_COPYBLOB_BACKOFF_INTERVAL); + DEFAULT_COPYBLOB_BACKOFF_INTERVAL); int copyBlobMaxRetries = sessionConfiguration.getInt( KEY_COPYBLOB_MAX_IO_RETRIES, - DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS); + DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS); BlobRequestOptions options = new BlobRequestOptions(); options.setRetryPolicyFactory(new RetryExponentialRetry( @@ -2631,7 +2652,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { InputStream ipStream = null; OutputStream opStream = null; try { - if(srcBlob.getProperties().getBlobType() == BlobType.PAGE_BLOB){ + if (srcBlob.getProperties().getBlobType() == BlobType.PAGE_BLOB){ ipStream = openInputStream(srcBlob); opStream = openOutputStream(dstBlob); byte[] buffer = new byte[PageBlobFormatHelpers.PAGE_SIZE]; @@ -2817,7 +2838,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore { @Override public void close() { - if(bandwidthGaugeUpdater != null) { + if (bandwidthGaugeUpdater != null) { bandwidthGaugeUpdater.close(); bandwidthGaugeUpdater = null; } diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlobOperationDescriptor.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlobOperationDescriptor.java new file mode 100644 index 00000000000..6da64e124ef --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlobOperationDescriptor.java @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +import com.microsoft.azure.storage.Constants.HeaderConstants; +import org.apache.hadoop.classification.InterfaceAudience; +import java.net.HttpURLConnection; +import java.net.URL; + +/** + * Determines the operation type (PutBlock, PutPage, GetBlob, etc) of Azure + * Storage operations. This is used by the handlers of the SendingRequestEvent + * and ResponseReceivedEvent exposed by the Azure Storage SDK to identify + * operation types (since the type of operation is not exposed by the SDK). + */ +@InterfaceAudience.Private +final class BlobOperationDescriptor { + + private BlobOperationDescriptor() { + // hide default constructor + } + + /** + * Gets the content length for the Azure Storage operation from the + * 'x-ms-range' header, if set. + * @param range the value of the 'x-ms-range' header. + * @return the content length, or zero if not set. + */ + private static long getContentLengthIfKnown(String range) { + long contentLength = 0; + // Format is "bytes=%d-%d" + if (range != null && range.startsWith("bytes=")) { + String[] offsets = range.substring("bytes=".length()).split("-"); + if (offsets.length == 2) { + contentLength = Long.parseLong(offsets[1]) - Long.parseLong(offsets[0]) + + 1; + } + } + return contentLength; + } + + /** + * Gets the content length for the Azure Storage operation, or returns zero if + * unknown. + * @param conn the connection object for the Azure Storage operation. + * @param operationType the Azure Storage operation type. + * @return the content length, or zero if unknown. + */ + static long getContentLengthIfKnown(HttpURLConnection conn, + OperationType operationType) { + long contentLength = 0; + switch (operationType) { + case AppendBlock: + case PutBlock: + String lengthString = conn.getRequestProperty( + HeaderConstants.CONTENT_LENGTH); + contentLength = (lengthString != null) + ? Long.parseLong(lengthString) + : 0; + break; + case PutPage: + case GetBlob: + contentLength = BlobOperationDescriptor.getContentLengthIfKnown( + conn.getRequestProperty("x-ms-range")); + break; + default: + break; + } + return contentLength; + } + + /** + * Gets the operation type of an Azure Storage operation. + * + * @param conn the connection object for the Azure Storage operation. + * @return the operation type. 
+ */ + static OperationType getOperationType(HttpURLConnection conn) { + OperationType operationType = OperationType.Unknown; + String method = conn.getRequestMethod(); + String compValue = getQueryParameter(conn.getURL(), + "comp"); + + if (method.equalsIgnoreCase("PUT")) { + if (compValue != null) { + switch (compValue) { + case "metadata": + operationType = OperationType.SetMetadata; + break; + case "properties": + operationType = OperationType.SetProperties; + break; + case "block": + operationType = OperationType.PutBlock; + break; + case "page": + String pageWrite = conn.getRequestProperty("x-ms-page-write"); + if (pageWrite != null && pageWrite.equalsIgnoreCase( + "UPDATE")) { + operationType = OperationType.PutPage; + } + break; + case "appendblock": + operationType = OperationType.AppendBlock; + break; + case "blocklist": + operationType = OperationType.PutBlockList; + break; + default: + break; + } + } else { + String blobType = conn.getRequestProperty("x-ms-blob-type"); + if (blobType != null + && (blobType.equalsIgnoreCase("PageBlob") + || blobType.equalsIgnoreCase("BlockBlob") + || blobType.equalsIgnoreCase("AppendBlob"))) { + operationType = OperationType.CreateBlob; + } else if (blobType == null) { + String resType = getQueryParameter(conn.getURL(), + "restype"); + if (resType != null + && resType.equalsIgnoreCase("container")) { + operationType = operationType.CreateContainer; + } + } + } + } else if (method.equalsIgnoreCase("GET")) { + if (compValue != null) { + switch (compValue) { + case "list": + operationType = OperationType.ListBlobs; + break; + + case "metadata": + operationType = OperationType.GetMetadata; + break; + case "blocklist": + operationType = OperationType.GetBlockList; + break; + case "pagelist": + operationType = OperationType.GetPageList; + break; + default: + break; + } + } else if (conn.getRequestProperty("x-ms-range") != null) { + operationType = OperationType.GetBlob; + } + } else if (method.equalsIgnoreCase("HEAD")) { + operationType = OperationType.GetProperties; + } else if (method.equalsIgnoreCase("DELETE")) { + String resType = getQueryParameter(conn.getURL(), + "restype"); + if (resType != null + && resType.equalsIgnoreCase("container")) { + operationType = operationType.DeleteContainer; + } else { + operationType = OperationType.DeleteBlob; + } + } + return operationType; + } + + private static String getQueryParameter(URL url, String queryParameterName) { + String query = (url != null) ? url.getQuery(): null; + + if (query == null) { + return null; + } + + String searchValue = queryParameterName + "="; + + int offset = query.indexOf(searchValue); + String value = null; + if (offset != -1) { + int beginIndex = offset + searchValue.length(); + int endIndex = query.indexOf('&', beginIndex); + value = (endIndex == -1) + ? 
query.substring(beginIndex) + : query.substring(beginIndex, endIndex); + } + return value; + } + + @InterfaceAudience.Private + enum OperationType { + AppendBlock, + CreateBlob, + CreateContainer, + DeleteBlob, + DeleteContainer, + GetBlob, + GetBlockList, + GetMetadata, + GetPageList, + GetProperties, + ListBlobs, + PutBlock, + PutBlockList, + PutPage, + SetMetadata, + SetProperties, + Unknown + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java index 554241500df..c37b2bec6ec 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobInputStream.java @@ -43,11 +43,16 @@ final class BlockBlobInputStream extends InputStream implements Seekable { private InputStream blobInputStream = null; private int minimumReadSizeInBytes = 0; private long streamPositionAfterLastRead = -1; + // position of next network read within stream private long streamPosition = 0; + // length of stream private long streamLength = 0; private boolean closed = false; + // internal buffer, re-used for performance optimization private byte[] streamBuffer; + // zero-based offset within streamBuffer of current read position private int streamBufferPosition; + // length of data written to streamBuffer, streamBuffer may be larger private int streamBufferLength; /** @@ -81,6 +86,16 @@ final class BlockBlobInputStream extends InputStream implements Seekable { } } + /** + * Reset the internal stream buffer but do not release the memory. + * The buffer can be reused to avoid frequent memory allocations of + * a large buffer. + */ + private void resetStreamBuffer() { + streamBufferPosition = 0; + streamBufferLength = 0; + } + /** * Gets the read position of the stream. * @return the zero-based byte offset of the read position. @@ -89,7 +104,9 @@ final class BlockBlobInputStream extends InputStream implements Seekable { @Override public synchronized long getPos() throws IOException { checkState(); - return streamPosition; + return (streamBuffer != null) + ? 
streamPosition - streamBufferLength + streamBufferPosition + : streamPosition; } /** @@ -107,21 +124,39 @@ final class BlockBlobInputStream extends InputStream implements Seekable { throw new EOFException( FSExceptionMessages.CANNOT_SEEK_PAST_EOF + " " + pos); } - if (pos == getPos()) { + + // calculate offset between the target and current position in the stream + long offset = pos - getPos(); + + if (offset == 0) { // no=op, no state change return; } - if (streamBuffer != null) { - long offset = streamPosition - pos; - if (offset > 0 && offset < streamBufferLength) { - streamBufferPosition = streamBufferLength - (int) offset; - } else { - streamBufferPosition = streamBufferLength; + if (offset > 0) { + // forward seek, data can be skipped as an optimization + if (skip(offset) != offset) { + throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY); } + return; + } + + // reverse seek, offset is negative + if (streamBuffer != null) { + if (streamBufferPosition + offset >= 0) { + // target position is inside the stream buffer, + // only need to move backwards within the stream buffer + streamBufferPosition += offset; + } else { + // target position is outside the stream buffer, + // need to reset stream buffer and move position for next network read + resetStreamBuffer(); + streamPosition = pos; + } + } else { + streamPosition = pos; } - streamPosition = pos; // close BlobInputStream after seek is invoked because BlobInputStream // does not support seek closeBlobInputStream(); @@ -189,8 +224,7 @@ final class BlockBlobInputStream extends InputStream implements Seekable { streamBuffer = new byte[(int) Math.min(minimumReadSizeInBytes, streamLength)]; } - streamBufferPosition = 0; - streamBufferLength = 0; + resetStreamBuffer(); outputStream = new MemoryOutputStream(streamBuffer, streamBufferPosition, streamBuffer.length); needToCopy = true; @@ -295,27 +329,44 @@ final class BlockBlobInputStream extends InputStream implements Seekable { * @param n the number of bytes to be skipped. * @return the actual number of bytes skipped. * @throws IOException IO failure + * @throws IndexOutOfBoundsException if n is negative or if the sum of n + * and the current value of getPos() is greater than the length of the stream. */ @Override public synchronized long skip(long n) throws IOException { checkState(); if (blobInputStream != null) { - return blobInputStream.skip(n); - } else { - if (n < 0 || streamPosition + n > streamLength) { - throw new IndexOutOfBoundsException("skip range"); - } - - if (streamBuffer != null) { - streamBufferPosition = (n < streamBufferLength - streamBufferPosition) - ? streamBufferPosition + (int) n - : streamBufferLength; - } - - streamPosition += n; - return n; + // blobInput stream is open; delegate the work to it + long skipped = blobInputStream.skip(n); + // update position to the actual skip value + streamPosition += skipped; + return skipped; } + + // no blob stream; implement the skip logic directly + if (n < 0 || n > streamLength - getPos()) { + throw new IndexOutOfBoundsException("skip range"); + } + + if (streamBuffer != null) { + // there's a buffer, so seek with it + if (n < streamBufferLength - streamBufferPosition) { + // new range is in the buffer, so just update the buffer position + // skip within the buffer. 
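// Editor's worked example for the buffered getPos()/seek() logic above
// (illustrative, not part of the patch): suppose the last network read filled
// a 4 KB buffer, so streamPosition == 4096 and streamBufferLength == 4096,
// and the caller has consumed 1 KB of it, so streamBufferPosition == 1024.
// Then
//   getPos() == 4096 - 4096 + 1024 == 1024,
// i.e. the caller's logical position rather than the position of the next
// network read. A subsequent seek(512) is a reverse seek with offset == -512;
// since 1024 + (-512) >= 0 the target is still inside the buffer, so only
// streamBufferPosition changes (to 512) and no new network request is issued.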
+ streamBufferPosition += (int) n; + } else { + // skip is out of range, so move position to ne value and reset + // the buffer ready for the next read() + streamPosition = getPos() + n; + resetStreamBuffer(); + } + } else { + // no stream buffer; increment the stream position ready for + // the next triggered connection & read + streamPosition += n; + } + return n; } /** diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java new file mode 100644 index 00000000000..aa7ac2e1d7b --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java @@ -0,0 +1,284 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.commons.lang.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Throttles storage operations to minimize errors and maximum throughput. This + * improves throughput by as much as 35% when the service throttles requests due + * to exceeding account level ingress or egress limits. + */ +@InterfaceAudience.Private +class ClientThrottlingAnalyzer { + private static final Logger LOG = LoggerFactory.getLogger( + ClientThrottlingAnalyzer.class); + private static final int DEFAULT_ANALYSIS_PERIOD_MS = 10 * 1000; + private static final int MIN_ANALYSIS_PERIOD_MS = 1000; + private static final int MAX_ANALYSIS_PERIOD_MS = 30000; + private static final double MIN_ACCEPTABLE_ERROR_PERCENTAGE = .1; + private static final double MAX_EQUILIBRIUM_ERROR_PERCENTAGE = 1; + private static final double RAPID_SLEEP_DECREASE_FACTOR = .75; + private static final double RAPID_SLEEP_DECREASE_TRANSITION_PERIOD_MS = 150 + * 1000; + private static final double SLEEP_DECREASE_FACTOR = .975; + private static final double SLEEP_INCREASE_FACTOR = 1.05; + private int analysisPeriodMs; + + private volatile int sleepDuration = 0; + private long consecutiveNoErrorCount = 0; + private String name = null; + private Timer timer = null; + private AtomicReference blobMetrics = null; + + private ClientThrottlingAnalyzer() { + // hide default constructor + } + + /** + * Creates an instance of the ClientThrottlingAnalyzer class with + * the specified name. + * + * @param name a name used to identify this instance. 
+ * + * @throws IllegalArgumentException if name is null or empty. + */ + ClientThrottlingAnalyzer(String name) throws IllegalArgumentException { + this(name, DEFAULT_ANALYSIS_PERIOD_MS); + } + + /** + * Creates an instance of the ClientThrottlingAnalyzer class with + * the specified name and period. + * + * @param name A name used to identify this instance. + * + * @param period The frequency, in milliseconds, at which metrics are + * analyzed. + * + * @throws IllegalArgumentException + * If name is null or empty. + * If period is less than 1000 or greater than 30000 milliseconds. + */ + ClientThrottlingAnalyzer(String name, int period) + throws IllegalArgumentException { + Preconditions.checkArgument( + StringUtils.isNotEmpty(name), + "The argument 'name' cannot be null or empty."); + Preconditions.checkArgument( + period >= MIN_ANALYSIS_PERIOD_MS && period <= MAX_ANALYSIS_PERIOD_MS, + "The argument 'period' must be between 1000 and 30000."); + this.name = name; + this.analysisPeriodMs = period; + this.blobMetrics = new AtomicReference( + new BlobOperationMetrics(System.currentTimeMillis())); + this.timer = new Timer( + String.format("wasb-timer-client-throttling-analyzer-%s", name)); + this.timer.schedule(new TimerTaskImpl(), + analysisPeriodMs, + analysisPeriodMs); + } + + /** + * Updates metrics with results from the current storage operation. + * + * @param count The count of bytes transferred. + * + * @param isFailedOperation True if the operation failed; otherwise false. + */ + public void addBytesTransferred(long count, boolean isFailedOperation) { + BlobOperationMetrics metrics = blobMetrics.get(); + if (isFailedOperation) { + metrics.bytesFailed.addAndGet(count); + metrics.operationsFailed.incrementAndGet(); + } else { + metrics.bytesSuccessful.addAndGet(count); + metrics.operationsSuccessful.incrementAndGet(); + } + } + + /** + * Suspends the current storage operation, as necessary, to reduce throughput. + */ + public void suspendIfNecessary() { + int duration = sleepDuration; + if (duration > 0) { + try { + Thread.sleep(duration); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + } + + @VisibleForTesting + int getSleepDuration() { + return sleepDuration; + } + + private int analyzeMetricsAndUpdateSleepDuration(BlobOperationMetrics metrics, + int sleepDuration) { + final double percentageConversionFactor = 100; + double bytesFailed = metrics.bytesFailed.get(); + double bytesSuccessful = metrics.bytesSuccessful.get(); + double operationsFailed = metrics.operationsFailed.get(); + double operationsSuccessful = metrics.operationsSuccessful.get(); + double errorPercentage = (bytesFailed <= 0) + ? 0 + : percentageConversionFactor + * bytesFailed + / (bytesFailed + bytesSuccessful); + long periodMs = metrics.endTime - metrics.startTime; + + double newSleepDuration; + + if (errorPercentage < MIN_ACCEPTABLE_ERROR_PERCENTAGE) { + ++consecutiveNoErrorCount; + // Decrease sleepDuration in order to increase throughput. + double reductionFactor = + (consecutiveNoErrorCount * analysisPeriodMs + >= RAPID_SLEEP_DECREASE_TRANSITION_PERIOD_MS) + ? RAPID_SLEEP_DECREASE_FACTOR + : SLEEP_DECREASE_FACTOR; + + newSleepDuration = sleepDuration * reductionFactor; + } else if (errorPercentage < MAX_EQUILIBRIUM_ERROR_PERCENTAGE) { + // Do not modify sleepDuration in order to stabilize throughput. + newSleepDuration = sleepDuration; + } else { + // Increase sleepDuration in order to minimize error rate. 
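// Editor's worked example for the increase path above (illustrative, not part
// of the patch): if the previous 10 s analysis period moved 80 MB
// successfully and 20 MB failed, the achieved goodput was 8 MB/s, so sending
// all 100 MB at that rate would have needed 12.5 s:
//   additionalDelayNeeded = (80 MB + 20 MB) * 10000 ms / 80 MB - 10000 ms
//                         = 2500 ms.
// That delay is then spread across the number of operations observed in the
// period, raised to at least 1.05x the current sleepDuration plus 1 ms, and
// capped at the analysis period.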
+ consecutiveNoErrorCount = 0; + + // Increase sleep duration in order to reduce throughput and error rate. + // First, calculate target throughput: bytesSuccessful / periodMs. + // Next, calculate time required to send *all* data (assuming next period + // is similar to previous) at the target throughput: (bytesSuccessful + // + bytesFailed) * periodMs / bytesSuccessful. Next, subtract periodMs to + // get the total additional delay needed. + double additionalDelayNeeded = 5 * analysisPeriodMs; + if (bytesSuccessful > 0) { + additionalDelayNeeded = (bytesSuccessful + bytesFailed) + * periodMs + / bytesSuccessful + - periodMs; + } + + // amortize the additional delay needed across the estimated number of + // requests during the next period + newSleepDuration = additionalDelayNeeded + / (operationsFailed + operationsSuccessful); + + final double maxSleepDuration = analysisPeriodMs; + final double minSleepDuration = sleepDuration * SLEEP_INCREASE_FACTOR; + + // Add 1 ms to avoid rounding down and to decrease proximity to the server + // side ingress/egress limit. Ensure that the new sleep duration is + // larger than the current one to more quickly reduce the number of + // errors. Don't allow the sleep duration to grow unbounded, after a + // certain point throttling won't help, for example, if there are far too + // many tasks/containers/nodes no amount of throttling will help. + newSleepDuration = Math.max(newSleepDuration, minSleepDuration) + 1; + newSleepDuration = Math.min(newSleepDuration, maxSleepDuration); + } + + if (LOG.isDebugEnabled()) { + LOG.debug(String.format( + "%5.5s, %10d, %10d, %10d, %10d, %6.2f, %5d, %5d, %5d", + name, + (int) bytesFailed, + (int) bytesSuccessful, + (int) operationsFailed, + (int) operationsSuccessful, + errorPercentage, + periodMs, + (int) sleepDuration, + (int) newSleepDuration)); + } + + return (int) newSleepDuration; + } + + /** + * Timer callback implementation for periodically analyzing metrics. + */ + class TimerTaskImpl extends TimerTask { + private AtomicInteger doingWork = new AtomicInteger(0); + + /** + * Periodically analyzes a snapshot of the blob storage metrics and updates + * the sleepDuration in order to appropriately throttle storage operations. + */ + @Override + public void run() { + boolean doWork = false; + try { + doWork = doingWork.compareAndSet(0, 1); + + // prevent concurrent execution of this task + if (!doWork) { + return; + } + + long now = System.currentTimeMillis(); + if (now - blobMetrics.get().startTime >= analysisPeriodMs) { + BlobOperationMetrics oldMetrics = blobMetrics.getAndSet( + new BlobOperationMetrics(now)); + oldMetrics.endTime = now; + sleepDuration = analyzeMetricsAndUpdateSleepDuration(oldMetrics, + sleepDuration); + } + } + finally { + if (doWork) { + doingWork.set(0); + } + } + } + } + + /** + * Stores blob operation metrics during each analysis period. 
+ */ + static class BlobOperationMetrics { + private AtomicLong bytesFailed; + private AtomicLong bytesSuccessful; + private AtomicLong operationsFailed; + private AtomicLong operationsSuccessful; + private long endTime; + private long startTime; + + BlobOperationMetrics(long startTime) { + this.startTime = startTime; + this.bytesFailed = new AtomicLong(); + this.bytesSuccessful = new AtomicLong(); + this.operationsFailed = new AtomicLong(); + this.operationsSuccessful = new AtomicLong(); + } + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingIntercept.java new file mode 100644 index 00000000000..9da993bd237 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingIntercept.java @@ -0,0 +1,221 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +import com.microsoft.azure.storage.ErrorReceivingResponseEvent; +import com.microsoft.azure.storage.OperationContext; +import com.microsoft.azure.storage.RequestResult; +import com.microsoft.azure.storage.ResponseReceivedEvent; +import com.microsoft.azure.storage.SendingRequestEvent; +import com.microsoft.azure.storage.StorageEvent; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.classification.InterfaceAudience; + +import java.net.HttpURLConnection; + +/** + * Throttles Azure Storage read and write operations to achieve maximum + * throughput by minimizing errors. The errors occur when the account ingress + * or egress limits are exceeded and the server-side throttles requests. + * Server-side throttling causes the retry policy to be used, but the retry + * policy sleeps for long periods of time causing the total ingress or egress + * throughput to be as much as 35% lower than optimal. The retry policy is also + * after the fact, in that it applies after a request fails. On the other hand, + * the client-side throttling implemented here happens before requests are made + * and sleeps just enough to minimize errors, allowing optimal ingress and/or + * egress throughput. 
+ */ +@InterfaceAudience.Private +final class ClientThrottlingIntercept { + private static final Logger LOG = LoggerFactory.getLogger( + ClientThrottlingIntercept.class); + private static ClientThrottlingIntercept singleton = null; + private ClientThrottlingAnalyzer readThrottler = null; + private ClientThrottlingAnalyzer writeThrottler = null; + + // Hide default constructor + private ClientThrottlingIntercept() { + readThrottler = new ClientThrottlingAnalyzer("read"); + writeThrottler = new ClientThrottlingAnalyzer("write"); + LOG.debug("Client-side throttling is enabled for the WASB file system."); + } + + static synchronized void initializeSingleton() { + if (singleton == null) { + singleton = new ClientThrottlingIntercept(); + } + } + + static void hook(OperationContext context) { + context.getErrorReceivingResponseEventHandler().addListener( + new ErrorReceivingResponseEventHandler()); + context.getSendingRequestEventHandler().addListener( + new SendingRequestEventHandler()); + context.getResponseReceivedEventHandler().addListener( + new ResponseReceivedEventHandler()); + } + + private static void updateMetrics(HttpURLConnection conn, + RequestResult result) { + BlobOperationDescriptor.OperationType operationType + = BlobOperationDescriptor.getOperationType(conn); + int status = result.getStatusCode(); + long contentLength = 0; + // If the socket is terminated prior to receiving a response, the HTTP + // status may be 0 or -1. A status less than 200 or greater than or equal + // to 500 is considered an error. + boolean isFailedOperation = (status < HttpURLConnection.HTTP_OK + || status >= java.net.HttpURLConnection.HTTP_INTERNAL_ERROR); + + switch (operationType) { + case AppendBlock: + case PutBlock: + case PutPage: + contentLength = BlobOperationDescriptor.getContentLengthIfKnown(conn, + operationType); + if (contentLength > 0) { + singleton.writeThrottler.addBytesTransferred(contentLength, + isFailedOperation); + } + break; + case GetBlob: + contentLength = BlobOperationDescriptor.getContentLengthIfKnown(conn, + operationType); + if (contentLength > 0) { + singleton.readThrottler.addBytesTransferred(contentLength, + isFailedOperation); + } + break; + default: + break; + } + } + + /** + * Called when a network error occurs before the HTTP status and response + * headers are received. Client-side throttling uses this to collect metrics. + * + * @param event The connection, operation, and request state. + */ + public static void errorReceivingResponse(ErrorReceivingResponseEvent event) { + updateMetrics((HttpURLConnection) event.getConnectionObject(), + event.getRequestResult()); + } + + /** + * Called before the Azure Storage SDK sends a request. Client-side throttling + * uses this to suspend the request, if necessary, to minimize errors and + * maximize throughput. + * + * @param event The connection, operation, and request state. + */ + public static void sendingRequest(SendingRequestEvent event) { + BlobOperationDescriptor.OperationType operationType + = BlobOperationDescriptor.getOperationType( + (HttpURLConnection) event.getConnectionObject()); + switch (operationType) { + case GetBlob: + singleton.readThrottler.suspendIfNecessary(); + break; + case AppendBlock: + case PutBlock: + case PutPage: + singleton.writeThrottler.suspendIfNecessary(); + break; + default: + break; + } + } + + /** + * Called after the Azure Storage SDK receives a response. Client-side + * throttling uses this to collect metrics. + * + * @param event The connection, operation, and request state. 
+ */ + public static void responseReceived(ResponseReceivedEvent event) { + updateMetrics((HttpURLConnection) event.getConnectionObject(), + event.getRequestResult()); + } + + /** + * The ErrorReceivingResponseEvent is fired when the Azure Storage SDK + * encounters a network error before the HTTP status and response headers are + * received. + */ + @InterfaceAudience.Private + static class ErrorReceivingResponseEventHandler + extends StorageEvent { + + /** + * Called when a network error occurs before the HTTP status and response + * headers are received. Client-side throttling uses this to collect + * metrics. + * + * @param event The connection, operation, and request state. + */ + @Override + public void eventOccurred(ErrorReceivingResponseEvent event) { + singleton.errorReceivingResponse(event); + } + } + + /** + * The SendingRequestEvent is fired before the Azure Storage SDK sends a + * request. + */ + @InterfaceAudience.Private + static class SendingRequestEventHandler + extends StorageEvent { + + /** + * Called before the Azure Storage SDK sends a request. Client-side + * throttling uses this to suspend the request, if necessary, to minimize + * errors and maximize throughput. + * + * @param event The connection, operation, and request state. + */ + @Override + public void eventOccurred(SendingRequestEvent event) { + singleton.sendingRequest(event); + } + } + + /** + * The ResponseReceivedEvent is fired after the Azure Storage SDK receives a + * response. + */ + @InterfaceAudience.Private + static class ResponseReceivedEventHandler + extends StorageEvent { + + /** + * Called after the Azure Storage SDK receives a response. Client-side + * throttling uses this + * to collect metrics. + * + * @param event The connection, operation, and request state. + */ + @Override + public void eventOccurred(ResponseReceivedEvent event) { + singleton.responseReceived(event); + } + } +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java index a7558a35b85..2abc6c6e800 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java @@ -2043,7 +2043,12 @@ public class NativeAzureFileSystem extends FileSystem { AzureFileSystemThreadTask task = new AzureFileSystemThreadTask() { @Override public boolean execute(FileMetadata file) throws IOException{ - return deleteFile(file.getKey(), file.isDir()); + if (!deleteFile(file.getKey(), file.isDir())) { + LOG.warn("Attempt to delete non-existent {} {}", + file.isDir() ? "directory" : "file", + file.getKey()); + } + return true; } }; @@ -2080,30 +2085,28 @@ public class NativeAzureFileSystem extends FileSystem { return new AzureFileSystemThreadPoolExecutor(threadCount, threadNamePrefix, operation, key, config); } - // Delete single file / directory from key. + /** + * Delete the specified file or directory and increment metrics. + * If the file or directory does not exist, the operation returns false. + * @param path the path to a file or directory. + * @param isDir true if the path is a directory; otherwise false. + * @return true if delete is successful; otherwise false. + * @throws IOException if an IO error occurs while attempting to delete the + * path. 
+ * + */ @VisibleForTesting - boolean deleteFile(String key, boolean isDir) throws IOException { - try { - if (store.delete(key)) { - if (isDir) { - instrumentation.directoryDeleted(); - } else { - instrumentation.fileDeleted(); - } - return true; - } else { - return false; - } - } catch(IOException e) { - Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(e); - - if (innerException instanceof StorageException - && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) { - return false; - } - - throw e; + boolean deleteFile(String path, boolean isDir) throws IOException { + if (!store.delete(path)) { + return false; } + + if (isDir) { + instrumentation.directoryDeleted(); + } else { + instrumentation.fileDeleted(); + } + return true; } @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java index 51867cd728a..d04a19ca3e3 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestBase.java @@ -67,4 +67,8 @@ public abstract class AbstractWasbTestBase { protected abstract AzureBlobStorageTestAccount createTestAccount() throws Exception; + + protected AzureBlobStorageTestAccount getTestAccount() { + return testAccount; + } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java new file mode 100644 index 00000000000..07d4ebc8632 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java @@ -0,0 +1,305 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.azure; + +import com.microsoft.azure.storage.OperationContext; +import com.microsoft.azure.storage.ResponseReceivedEvent; +import com.microsoft.azure.storage.SendingRequestEvent; +import com.microsoft.azure.storage.StorageEvent; +import com.microsoft.azure.storage.blob.BlobInputStream; +import com.microsoft.azure.storage.blob.BlobOutputStream; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.microsoft.azure.storage.blob.CloudPageBlob; +import org.apache.hadoop.classification.InterfaceAudience; +import org.junit.Test; + +import java.net.HttpURLConnection; + +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertEquals; + +/** + * Tests for BlobOperationDescriptor. + */ +public class TestBlobOperationDescriptor extends AbstractWasbTestBase { + private BlobOperationDescriptor.OperationType lastOperationTypeReceived; + private BlobOperationDescriptor.OperationType lastOperationTypeSent; + private long lastContentLengthReceived; + + @Override + protected AzureBlobStorageTestAccount createTestAccount() throws Exception { + return AzureBlobStorageTestAccount.create(); + } + + @Test + public void testAppendBlockOperations() throws Exception { + CloudBlobContainer container = getTestAccount().getRealContainer(); + + OperationContext context = new OperationContext(); + context.getResponseReceivedEventHandler().addListener( + new ResponseReceivedEventHandler()); + context.getSendingRequestEventHandler().addListener( + new SendingRequestEventHandler()); + + CloudAppendBlob appendBlob = container.getAppendBlobReference( + "testAppendBlockOperations"); + assertNull(lastOperationTypeSent); + assertNull(lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + try ( + BlobOutputStream output + = appendBlob.openWriteNew(null, null, context); + ) { + assertEquals(BlobOperationDescriptor.OperationType.CreateBlob, + lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + String message = "this is a test"; + output.write(message.getBytes("UTF-8")); + output.flush(); + assertEquals(BlobOperationDescriptor.OperationType.AppendBlock, + lastOperationTypeSent); + assertEquals(BlobOperationDescriptor.OperationType.AppendBlock, + lastOperationTypeReceived); + assertEquals(message.length(), lastContentLengthReceived); + } + } + + @Test + public void testPutBlockOperations() throws Exception { + CloudBlobContainer container = getTestAccount().getRealContainer(); + + OperationContext context = new OperationContext(); + context.getResponseReceivedEventHandler().addListener( + new ResponseReceivedEventHandler()); + context.getSendingRequestEventHandler().addListener( + new SendingRequestEventHandler()); + + CloudBlockBlob blockBlob = container.getBlockBlobReference( + "testPutBlockOperations"); + assertNull(lastOperationTypeSent); + assertNull(lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + try ( + BlobOutputStream output + = blockBlob.openOutputStream(null, + null, + context); + ) { + assertNull(lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + String message = "this is a test"; + output.write(message.getBytes("UTF-8")); + output.flush(); + assertEquals(BlobOperationDescriptor.OperationType.PutBlock, + lastOperationTypeSent); + assertEquals(BlobOperationDescriptor.OperationType.PutBlock, + lastOperationTypeReceived); + 
assertEquals(message.length(), lastContentLengthReceived); + } + assertEquals(BlobOperationDescriptor.OperationType.PutBlockList, + lastOperationTypeSent); + assertEquals(BlobOperationDescriptor.OperationType.PutBlockList, + lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + } + + @Test + public void testPutPageOperations() throws Exception { + CloudBlobContainer container = getTestAccount().getRealContainer(); + + OperationContext context = new OperationContext(); + context.getResponseReceivedEventHandler().addListener( + new ResponseReceivedEventHandler()); + context.getSendingRequestEventHandler().addListener( + new SendingRequestEventHandler()); + + CloudPageBlob pageBlob = container.getPageBlobReference( + "testPutPageOperations"); + assertNull(lastOperationTypeSent); + assertNull(lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + try ( + BlobOutputStream output = pageBlob.openWriteNew(1024, + null, + null, + context); + ) { + assertEquals(BlobOperationDescriptor.OperationType.CreateBlob, + lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + final int pageSize = 512; + byte[] buffer = new byte[pageSize]; + output.write(buffer); + output.flush(); + assertEquals(BlobOperationDescriptor.OperationType.PutPage, + lastOperationTypeSent); + assertEquals(BlobOperationDescriptor.OperationType.PutPage, + lastOperationTypeReceived); + assertEquals(buffer.length, lastContentLengthReceived); + } + } + + @Test + public void testGetBlobOperations() throws Exception { + CloudBlobContainer container = getTestAccount().getRealContainer(); + + OperationContext context = new OperationContext(); + context.getResponseReceivedEventHandler().addListener( + new ResponseReceivedEventHandler()); + context.getSendingRequestEventHandler().addListener( + new SendingRequestEventHandler()); + + CloudBlockBlob blockBlob = container.getBlockBlobReference( + "testGetBlobOperations"); + assertNull(lastOperationTypeSent); + assertNull(lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + String message = "this is a test"; + + try ( + BlobOutputStream output = blockBlob.openOutputStream(null, + null, + context); + ) { + assertNull(lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + output.write(message.getBytes("UTF-8")); + output.flush(); + assertEquals(BlobOperationDescriptor.OperationType.PutBlock, + lastOperationTypeSent); + assertEquals(BlobOperationDescriptor.OperationType.PutBlock, + lastOperationTypeReceived); + assertEquals(message.length(), lastContentLengthReceived); + } + assertEquals(BlobOperationDescriptor.OperationType.PutBlockList, + lastOperationTypeSent); + assertEquals(BlobOperationDescriptor.OperationType.PutBlockList, + lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + try ( + BlobInputStream input = blockBlob.openInputStream(null, + null, + context); + ) { + assertEquals(BlobOperationDescriptor.OperationType.GetProperties, + lastOperationTypeSent); + assertEquals(BlobOperationDescriptor.OperationType.GetProperties, + lastOperationTypeReceived); + assertEquals(0, lastContentLengthReceived); + + byte[] buffer = new byte[1024]; + int numBytesRead = input.read(buffer); + assertEquals(BlobOperationDescriptor.OperationType.GetBlob, + lastOperationTypeSent); + assertEquals(BlobOperationDescriptor.OperationType.GetBlob, + lastOperationTypeReceived); + assertEquals(message.length(), lastContentLengthReceived); + assertEquals(numBytesRead, 
lastContentLengthReceived); + } + } + + /** + * Called after the Azure Storage SDK receives a response. + * + * @param event The connection, operation, and request state. + */ + private void responseReceived(ResponseReceivedEvent event) { + HttpURLConnection conn = (HttpURLConnection) event.getConnectionObject(); + BlobOperationDescriptor.OperationType operationType + = BlobOperationDescriptor.getOperationType(conn); + lastOperationTypeReceived = operationType; + + switch (operationType) { + case AppendBlock: + case PutBlock: + case PutPage: + lastContentLengthReceived + = BlobOperationDescriptor.getContentLengthIfKnown(conn, + operationType); + break; + case GetBlob: + lastContentLengthReceived + = BlobOperationDescriptor.getContentLengthIfKnown(conn, + operationType); + break; + default: + lastContentLengthReceived = 0; + break; + } + } + + /** + * Called before the Azure Storage SDK sends a request. + * + * @param event The connection, operation, and request state. + */ + private void sendingRequest(SendingRequestEvent event) { + this.lastOperationTypeSent + = BlobOperationDescriptor.getOperationType( + (HttpURLConnection) event.getConnectionObject()); + } + + /** + * The ResponseReceivedEvent is fired after the Azure Storage SDK receives a + * response. + */ + @InterfaceAudience.Private + class ResponseReceivedEventHandler + extends StorageEvent { + + /** + * Called after the Azure Storage SDK receives a response. + * + * @param event The connection, operation, and request state. + */ + @Override + public void eventOccurred(ResponseReceivedEvent event) { + responseReceived(event); + } + } + + /** + * The SendingRequestEvent is fired before the Azure Storage SDK sends a + * request. + */ + @InterfaceAudience.Private + class SendingRequestEventHandler extends StorageEvent { + + /** + * Called before the Azure Storage SDK sends a request. + * + * @param event The connection, operation, and request state. 
+ */ + @Override + public void eventOccurred(SendingRequestEvent event) { + sendingRequest(event); + } + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java index 24535840b23..0ae4012847b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlockBlobInputStream.java @@ -155,7 +155,7 @@ public class TestBlockBlobInputStream extends AbstractWasbTestBase { } LOG.info("Creating test file {} of size: {}", TEST_FILE_PATH, - TEST_FILE_SIZE ); + TEST_FILE_SIZE); ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer(); try(FSDataOutputStream outputStream = fs.create(TEST_FILE_PATH)) { @@ -198,7 +198,7 @@ public class TestBlockBlobInputStream extends AbstractWasbTestBase { } @Test - public void test_0200_BasicReadTestV2() throws Exception { + public void test_0200_BasicReadTest() throws Exception { assumeHugeFileExists(); try ( @@ -214,12 +214,12 @@ public class TestBlockBlobInputStream extends AbstractWasbTestBase { // v1 forward seek and read a kilobyte into first kilobyte of bufferV1 inputStreamV1.seek(5 * MEGABYTE); int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, KILOBYTE); - assertEquals(numBytesReadV1, KILOBYTE); + assertEquals(KILOBYTE, numBytesReadV1); // v2 forward seek and read a kilobyte into first kilobyte of bufferV2 inputStreamV2.seek(5 * MEGABYTE); int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, KILOBYTE); - assertEquals(numBytesReadV2, KILOBYTE); + assertEquals(KILOBYTE, numBytesReadV2); assertArrayEquals(bufferV1, bufferV2); @@ -229,17 +229,90 @@ public class TestBlockBlobInputStream extends AbstractWasbTestBase { // v1 reverse seek and read a megabyte into last megabyte of bufferV1 inputStreamV1.seek(3 * MEGABYTE); numBytesReadV1 = inputStreamV1.read(bufferV1, offset, len); - assertEquals(numBytesReadV1, len); + assertEquals(len, numBytesReadV1); // v2 reverse seek and read a megabyte into last megabyte of bufferV2 inputStreamV2.seek(3 * MEGABYTE); numBytesReadV2 = inputStreamV2.read(bufferV2, offset, len); - assertEquals(numBytesReadV2, len); + assertEquals(len, numBytesReadV2); assertArrayEquals(bufferV1, bufferV2); } } + @Test + public void test_0201_RandomReadTest() throws Exception { + assumeHugeFileExists(); + + try ( + FSDataInputStream inputStreamV1 + = accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH); + + FSDataInputStream inputStreamV2 + = accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH); + ) { + final int bufferSize = 4 * KILOBYTE; + byte[] bufferV1 = new byte[bufferSize]; + byte[] bufferV2 = new byte[bufferV1.length]; + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + + inputStreamV1.seek(0); + inputStreamV2.seek(0); + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + + int seekPosition = 2 * KILOBYTE; + inputStreamV1.seek(seekPosition); + inputStreamV2.seek(seekPosition); + + inputStreamV1.seek(0); + inputStreamV2.seek(0); + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + + seekPosition = 5 * KILOBYTE; + 
inputStreamV1.seek(seekPosition); + inputStreamV2.seek(seekPosition); + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + + seekPosition = 10 * KILOBYTE; + inputStreamV1.seek(seekPosition); + inputStreamV2.seek(seekPosition); + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + + seekPosition = 4100 * KILOBYTE; + inputStreamV1.seek(seekPosition); + inputStreamV2.seek(seekPosition); + + verifyConsistentReads(inputStreamV1, inputStreamV2, bufferV1, bufferV2); + } + } + + private void verifyConsistentReads(FSDataInputStream inputStreamV1, + FSDataInputStream inputStreamV2, + byte[] bufferV1, + byte[] bufferV2) throws IOException { + int size = bufferV1.length; + final int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, size); + assertEquals("Bytes read from V1 stream", size, numBytesReadV1); + + final int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, size); + assertEquals("Bytes read from V2 stream", size, numBytesReadV2); + + assertArrayEquals("Mismatch in read data", bufferV1, bufferV2); + } + /** * Validates the implementation of InputStream.markSupported. * @throws IOException diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java new file mode 100644 index 00000000000..307e5af5775 --- /dev/null +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestClientThrottlingAnalyzer.java @@ -0,0 +1,177 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.azure; + +import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer; +import org.junit.Test; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; + +/** + * Tests for ClientThrottlingAnalyzer. 
+ */ +public class TestClientThrottlingAnalyzer { + private static final int ANALYSIS_PERIOD = 1000; + private static final int ANALYSIS_PERIOD_PLUS_10_PERCENT = ANALYSIS_PERIOD + + ANALYSIS_PERIOD / 10; + private static final long MEGABYTE = 1024 * 1024; + private static final int MAX_ACCEPTABLE_PERCENT_DIFFERENCE = 20; + + private void sleep(long milliseconds) { + try { + Thread.sleep(milliseconds); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + private void fuzzyValidate(long expected, long actual, double percentage) { + final double lowerBound = Math.max(expected - percentage / 100 * expected, 0); + final double upperBound = expected + percentage / 100 * expected; + + assertTrue( + String.format( + "The actual value %1$d is not within the expected range: " + + "[%2$.2f, %3$.2f].", + actual, + lowerBound, + upperBound), + actual >= lowerBound && actual <= upperBound); + } + + private void validate(long expected, long actual) { + assertEquals( + String.format("The actual value %1$d is not the expected value %2$d.", + actual, + expected), + expected, actual); + } + + private void validateLessThanOrEqual(long maxExpected, long actual) { + assertTrue( + String.format( + "The actual value %1$d is not less than or equal to the maximum" + + " expected value %2$d.", + actual, + maxExpected), + actual < maxExpected); + } + + /** + * Ensure that there is no waiting (sleepDuration = 0) if the metrics have + * never been updated. This validates proper initialization of + * ClientThrottlingAnalyzer. + */ + @Test + public void testNoMetricUpdatesThenNoWaiting() { + ClientThrottlingAnalyzer analyzer = new ClientThrottlingAnalyzer( + "test", + ANALYSIS_PERIOD); + validate(0, analyzer.getSleepDuration()); + sleep(ANALYSIS_PERIOD_PLUS_10_PERCENT); + validate(0, analyzer.getSleepDuration()); + } + + /** + * Ensure that there is no waiting (sleepDuration = 0) if the metrics have + * only been updated with successful requests. + */ + @Test + public void testOnlySuccessThenNoWaiting() { + ClientThrottlingAnalyzer analyzer = new ClientThrottlingAnalyzer( + "test", + ANALYSIS_PERIOD); + analyzer.addBytesTransferred(8 * MEGABYTE, false); + validate(0, analyzer.getSleepDuration()); + sleep(ANALYSIS_PERIOD_PLUS_10_PERCENT); + validate(0, analyzer.getSleepDuration()); + } + + /** + * Ensure that there is waiting (sleepDuration != 0) if the metrics have + * only been updated with failed requests. Also ensure that the + * sleepDuration decreases over time. + */ + @Test + public void testOnlyErrorsAndWaiting() { + ClientThrottlingAnalyzer analyzer = new ClientThrottlingAnalyzer( + "test", + ANALYSIS_PERIOD); + validate(0, analyzer.getSleepDuration()); + analyzer.addBytesTransferred(4 * MEGABYTE, true); + sleep(ANALYSIS_PERIOD_PLUS_10_PERCENT); + final int expectedSleepDuration1 = 1100; + validateLessThanOrEqual(expectedSleepDuration1, analyzer.getSleepDuration()); + sleep(10 * ANALYSIS_PERIOD); + final int expectedSleepDuration2 = 900; + validateLessThanOrEqual(expectedSleepDuration2, analyzer.getSleepDuration()); + } + + /** + * Ensure that there is waiting (sleepDuration != 0) if the metrics have + * only been updated with both successful and failed requests. Also ensure + * that the sleepDuration decreases over time. 
+ */ + @Test + public void testSuccessAndErrorsAndWaiting() { + ClientThrottlingAnalyzer analyzer = new ClientThrottlingAnalyzer( + "test", + ANALYSIS_PERIOD); + validate(0, analyzer.getSleepDuration()); + analyzer.addBytesTransferred(8 * MEGABYTE, false); + analyzer.addBytesTransferred(2 * MEGABYTE, true); + sleep(ANALYSIS_PERIOD_PLUS_10_PERCENT); + NanoTimer timer = new NanoTimer(); + analyzer.suspendIfNecessary(); + final int expectedElapsedTime = 126; + fuzzyValidate(expectedElapsedTime, + timer.elapsedTimeMs(), + MAX_ACCEPTABLE_PERCENT_DIFFERENCE); + sleep(10 * ANALYSIS_PERIOD); + final int expectedSleepDuration = 110; + validateLessThanOrEqual(expectedSleepDuration, analyzer.getSleepDuration()); + } + + /** + * Ensure that there is waiting (sleepDuration != 0) if the metrics have + * only been updated with many successful and failed requests. Also ensure + * that the sleepDuration decreases to zero over time. + */ + @Test + public void testManySuccessAndErrorsAndWaiting() { + ClientThrottlingAnalyzer analyzer = new ClientThrottlingAnalyzer( + "test", + ANALYSIS_PERIOD); + validate(0, analyzer.getSleepDuration()); + final int numberOfRequests = 20; + for (int i = 0; i < numberOfRequests; i++) { + analyzer.addBytesTransferred(8 * MEGABYTE, false); + analyzer.addBytesTransferred(2 * MEGABYTE, true); + } + sleep(ANALYSIS_PERIOD_PLUS_10_PERCENT); + NanoTimer timer = new NanoTimer(); + analyzer.suspendIfNecessary(); + fuzzyValidate(7, + timer.elapsedTimeMs(), + MAX_ACCEPTABLE_PERCENT_DIFFERENCE); + sleep(10 * ANALYSIS_PERIOD); + validate(0, analyzer.getSleepDuration()); + } +} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java index ce3cdee5c18..fd3690c4a6c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java @@ -39,6 +39,8 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; /** * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations. @@ -529,30 +531,65 @@ public class TestFileSystemOperationsWithThreads extends AbstractWasbTestBase { } /* - * Test case for delete operation with multiple threads and flat listing enabled. + * Validate that when a directory is deleted recursively, the operation succeeds + * even if a child directory delete fails because the directory does not exist. + * This can happen if a child directory is deleted by an external agent while + * the parent is in progress of being deleted recursively. */ @Test - public void testDeleteSingleDeleteFailure() throws Exception { + public void testRecursiveDirectoryDeleteWhenChildDirectoryDeleted() + throws Exception { + testRecusiveDirectoryDelete(true); + } + /* + * Validate that when a directory is deleted recursively, the operation succeeds + * even if a file delete fails because it does not exist. + * This can happen if a file is deleted by an external agent while + * the parent directory is in progress of being deleted. 
+ */ + @Test + public void testRecursiveDirectoryDeleteWhenDeletingChildFileReturnsFalse() + throws Exception { + testRecusiveDirectoryDelete(false); + } + + private void testRecusiveDirectoryDelete(boolean useDir) throws Exception { + String childPathToBeDeletedByExternalAgent = (useDir) + ? "root/0" + : "root/0/fileToRename"; // Spy azure file system object and return false for deleting one file - LOG.info("testDeleteSingleDeleteFailure"); NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0"))); - Mockito.when(mockFs.deleteFile(path, true)).thenReturn(false); + String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path( + childPathToBeDeletedByExternalAgent))); + + Answer<Boolean> answer = new Answer<Boolean>() { + public Boolean answer(InvocationOnMock invocation) throws Throwable { + String path = (String) invocation.getArguments()[0]; + boolean isDir = (boolean) invocation.getArguments()[1]; + boolean realResult = fs.deleteFile(path, isDir); + assertTrue(realResult); + boolean fakeResult = false; + return fakeResult; + } + }; + + Mockito.when(mockFs.deleteFile(path, useDir)).thenAnswer(answer); createFolder(mockFs, "root"); Path sourceFolder = new Path("root"); - assertFalse(mockFs.delete(sourceFolder, true)); - assertTrue(mockFs.exists(sourceFolder)); - // Validate from logs that threads are enabled and delete operation failed. + assertTrue(mockFs.delete(sourceFolder, true)); + assertFalse(mockFs.exists(sourceFolder)); + + // Validate from logs that threads are enabled, that a child directory was + // deleted by an external caller, and the parent delete operation still + // succeeds. String content = logs.getOutput(); assertInLog(content, "Using thread pool for Delete operation with threads"); - assertInLog(content, "Delete operation failed for file " + path); - assertInLog(content, - "Terminating execution of Delete operation now as some other thread already got exception or operation failed"); - assertInLog(content, "Failed to delete files / subfolders in blob"); + assertInLog(content, String.format("Attempt to delete non-existent %s %s", + useDir ?
"directory" : "file", path)); } /* diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java index 9d5d6a22c33..544d6ab4fc0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java @@ -73,20 +73,20 @@ public class TestOutOfBandAzureBlobOperations { FileStatus[] obtained = fs.listStatus(new Path("/root/b")); assertNotNull(obtained); assertEquals(1, obtained.length); - assertFalse(obtained[0].isDir()); + assertFalse(obtained[0].isDirectory()); assertEquals("/root/b", obtained[0].getPath().toUri().getPath()); // List the directory obtained = fs.listStatus(new Path("/root")); assertNotNull(obtained); assertEquals(1, obtained.length); - assertFalse(obtained[0].isDir()); + assertFalse(obtained[0].isDirectory()); assertEquals("/root/b", obtained[0].getPath().toUri().getPath()); // Get the directory's file status FileStatus dirStatus = fs.getFileStatus(new Path("/root")); assertNotNull(dirStatus); - assertTrue(dirStatus.isDir()); + assertTrue(dirStatus.isDirectory()); assertEquals("/root", dirStatus.getPath().toUri().getPath()); } @@ -114,7 +114,7 @@ public class TestOutOfBandAzureBlobOperations { FileStatus[] listResult = fs.listStatus(new Path("/root/b")); // File should win. assertEquals(1, listResult.length); - assertFalse(listResult[0].isDir()); + assertFalse(listResult[0].isDirectory()); try { // Trying to delete root/b/c would cause a dilemma for WASB, so // it should throw. diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java index 393dcfdd7c0..8aad9e9ecfc 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java @@ -282,6 +282,8 @@ public class TestWasbRemoteCallHelper @Test public void testWhenOneInstanceIsDown() throws Throwable { + boolean isAuthorizationCachingEnabled = fs.getConf().getBoolean(CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE, false); + // set up mocks HttpClient mockHttpClient = Mockito.mock(HttpClient.class); HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class); @@ -356,8 +358,9 @@ public class TestWasbRemoteCallHelper performop(mockHttpClient); - Mockito.verify(mockHttpClient, times(2)).execute(Mockito.argThat(new HttpGetForServiceLocal())); - Mockito.verify(mockHttpClient, times(2)).execute(Mockito.argThat(new HttpGetForService2())); + int expectedNumberOfInvocations = isAuthorizationCachingEnabled ? 
1 : 2; + Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForServiceLocal())); + Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForService2())); } @Test diff --git a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml index 8c88743c33b..8cea256de8c 100644 --- a/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml +++ b/hadoop-tools/hadoop-azure/src/test/resources/azure-test.xml @@ -29,10 +29,13 @@ --> - - fs.azure.secure.mode - true - + + fs.azure.user.agent.prefix diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java index 29c59ac1033..138b491189e 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java @@ -280,7 +280,7 @@ public final class CopyListingFileStatus implements Writable { out.writeLong(getBlockSize()); out.writeLong(getModificationTime()); out.writeLong(getAccessTime()); - getPermission().write(out); + out.writeShort(getPermission().toShort()); Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN); Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN); if (aclEntries != null) { @@ -330,7 +330,7 @@ public final class CopyListingFileStatus implements Writable { blocksize = in.readLong(); modificationTime = in.readLong(); accessTime = in.readLong(); - permission.readFields(in); + permission.fromShort(in.readShort()); owner = Text.readString(in, Text.DEFAULT_MAX_LEN); group = Text.readString(in, Text.DEFAULT_MAX_LEN); byte aclEntriesSize = in.readByte(); diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java index dbe750a65c4..2b3b529994f 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java @@ -31,7 +31,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclUtil; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Text; @@ -403,8 +402,7 @@ public class DistCpUtils { CopyListingFileStatus copyListingFileStatus = new CopyListingFileStatus(fileStatus, chunkOffset, chunkLength); if (preserveAcls) { - FsPermission perm = fileStatus.getPermission(); - if (perm.getAclBit()) { + if (fileStatus.hasAcl()) { List aclEntries = fileSystem.getAclStatus( fileStatus.getPath()).getEntries(); copyListingFileStatus.setAclEntries(aclEntries); diff --git a/hadoop-tools/hadoop-distcp/src/main/shellprofile.d/hadoop-distcp.sh b/hadoop-tools/hadoop-distcp/src/main/shellprofile.d/hadoop-distcp.sh index 6e93ec17872..45028481c77 100755 --- a/hadoop-tools/hadoop-distcp/src/main/shellprofile.d/hadoop-distcp.sh +++ b/hadoop-tools/hadoop-distcp/src/main/shellprofile.d/hadoop-distcp.sh @@ -18,7 +18,7 @@ if ! 
declare -f hadoop_subcommand_distcp >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then - hadoop_add_subcommand "distcp" "copy file or directories recursively" + hadoop_add_subcommand "distcp" client "copy file or directories recursively" fi # this can't be indented otherwise shelldocs won't get it @@ -39,7 +39,7 @@ fi if ! declare -f mapred_subcommand_distcp >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = mapred ]]; then - hadoop_add_subcommand "distcp" "copy file or directories recursively" + hadoop_add_subcommand "distcp" client "copy file or directories recursively" fi # this can't be indented otherwise shelldocs won't get it diff --git a/hadoop-tools/hadoop-extras/src/main/shellprofile.d/hadoop-extras.sh b/hadoop-tools/hadoop-extras/src/main/shellprofile.d/hadoop-extras.sh index 1ce9aeee984..364c950c466 100755 --- a/hadoop-tools/hadoop-extras/src/main/shellprofile.d/hadoop-extras.sh +++ b/hadoop-tools/hadoop-extras/src/main/shellprofile.d/hadoop-extras.sh @@ -18,7 +18,7 @@ if ! declare -f hadoop_subcommand_distch >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then - hadoop_add_subcommand "distch" "distributed metadata changer" + hadoop_add_subcommand "distch" client "distributed metadata changer" fi # this can't be indented otherwise shelldocs won't get it diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java index 4386bc1bb2f..3507b7f46b7 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java @@ -447,7 +447,7 @@ public class Gridmix extends Configured implements Tool { // Create with 777 permissions final FileSystem inputFs = ioPath.getFileSystem(conf); - ioPath = ioPath.makeQualified(inputFs); + ioPath = inputFs.makeQualified(ioPath); boolean succeeded = false; try { succeeded = FileSystem.mkdirs(inputFs, ioPath, diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java index d7ef563c95a..15fc68e2d15 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/PseudoLocalFs.java @@ -116,7 +116,7 @@ class PseudoLocalFs extends FileSystem { * @throws FileNotFoundException */ long validateFileNameFormat(Path path) throws FileNotFoundException { - path = path.makeQualified(this); + path = this.makeQualified(path); boolean valid = true; long fileSize = 0; if (!path.toUri().getScheme().equals(getUri().getScheme())) { @@ -329,4 +329,10 @@ class PseudoLocalFs extends FileSystem { throw new UnsupportedOperationException("SetWorkingDirectory " + "is not supported in pseudo local file system."); } + + @Override + public Path makeQualified(Path path) { + // skip FileSystem#checkPath() to validate some other Filesystems + return path.makeQualified(this.getUri(), this.getWorkingDirectory()); + } } diff --git a/hadoop-tools/hadoop-gridmix/src/main/shellprofile.d/hadoop-gridmix.sh b/hadoop-tools/hadoop-gridmix/src/main/shellprofile.d/hadoop-gridmix.sh index b7887ba3620..55997d04e3a 100755 --- a/hadoop-tools/hadoop-gridmix/src/main/shellprofile.d/hadoop-gridmix.sh +++ 
b/hadoop-tools/hadoop-gridmix/src/main/shellprofile.d/hadoop-gridmix.sh @@ -18,7 +18,7 @@ if ! declare -f hadoop_subcommand_gridmix >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then - hadoop_add_subcommand "gridmix" "submit a mix of synthetic job, modeling a profiled from production load" + hadoop_add_subcommand "gridmix" client "submit a mix of synthetic job, modeling a profiled from production load" fi ## @description gridmix command for hadoop diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFilePool.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFilePool.java index 4be90c6aeb6..a75414accc0 100644 --- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFilePool.java +++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFilePool.java @@ -48,8 +48,8 @@ public class TestFilePool { try { final Configuration conf = new Configuration(); final FileSystem fs = FileSystem.getLocal(conf).getRaw(); - return new Path(System.getProperty("test.build.data", "/tmp"), - "testFilePool").makeQualified(fs); + return fs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"), "testFilePool")); } catch (IOException e) { fail(); } diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFileQueue.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFileQueue.java index a4668ee1476..e68e83f6c6b 100644 --- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFileQueue.java +++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestFileQueue.java @@ -48,8 +48,8 @@ public class TestFileQueue { public static void setup() throws IOException { final Configuration conf = new Configuration(); final FileSystem fs = FileSystem.getLocal(conf).getRaw(); - final Path p = new Path(System.getProperty("test.build.data", "/tmp"), - "testFileQueue").makeQualified(fs); + final Path p = fs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"), "testFileQueue")); fs.delete(p, true); final byte[] b = new byte[BLOCK]; for (int i = 0; i < NFILES; ++i) { @@ -71,8 +71,8 @@ public class TestFileQueue { public static void cleanup() throws IOException { final Configuration conf = new Configuration(); final FileSystem fs = FileSystem.getLocal(conf).getRaw(); - final Path p = new Path(System.getProperty("test.build.data", "/tmp"), - "testFileQueue").makeQualified(fs); + final Path p = fs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"), "testFileQueue")); fs.delete(p, true); } diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java index a607ece129f..7179c5d0dda 100644 --- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java +++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestPseudoLocalFs.java @@ -224,7 +224,7 @@ public class TestPseudoLocalFs { // Validate operations on valid qualified path path = new Path("myPsedoFile.1237"); - path = path.makeQualified(pfs); + path = pfs.makeQualified(path); validateGetFileStatus(pfs, path, true); validateCreate(pfs, path, true); validateOpen(pfs, path, true); diff --git 
a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestUserResolve.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestUserResolve.java index 8050f33b794..44075157f5c 100644 --- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestUserResolve.java +++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestUserResolve.java @@ -40,8 +40,8 @@ public class TestUserResolve { public static void createRootDir() throws IOException { conf = new Configuration(); fs = FileSystem.getLocal(conf); - rootDir = new Path(new Path(System.getProperty("test.build.data", "/tmp")) - .makeQualified(fs), "gridmixUserResolve"); + rootDir = new Path(fs.makeQualified(new Path( + System.getProperty("test.build.data", "/tmp"))), "gridmixUserResolve"); } /** diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftFileStatus.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftFileStatus.java index d010d08cdc5..725cae1e3b8 100644 --- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftFileStatus.java +++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftFileStatus.java @@ -71,7 +71,7 @@ public class SwiftFileStatus extends FileStatus { * @return true if the status is considered to be a file */ @Override - public boolean isDir() { + public boolean isDirectory() { return super.isDirectory() || getLen() == 0; } @@ -79,19 +79,11 @@ public class SwiftFileStatus extends FileStatus { * A entry is a file if it is not a directory. * By implementing it and not marking as an override this * subclass builds and runs in both Hadoop versions. - * @return the opposite value to {@link #isDir()} + * @return the opposite value to {@link #isDirectory()} */ @Override public boolean isFile() { - return !isDir(); - } - - /** - * Directory test - * @return true if the file is considered to be a directory - */ - public boolean isDirectory() { - return isDir(); + return !this.isDirectory(); } @Override @@ -100,7 +92,7 @@ public class SwiftFileStatus extends FileStatus { sb.append(getClass().getSimpleName()); sb.append("{ "); sb.append("path=").append(getPath()); - sb.append("; isDirectory=").append(isDir()); + sb.append("; isDirectory=").append(isDirectory()); sb.append("; length=").append(getLen()); sb.append("; blocksize=").append(getBlockSize()); sb.append("; modification_time=").append(getModificationTime()); diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java index f2ecb0f1dae..a44051ac563 100644 --- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java +++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java @@ -578,7 +578,7 @@ public class SwiftNativeFileSystemStore { //enum the child entries and everything underneath List childStats = listDirectory(srcObject, true, true); - boolean srcIsFile = !srcMetadata.isDir(); + boolean srcIsFile = !srcMetadata.isDirectory(); if (srcIsFile) { //source is a simple file OR a partitioned file @@ -945,7 +945,7 @@ public class SwiftNativeFileSystemStore { //>1 entry implies directory with children. 
Run through them, // but first check for the recursive flag and reject it *unless it looks // like a partitioned file (len > 0 && has children) - if (!fileStatus.isDir()) { + if (!fileStatus.isDirectory()) { LOG.debug("Multiple child entries but entry has data: assume partitioned"); } else if (!recursive) { //if there are children, unless this is a recursive operation, fail immediately diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java index f91ba3013ce..726045ee9ef 100644 --- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java +++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java @@ -278,7 +278,7 @@ public class SwiftTestUtils extends org.junit.Assert { noteAction(action); try { if (fileSystem != null) { - fileSystem.delete(new Path(cleanupPath).makeQualified(fileSystem), + fileSystem.delete(fileSystem.makeQualified(new Path(cleanupPath)), true); } } catch (Exception e) { diff --git a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/SwiftFileSystemBaseTest.java b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/SwiftFileSystemBaseTest.java index 12f58e64333..99e03c7a71b 100644 --- a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/SwiftFileSystemBaseTest.java +++ b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/SwiftFileSystemBaseTest.java @@ -159,7 +159,7 @@ public class SwiftFileSystemBaseTest extends Assert implements * @return a qualified path instance */ protected Path path(String pathString) { - return new Path(pathString).makeQualified(fs); + return fs.makeQualified(new Path(pathString)); } /** diff --git a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDirectories.java b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDirectories.java index 21fe918e7fa..9b4ba5e8c6f 100644 --- a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDirectories.java +++ b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemDirectories.java @@ -87,7 +87,7 @@ public class TestSwiftFileSystemDirectories extends SwiftFileSystemBaseTest { assertEquals("Wrong number of elements in file status " + statusString, 1, statuses.length); SwiftFileStatus stat = (SwiftFileStatus) statuses[0]; - assertTrue("isDir(): Not a directory: " + stat, stat.isDir()); + assertTrue("isDir(): Not a directory: " + stat, stat.isDirectory()); extraStatusAssertions(stat); } @@ -135,7 +135,7 @@ public class TestSwiftFileSystemDirectories extends SwiftFileSystemBaseTest { SwiftTestUtils.writeTextFile(fs, src, "testMultiByteFilesAreFiles", false); assertIsFile(src); FileStatus status = fs.getFileStatus(src); - assertFalse(status.isDir()); + assertFalse(status.isDirectory()); } } diff --git a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemPartitionedUploads.java b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemPartitionedUploads.java index f344093158b..419d0303a04 100644 --- a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemPartitionedUploads.java +++ 
b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestSwiftFileSystemPartitionedUploads.java @@ -126,7 +126,7 @@ public class TestSwiftFileSystemPartitionedUploads extends SwiftTestUtils.compareByteArrays(src, dest, len); FileStatus status; - final Path qualifiedPath = path.makeQualified(fs); + final Path qualifiedPath = fs.makeQualified(path); status = fs.getFileStatus(qualifiedPath); //now see what block location info comes back. //This will vary depending on the Swift version, so the results @@ -216,7 +216,7 @@ public class TestSwiftFileSystemPartitionedUploads extends private FileStatus validatePathLen(Path path, int len) throws IOException { //verify that the length is what was written in a direct status check - final Path qualifiedPath = path.makeQualified(fs); + final Path qualifiedPath = fs.makeQualified(path); FileStatus[] parentDirListing = fs.listStatus(qualifiedPath.getParent()); StringBuilder listing = lsToString(parentDirListing); String parentDirLS = listing.toString(); @@ -228,7 +228,7 @@ public class TestSwiftFileSystemPartitionedUploads extends status.getLen()); String fileInfo = qualifiedPath + " " + status; assertFalse("File claims to be a directory " + fileInfo, - status.isDir()); + status.isDirectory()); FileStatus listedFileStat = resolveChild(parentDirListing, qualifiedPath); assertNotNull("Did not find " + path + " in " + parentDirLS, diff --git a/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh b/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh index 77023ff888e..b0d606deb63 100755 --- a/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh +++ b/hadoop-tools/hadoop-rumen/src/main/shellprofile.d/hadoop-rumen.sh @@ -18,7 +18,7 @@ if ! declare -f hadoop_subcommand_rumenfolder >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then - hadoop_add_subcommand "rumenfolder" "scale a rumen input trace" + hadoop_add_subcommand "rumenfolder" client "scale a rumen input trace" fi ## @description rumenfolder command for hadoop @@ -37,7 +37,7 @@ fi if ! 
declare -f hadoop_subcommand_rumentrace >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then - hadoop_add_subcommand "rumentrace" "convert logs into a rumen trace" + hadoop_add_subcommand "rumentrace" client "convert logs into a rumen trace" fi ## @description rumentrace command for hadoop diff --git a/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java b/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java index 206095a2576..52caaf5575b 100644 --- a/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java +++ b/hadoop-tools/hadoop-rumen/src/test/java/org/apache/hadoop/tools/rumen/TestHistograms.java @@ -57,8 +57,8 @@ public class TestHistograms { public void testHistograms() throws IOException { final Configuration conf = new Configuration(); final FileSystem lfs = FileSystem.getLocal(conf); - final Path rootInputDir = new Path( - System.getProperty("test.tools.input.dir", "")).makeQualified(lfs); + final Path rootInputDir = lfs.makeQualified(new Path( + System.getProperty("test.tools.input.dir", "target/input"))); final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests"); @@ -132,7 +132,7 @@ public class TestHistograms { final FileSystem lfs = FileSystem.getLocal(conf); for (String arg : args) { - Path filePath = new Path(arg).makeQualified(lfs); + Path filePath = lfs.makeQualified(new Path(arg)); String fileName = filePath.getName(); if (fileName.startsWith("input")) { LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java index 8962aba2932..e71ddff2d02 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java @@ -179,7 +179,7 @@ public class NodeInfo { } @Override - public void updateNodeHeartbeatResponseForContainersDecreasing( + public void updateNodeHeartbeatResponseForUpdatedContainers( NodeHeartbeatResponse response) { // TODO Auto-generated method stub diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java index d7b159c1d84..6b7ac3cc238 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java @@ -168,7 +168,7 @@ public class RMNodeWrapper implements RMNode { } @Override - public void updateNodeHeartbeatResponseForContainersDecreasing( + public void updateNodeHeartbeatResponseForUpdatedContainers( NodeHeartbeatResponse response) { // TODO Auto-generated method stub } diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java index 9f5b293b369..0b239d0ea4e 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java @@ -22,13 +22,11 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import 
java.net.URI; -import java.net.URISyntaxException; import java.net.URLEncoder; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.regex.Pattern; import java.util.TreeMap; import java.util.TreeSet; @@ -41,12 +39,12 @@ import org.apache.commons.cli.Options; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.filecache.DistributedCache; import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.mapred.FileInputFormat; @@ -56,7 +54,6 @@ import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobID; import org.apache.hadoop.mapred.KeyValueTextInputFormat; -import org.apache.hadoop.mapred.OutputFormat; import org.apache.hadoop.mapred.RunningJob; import org.apache.hadoop.mapred.SequenceFileAsTextInputFormat; import org.apache.hadoop.mapred.SequenceFileInputFormat; @@ -65,6 +62,7 @@ import org.apache.hadoop.mapred.TextOutputFormat; import org.apache.hadoop.mapred.lib.LazyOutputFormat; import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner; import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.streaming.io.IdentifierResolver; import org.apache.hadoop.streaming.io.InputWriter; import org.apache.hadoop.streaming.io.OutputReader; @@ -297,7 +295,10 @@ public class StreamJob implements Tool { try { Path path = new Path(file); FileSystem localFs = FileSystem.getLocal(config_); - String finalPath = path.makeQualified(localFs).toString(); + Path qualifiedPath = path.makeQualified( + localFs.getUri(), localFs.getWorkingDirectory()); + validate(qualifiedPath); + String finalPath = qualifiedPath.toString(); if(fileList.length() > 0) { fileList.append(','); } @@ -313,7 +314,6 @@ public class StreamJob implements Tool { tmpFiles = tmpFiles + "," + fileList; } config_.set("tmpfiles", tmpFiles); - validate(packageFiles_); } String fsName = cmdLine.getOptionValue("dfs"); @@ -391,14 +391,13 @@ public class StreamJob implements Tool { return OptionBuilder.withDescription(desc).create(name); } - private void validate(final List values) - throws IllegalArgumentException { - for (String file : values) { - File f = new File(file); - if (!FileUtil.canRead(f)) { - fail("File: " + f.getAbsolutePath() - + " does not exist, or is not readable."); - } + private void validate(final Path path) throws IOException { + try { + path.getFileSystem(config_).access(path, FsAction.READ); + } catch (FileNotFoundException e) { + fail("File: " + path + " does not exist."); + } catch (AccessControlException e) { + fail("File: " + path + " is not readable."); } } diff --git a/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh b/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh index c3010ffce06..be76b060595 100755 --- a/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh +++ b/hadoop-tools/hadoop-streaming/src/main/shellprofile.d/hadoop-streaming.sh @@ -18,7 +18,7 @@ if ! 
declare -f mapred_subcommand_streaming >/dev/null 2>/dev/null; then if [[ "${HADOOP_SHELL_EXECNAME}" = mapred ]]; then - hadoop_add_subcommand "streaming" "launch a mapreduce streaming job" + hadoop_add_subcommand "streaming" client "launch a mapreduce streaming job" fi ## @description streaming command for mapred diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql similarity index 86% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js rename to hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql index 5d14c6f39e1..68649e6300d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/queues-selector.js +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql @@ -16,10 +16,6 @@ * limitations under the License. */ -import Ember from 'ember'; +-- Script to create a new Database in MySQL for the Federation StateStore -export default Ember.Route.extend({ - model() { - return this.store.findAll('yarn-queue'); - }, -}); \ No newline at end of file +CREATE database FederationStateStore; diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql new file mode 100644 index 00000000000..eae882e4a48 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Script to generate all the stored procedures for the Federation StateStore in MySQL + +USE FederationStateStore + +DELIMITER // + +CREATE PROCEDURE sp_registerSubCluster( + IN subClusterId_IN varchar(256), + IN amRMServiceAddress_IN varchar(256), + IN clientRMServiceAddress_IN varchar(256), + IN rmAdminServiceAddress_IN varchar(256), + IN rmWebServiceAddress_IN varchar(256), + IN state_IN varchar(256), + IN lastStartTime_IN bigint, IN capability_IN varchar(6000), + OUT rowCount_OUT int) +BEGIN + DELETE FROM membership WHERE (subClusterId = subClusterId_IN); + INSERT INTO membership (subClusterId, amRMServiceAddress, clientRMServiceAddress, + rmAdminServiceAddress, rmWebServiceAddress, lastHeartBeat, state, lastStartTime, capability) + VALUES (subClusterId_IN, amRMServiceAddress_IN, clientRMServiceAddress_IN, + rmAdminServiceAddress_IN, rmWebServiceAddress_IN, NOW(), state_IN, lastStartTime_IN, capability_IN); + SELECT ROW_COUNT() INTO rowCount_OUT; +END // + +CREATE PROCEDURE sp_deregisterSubCluster( + IN subClusterId_IN varchar(256), + IN state_IN varchar(64), + OUT rowCount_OUT int) +BEGIN + UPDATE membership SET state = state_IN + WHERE (subClusterId = subClusterId_IN AND state != state_IN); + SELECT ROW_COUNT() INTO rowCount_OUT; +END // + +CREATE PROCEDURE sp_subClusterHeartbeat( + IN subClusterId_IN varchar(256), IN state_IN varchar(64), + IN capability_IN varchar(6000), OUT rowCount_OUT int) +BEGIN + UPDATE membership + SET capability = capability_IN, + state = state_IN, + lastHeartBeat = NOW() + WHERE subClusterId = subClusterId_IN; + SELECT ROW_COUNT() INTO rowCount_OUT; +END // + +CREATE PROCEDURE sp_getSubCluster( + IN subClusterId_IN varchar(256), + OUT amRMServiceAddress_OUT varchar(256), + OUT clientRMServiceAddress_OUT varchar(256), + OUT rmAdminServiceAddress_OUT varchar(256), + OUT rmWebServiceAddress_OUT varchar(256), + OUT lastHeartBeat_OUT datetime, OUT state_OUT varchar(64), + OUT lastStartTime_OUT bigint, + OUT capability_OUT varchar(6000)) +BEGIN + SELECT amRMServiceAddress, clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, state, lastStartTime, capability + INTO amRMServiceAddress_OUT, clientRMServiceAddress_OUT, rmAdminServiceAddress_OUT, + rmWebServiceAddress_OUT, lastHeartBeat_OUT, state_OUT, lastStartTime_OUT, capability_OUT + FROM membership WHERE subClusterId = subClusterId_IN; +END // + +CREATE PROCEDURE sp_getSubClusters() +BEGIN + SELECT subClusterId, amRMServiceAddress, clientRMServiceAddress, + rmAdminServiceAddress, rmWebServiceAddress, lastHeartBeat, + state, lastStartTime, capability + FROM membership; +END // + +CREATE PROCEDURE sp_addApplicationHomeSubCluster( + IN applicationId_IN varchar(64), IN homeSubCluster_IN varchar(256), + OUT storedHomeSubCluster_OUT varchar(256), OUT rowCount_OUT int) +BEGIN + INSERT INTO applicationsHomeSubCluster + (applicationId,homeSubCluster) + (SELECT applicationId_IN, homeSubCluster_IN + FROM applicationsHomeSubCluster + WHERE applicationId = applicationId_IN + HAVING COUNT(*) = 0 ); + SELECT ROW_COUNT() INTO rowCount_OUT; + SELECT homeSubCluster INTO storedHomeSubCluster_OUT + FROM applicationsHomeSubCluster + WHERE applicationId = applicationID_IN; +END // + +CREATE PROCEDURE sp_updateApplicationHomeSubCluster( + IN applicationId_IN varchar(64), + IN homeSubCluster_IN varchar(256), OUT rowCount_OUT int) +BEGIN + UPDATE applicationsHomeSubCluster + SET homeSubCluster = homeSubCluster_IN + WHERE applicationId = applicationId_IN; + SELECT ROW_COUNT() INTO rowCount_OUT; 
+END // + +CREATE PROCEDURE sp_getApplicationHomeSubCluster( + IN applicationId_IN varchar(64), + OUT homeSubCluster_OUT varchar(256)) +BEGIN + SELECT homeSubCluster INTO homeSubCluster_OUT + FROM applicationsHomeSubCluster + WHERE applicationId = applicationID_IN; +END // + +CREATE PROCEDURE sp_getApplicationsHomeSubCluster() +BEGIN + SELECT applicationId, homeSubCluster + FROM applicationsHomeSubCluster; +END // + +CREATE PROCEDURE sp_deleteApplicationHomeSubCluster( + IN applicationId_IN varchar(64), OUT rowCount_OUT int) +BEGIN + DELETE FROM applicationsHomeSubCluster + WHERE applicationId = applicationId_IN; + SELECT ROW_COUNT() INTO rowCount_OUT; +END // + +CREATE PROCEDURE sp_setPolicyConfiguration( + IN queue_IN varchar(256), IN policyType_IN varchar(256), + IN params_IN varbinary(32768), OUT rowCount_OUT int) +BEGIN + DELETE FROM policies WHERE queue = queue_IN; + INSERT INTO policies (queue, policyType, params) + VALUES (queue_IN, policyType_IN, params_IN); + SELECT ROW_COUNT() INTO rowCount_OUT; +END // + +CREATE PROCEDURE sp_getPoliciesConfigurations() +BEGIN + SELECT queue, policyType, params FROM policies; +END // + +CREATE PROCEDURE sp_getPolicyConfiguration( + IN queue_IN varchar(256), OUT policyType_OUT varchar(256), + OUT params_OUT varbinary(32768)) +BEGIN + SELECT policyType, params INTO policyType_OUT, params_OUT + FROM policies WHERE queue = queue_IN; +END // + +DELIMITER ; diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql new file mode 100644 index 00000000000..67a181754b9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Script to generate all the tables for the Federation StateStore in MySQL + +USE FederationStateStore + +CREATE TABLE applicationsHomeSubCluster( + applicationId varchar(64) NOT NULL, + subClusterId varchar(256) NULL, + CONSTRAINT pk_applicationId PRIMARY KEY (applicationId) +); + +CREATE TABLE membership( + subClusterId varchar(256) NOT NULL, + amRMServiceAddress varchar(256) NOT NULL, + clientRMServiceAddress varchar(256) NOT NULL, + rmAdminServiceAddress varchar(256) NOT NULL, + rmWebServiceAddress varchar(256) NOT NULL, + lastHeartBeat datetime NOT NULL, + state varchar(32) NOT NULL, + lastStartTime bigint NULL, + capability varchar(6000), + CONSTRAINT pk_subClusterId PRIMARY KEY (subClusterId) +); + +CREATE TABLE policies( + queue varchar(256) NOT NULL, + policyType varchar(256) NOT NULL, + params varbinary(32768), + CONSTRAINT pk_queue PRIMARY KEY (queue) +); diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreUser.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreUser.sql new file mode 100644 index 00000000000..32f49334656 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreUser.sql @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Script to create a new User in MySQL for the Federation StateStore + +CREATE USER 'FederationUser'@'%' IDENTIFIED BY 'FederationPassword'; + +GRANT ALL PRIVILEGES ON FederationStateStore.* TO 'FederationUser'@'%'; + +FLUSH PRIVILEGES; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropDatabase.sql similarity index 86% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js rename to hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropDatabase.sql index 436c6d81b11..c915bfe510a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues/index.js +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropDatabase.sql @@ -16,10 +16,6 @@ * limitations under the License. 
*/ -import Ember from 'ember'; +-- Script to drop the Federation StateStore in MySQL -export default Ember.Route.extend({ - beforeModel() { - this.transitionTo('yarn-queues.root'); - } -}); \ No newline at end of file +DROP DATABASE FederationStateStore; diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropStoreProcedures.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropStoreProcedures.sql new file mode 100644 index 00000000000..f24f3fb22b5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropStoreProcedures.sql @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Script to drop all the stored procedures for the Federation StateStore in MySQL + +USE FederationStateStore + +DROP PROCEDURE sp_registerSubCluster; + +DROP PROCEDURE sp_deregisterSubCluster; + +DROP PROCEDURE sp_subClusterHeartbeat; + +DROP PROCEDURE sp_getSubCluster; + +DROP PROCEDURE sp_getSubClusters; + +DROP PROCEDURE sp_addApplicationHomeSubCluster; + +DROP PROCEDURE sp_updateApplicationHomeSubCluster; + +DROP PROCEDURE sp_getApplicationHomeSubCluster; + +DROP PROCEDURE sp_getApplicationsHomeSubCluster; + +DROP PROCEDURE sp_deleteApplicationHomeSubCluster; + +DROP PROCEDURE sp_setPolicyConfiguration; + +DROP PROCEDURE sp_getPolicyConfiguration; + +DROP PROCEDURE sp_getPoliciesConfigurations; diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropTables.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropTables.sql new file mode 100644 index 00000000000..ea6567b028b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropTables.sql @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Script to drop all the tables from the Federation StateStore in MySQL + +USE FederationStateStore + +DROP TABLE applicationsHomeSubCluster; + +DROP TABLE membership; + +DROP TABLE policies; diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropUser.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropUser.sql new file mode 100644 index 00000000000..7b4bb02fb59 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/dropUser.sql @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Script to drop the user from Federation StateStore in MySQL + +DROP USER 'FederationUser'@'%'; diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql new file mode 100644 index 00000000000..66d6f0e2035 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql @@ -0,0 +1,511 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +USE [FederationStateStore] +GO + +IF OBJECT_ID ( '[sp_addApplicationHomeSubCluster]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_addApplicationHomeSubCluster]; +GO + +CREATE PROCEDURE [dbo].[sp_addApplicationHomeSubCluster] + @applicationId VARCHAR(64), + @homeSubCluster VARCHAR(256), + @storedHomeSubCluster VARCHAR(256) OUTPUT, + @rowCount int OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + BEGIN TRAN + -- If application to sub-cluster map doesn't exist, insert it. + -- Otherwise don't change the current mapping. 
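+    -- This keeps the procedure idempotent: a repeated call for the same
+    -- applicationId leaves the existing row untouched, and the
+    -- @storedHomeSubCluster output selected below always reports the mapping
+    -- that is in effect after the call, whether or not the INSERT ran.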
+ IF NOT EXISTS (SELECT TOP 1 * + FROM [dbo].[applicationsHomeSubCluster] + WHERE [applicationId] = @applicationId) + + INSERT INTO [dbo].[applicationsHomeSubCluster] ( + [applicationId], + [homeSubCluster]) + VALUES ( + @applicationId, + @homeSubCluster); + -- End of the IF block + + SELECT @rowCount = @@ROWCOUNT; + + SELECT @storedHomeSubCluster = [homeSubCluster] + FROM [dbo].[applicationsHomeSubCluster] + WHERE [applicationId] = @applicationId; + + COMMIT TRAN + END TRY + + BEGIN CATCH + ROLLBACK TRAN + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_updateApplicationHomeSubCluster]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_updateApplicationHomeSubCluster]; +GO + +CREATE PROCEDURE [dbo].[sp_updateApplicationHomeSubCluster] + @applicationId VARCHAR(64), + @homeSubCluster VARCHAR(256), + @rowCount int OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + BEGIN TRAN + + UPDATE [dbo].[applicationsHomeSubCluster] + SET [homeSubCluster] = @homeSubCluster + WHERE [applicationId] = @applicationid; + SELECT @rowCount = @@ROWCOUNT; + + COMMIT TRAN + END TRY + + BEGIN CATCH + ROLLBACK TRAN + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_getApplicationsHomeSubCluster]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_getApplicationsHomeSubCluster]; +GO + +CREATE PROCEDURE [dbo].[sp_getApplicationsHomeSubCluster] +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + SELECT [applicationId], [homeSubCluster], [createTime] + FROM [dbo].[applicationsHomeSubCluster] + END TRY + + BEGIN CATCH + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_getApplicationHomeSubCluster]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_getApplicationHomeSubCluster]; +GO + +CREATE PROCEDURE [dbo].[sp_getApplicationHomeSubCluster] + @applicationId VARCHAR(64), + @homeSubCluster VARCHAR(256) OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + + SELECT @homeSubCluster = [homeSubCluster] + FROM [dbo].[applicationsHomeSubCluster] + WHERE [applicationId] = @applicationid; + + END TRY + + BEGIN CATCH + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_deleteApplicationHomeSubCluster]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_deleteApplicationHomeSubCluster]; +GO + +CREATE PROCEDURE [dbo].[sp_deleteApplicationHomeSubCluster] + @applicationId VARCHAR(64), + @rowCount int OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + BEGIN TRAN + + DELETE FROM [dbo].[applicationsHomeSubCluster] + WHERE [applicationId] = @applicationId; + SELECT @rowCount = @@ROWCOUNT; + + COMMIT TRAN + END TRY + + BEGIN CATCH + ROLLBACK TRAN + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the 
execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_registerSubCluster]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_registerSubCluster]; +GO + +CREATE PROCEDURE [dbo].[sp_registerSubCluster] + @subClusterId VARCHAR(256), + @amRMServiceAddress VARCHAR(256), + @clientRMServiceAddress VARCHAR(256), + @rmAdminServiceAddress VARCHAR(256), + @rmWebServiceAddress VARCHAR(256), + @state VARCHAR(32), + @lastStartTime BIGINT, + @capability VARCHAR(6000), + @rowCount int OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + BEGIN TRAN + + DELETE FROM [dbo].[membership] + WHERE [subClusterId] = @subClusterId; + INSERT INTO [dbo].[membership] ( + [subClusterId], + [amRMServiceAddress], + [clientRMServiceAddress], + [rmAdminServiceAddress], + [rmWebServiceAddress], + [lastHeartBeat], + [state], + [lastStartTime], + [capability] ) + VALUES ( + @subClusterId, + @amRMServiceAddress, + @clientRMServiceAddress, + @rmAdminServiceAddress, + @rmWebServiceAddress, + GETUTCDATE(), + @state, + @lastStartTime, + @capability); + SELECT @rowCount = @@ROWCOUNT; + + COMMIT TRAN + END TRY + + BEGIN CATCH + ROLLBACK TRAN + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_getSubClusters]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_getSubClusters]; +GO + +CREATE PROCEDURE [dbo].[sp_getSubClusters] +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + SELECT [subClusterId], [amRMServiceAddress], [clientRMServiceAddress], + [rmAdminServiceAddress], [rmWebServiceAddress], [lastHeartBeat], + [state], [lastStartTime], [capability] + FROM [dbo].[membership] + END TRY + + BEGIN CATCH + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_getSubCluster]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_getSubCluster]; +GO + +CREATE PROCEDURE [dbo].[sp_getSubCluster] + @subClusterId VARCHAR(256), + @amRMServiceAddress VARCHAR(256) OUTPUT, + @clientRMServiceAddress VARCHAR(256) OUTPUT, + @rmAdminServiceAddress VARCHAR(256) OUTPUT, + @rmWebServiceAddress VARCHAR(256) OUTPUT, + @lastHeartbeat DATETIME2 OUTPUT, + @state VARCHAR(256) OUTPUT, + @lastStartTime BIGINT OUTPUT, + @capability VARCHAR(6000) OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + BEGIN TRAN + + SELECT @subClusterId = [subClusterId], + @amRMServiceAddress = [amRMServiceAddress], + @clientRMServiceAddress = [clientRMServiceAddress], + @rmAdminServiceAddress = [rmAdminServiceAddress], + @rmWebServiceAddress = [rmWebServiceAddress], + @lastHeartBeat = [lastHeartBeat], + @state = [state], + @lastStartTime = [lastStartTime], + @capability = [capability] + FROM [dbo].[membership] + WHERE [subClusterId] = @subClusterId + + COMMIT TRAN + END TRY + + BEGIN CATCH + ROLLBACK TRAN + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + + +IF OBJECT_ID ( '[sp_subClusterHeartbeat]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_subClusterHeartbeat]; +GO + 
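The stored procedures in this script hand results back through T-SQL OUTPUT parameters rather than result sets, so a SQL-backed state store binds them as callable statements. The sketch below shows that calling convention for sp_getApplicationHomeSubCluster from plain JDBC; the connection URL, user, password, and application id are placeholders for illustration only (in a real deployment the connection settings come from the yarn.federation.state-store.sql.* properties).

```java
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Types;

public class HomeSubClusterLookup {
  public static void main(String[] args) throws Exception {
    // Placeholder URL and credentials for a local FederationStateStore database.
    String url =
        "jdbc:sqlserver://localhost:1433;databaseName=FederationStateStore";
    try (Connection conn =
             DriverManager.getConnection(url, "FederationUser", "FederationPassword");
         CallableStatement call =
             conn.prepareCall("{call sp_getApplicationHomeSubCluster(?, ?)}")) {
      call.setString(1, "application_1500000000000_0001"); // @applicationId
      call.registerOutParameter(2, Types.VARCHAR);         // @homeSubCluster OUTPUT
      call.execute();
      System.out.println("home sub-cluster: " + call.getString(2));
    }
  }
}
```

The write-side procedures follow the same pattern, returning @rowCount as an additional OUTPUT parameter.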
+CREATE PROCEDURE [dbo].[sp_subClusterHeartbeat] + @subClusterId VARCHAR(256), + @state VARCHAR(256), + @capability VARCHAR(6000), + @rowCount int OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + BEGIN TRAN + + UPDATE [dbo].[membership] + SET [state] = @state, + [lastHeartbeat] = GETUTCDATE(), + [capability] = @capability + WHERE [subClusterId] = @subClusterId; + SELECT @rowCount = @@ROWCOUNT; + + COMMIT TRAN + END TRY + + BEGIN CATCH + ROLLBACK TRAN + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_deregisterSubCluster]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_deregisterSubCluster]; +GO + +CREATE PROCEDURE [dbo].[sp_deregisterSubCluster] + @subClusterId VARCHAR(256), + @state VARCHAR(256), + @rowCount int OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + BEGIN TRAN + + UPDATE [dbo].[membership] + SET [state] = @state + WHERE [subClusterId] = @subClusterId; + SELECT @rowCount = @@ROWCOUNT; + + COMMIT TRAN + END TRY + + BEGIN CATCH + ROLLBACK TRAN + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_setPolicyConfiguration]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_setPolicyConfiguration]; +GO + +CREATE PROCEDURE [dbo].[sp_setPolicyConfiguration] + @queue VARCHAR(256), + @policyType VARCHAR(256), + @params VARBINARY(512), + @rowCount int OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + BEGIN TRAN + + DELETE FROM [dbo].[policies] + WHERE [queue] = @queue; + INSERT INTO [dbo].[policies] ( + [queue], + [policyType], + [params]) + VALUES ( + @queue, + @policyType, + @params); + SELECT @rowCount = @@ROWCOUNT; + + COMMIT TRAN + END TRY + + BEGIN CATCH + ROLLBACK TRAN + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_getPolicyConfiguration]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_getPolicyConfiguration]; +GO + +CREATE PROCEDURE [dbo].[sp_getPolicyConfiguration] + @queue VARCHAR(256), + @policyType VARCHAR(256) OUTPUT, + @params VARBINARY(6000) OUTPUT +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + + SELECT @policyType = [policyType], + @params = [params] + FROM [dbo].[policies] + WHERE [queue] = @queue + + END TRY + + BEGIN CATCH + + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- Severity + -1 -- State + ) WITH log + END CATCH +END; +GO + +IF OBJECT_ID ( '[sp_getPoliciesConfigurations]', 'P' ) IS NOT NULL + DROP PROCEDURE [sp_getPoliciesConfigurations]; +GO + +CREATE PROCEDURE [dbo].[sp_getPoliciesConfigurations] +AS BEGIN + DECLARE @errorMessage nvarchar(4000) + + BEGIN TRY + SELECT [queue], [policyType], [params] FROM [dbo].[policies] + END TRY + + BEGIN CATCH + SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), ERROR_LINE()) + + /* raise error and terminate the execution */ + RAISERROR(@errorMessage, --- Error Message + 1, -- 
Severity + -1 -- State + ) WITH log + END CATCH +END; +GO \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreTables.sql b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreTables.sql new file mode 100644 index 00000000000..a97385b4962 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreTables.sql @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +USE [FederationStateStore] +GO + +IF NOT EXISTS ( SELECT * FROM [FederationStateStore].sys.tables + WHERE name = 'applicationsHomeSubCluster' + AND schema_id = SCHEMA_ID('dbo')) + BEGIN + PRINT 'Table applicationsHomeSubCluster does not exist, create it...' + + SET ANSI_NULLS ON + + SET QUOTED_IDENTIFIER ON + + SET ANSI_PADDING ON + + CREATE TABLE [dbo].[applicationsHomeSubCluster]( + applicationId VARCHAR(64) COLLATE Latin1_General_100_BIN2 NOT NULL, + homeSubCluster VARCHAR(256) NOT NULL, + createTime DATETIME2 NOT NULL CONSTRAINT ts_createAppTime DEFAULT GETUTCDATE(), + + CONSTRAINT [pk_applicationId] PRIMARY KEY + ( + [applicationId] + ) + ) + + SET ANSI_PADDING OFF + + PRINT 'Table applicationsHomeSubCluster created.' + END +ELSE + PRINT 'Table applicationsHomeSubCluster exists, no operation required...' + GO +GO + +IF NOT EXISTS ( SELECT * FROM [FederationStateStore].sys.tables + WHERE name = 'membership' + AND schema_id = SCHEMA_ID('dbo')) + BEGIN + PRINT 'Table membership does not exist, create it...' + + SET ANSI_NULLS ON + + SET QUOTED_IDENTIFIER ON + + SET ANSI_PADDING ON + + CREATE TABLE [dbo].[membership]( + [subClusterId] VARCHAR(256) COLLATE Latin1_General_100_BIN2 NOT NULL, + [amRMServiceAddress] VARCHAR(256) NOT NULL, + [clientRMServiceAddress] VARCHAR(256) NOT NULL, + [rmAdminServiceAddress] VARCHAR(256) NOT NULL, + [rmWebServiceAddress] VARCHAR(256) NOT NULL, + [lastHeartBeat] DATETIME2 NOT NULL, + [state] VARCHAR(32) NOT NULL, + [lastStartTime] BIGINT NOT NULL, + [capability] VARCHAR(6000) NOT NULL, + + CONSTRAINT [pk_subClusterId] PRIMARY KEY + ( + [subClusterId] + ) + ) + + SET ANSI_PADDING OFF + + PRINT 'Table membership created.' + END +ELSE + PRINT 'Table membership exists, no operation required...' + GO +GO + +IF NOT EXISTS ( SELECT * FROM [FederationStateStore].sys.tables + WHERE name = 'policies' + AND schema_id = SCHEMA_ID('dbo')) + BEGIN + PRINT 'Table policies does not exist, create it...' 
+ + SET ANSI_NULLS ON + + SET QUOTED_IDENTIFIER ON + + SET ANSI_PADDING ON + + CREATE TABLE [dbo].[policies]( + queue VARCHAR(256) COLLATE Latin1_General_100_BIN2 NOT NULL, + policyType VARCHAR(256) NOT NULL, + params VARBINARY(6000) NOT NULL, + + CONSTRAINT [pk_queue] PRIMARY KEY + ( + [queue] + ) + ) + + SET ANSI_PADDING OFF + + PRINT 'Table policies created.' + END +ELSE + PRINT 'Table policies exists, no operation required...' + GO +GO diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn index cf6457b0438..dcde0dc0790 100755 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn @@ -31,27 +31,28 @@ function hadoop_usage hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" hadoop_add_option "--workers" "turn on worker mode" - hadoop_add_subcommand "application" "prints application(s) report/kill application" - hadoop_add_subcommand "applicationattempt" "prints applicationattempt(s) report" - hadoop_add_subcommand "classpath" "prints the class path needed to get the hadoop jar and the required libraries" - hadoop_add_subcommand "cluster" "prints cluster information" - hadoop_add_subcommand "container" "prints container(s) report" - hadoop_add_subcommand "daemonlog" "get/set the log level for each daemon" - hadoop_add_subcommand "envvars" "display computed Hadoop environment variables" - hadoop_add_subcommand "jar " "run a jar file" - hadoop_add_subcommand "logs" "dump container logs" - hadoop_add_subcommand "node" "prints node report(s)" - hadoop_add_subcommand "nodemanager" "run a nodemanager on each worker" - hadoop_add_subcommand "proxyserver" "run the web app proxy server" - hadoop_add_subcommand "queue" "prints queue information" - hadoop_add_subcommand "resourcemanager" "run the ResourceManager" - hadoop_add_subcommand "rmadmin" "admin tools" - hadoop_add_subcommand "scmadmin" "SharedCacheManager admin tools" - hadoop_add_subcommand "sharedcachemanager" "run the SharedCacheManager daemon" - hadoop_add_subcommand "timelinereader" "run the timeline reader server" - hadoop_add_subcommand "timelineserver" "run the timeline server" - hadoop_add_subcommand "top" "view cluster information" - hadoop_add_subcommand "version" "print the version" + hadoop_add_subcommand "application" client "prints application(s) report/kill application" + hadoop_add_subcommand "applicationattempt" client "prints applicationattempt(s) report" + hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" + hadoop_add_subcommand "cluster" client "prints cluster information" + hadoop_add_subcommand "container" client "prints container(s) report" + hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon" + hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" + hadoop_add_subcommand "jar " client "run a jar file" + hadoop_add_subcommand "logs" client "dump container logs" + hadoop_add_subcommand "node" admin "prints node report(s)" + hadoop_add_subcommand "nodemanager" daemon "run a nodemanager on each worker" + hadoop_add_subcommand "proxyserver" daemon "run the web app proxy server" + hadoop_add_subcommand "queue" client "prints queue information" + hadoop_add_subcommand "resourcemanager" daemon "run the ResourceManager" + hadoop_add_subcommand "rmadmin" admin "admin tools" + hadoop_add_subcommand "router" daemon "run the Router daemon" + hadoop_add_subcommand "scmadmin" admin 
"SharedCacheManager admin tools" + hadoop_add_subcommand "sharedcachemanager" admin "run the SharedCacheManager daemon" + hadoop_add_subcommand "timelinereader" client "run the timeline reader server" + hadoop_add_subcommand "timelineserver" daemon "run the timeline server" + hadoop_add_subcommand "top" client "view cluster information" + hadoop_add_subcommand "version" client "print the version" hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true } @@ -137,6 +138,10 @@ function yarncmd_case rmadmin) HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RMAdminCLI' ;; + router) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.router.Router' + ;; scmadmin) HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin' ;; diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd index ca879f51f99..690badf7e1e 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd @@ -130,6 +130,10 @@ if "%1" == "--loglevel" ( set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-applicationhistoryservice\target\classes ) + if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes ( + set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes + ) + if exist %HADOOP_YARN_HOME%\build\test\classes ( set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes ) @@ -151,7 +155,7 @@ if "%1" == "--loglevel" ( set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^ application applicationattempt container node queue logs daemonlog historyserver ^ - timelineserver timelinereader classpath + timelineserver timelinereader router classpath for %%i in ( %yarncommands% ) do ( if %yarn-command% == %%i set yarncommand=true ) @@ -248,6 +252,12 @@ goto :eof set YARN_OPTS=%YARN_OPTS% %YARN_TIMELINEREADER_OPTS% goto :eof +:router + set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\router-config\log4j.properties + set CLASS=org.apache.hadoop.yarn.server.router.Router + set YARN_OPTS=%YARN_OPTS% %HADOOP_ROUTER_OPTS% + goto :eof + :nodemanager set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties set CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager @@ -317,6 +327,7 @@ goto :eof @echo where COMMAND is one of: @echo resourcemanager run the ResourceManager @echo nodemanager run a nodemanager on each slave + @echo router run the Router daemon @echo timelineserver run the timeline server @echo timelinereader run the timeline reader server @echo rmadmin admin tools diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh index d003adb2643..be42298c0b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh +++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh @@ -136,3 +136,15 @@ # See ResourceManager for some examples # #export YARN_SHAREDCACHEMANAGER_OPTS= + +### +# Router specific parameters +### + +# Specify the JVM options to be used when starting the Router. 
+# These options will be appended to the options specified as HADOOP_OPTS +# and therefore may override any similar flags set in HADOOP_OPTS +# +# See ResourceManager for some examples +# +#export YARN_ROUTER_OPTS= diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index c090749956d..6825a36ebdd 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -20,6 +20,9 @@ + + + @@ -290,7 +293,10 @@ - + + + + @@ -304,6 +310,15 @@ + + + + + + + + + @@ -579,4 +594,9 @@ + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java index 10708a0c6b6..8fceb46e4c5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagementProtocol.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; @@ -196,10 +198,31 @@ public interface ContainerManagementProtocol { */ @Public @Unstable + @Deprecated IncreaseContainersResourceResponse increaseContainersResource( IncreaseContainersResourceRequest request) throws YarnException, IOException; + /** + *
<p>
+ * The API used by the ApplicationMaster to request for + * resource update of running containers on the NodeManager. + *
</p>
+ * + * @param request + * request to update resource of a list of containers + * @return response which includes a list of containerIds of containers + * whose resource has been successfully updated and a + * containerId-to-exception map for failed requests. + * + * @throws YarnException Exception specific to YARN + * @throws IOException IOException thrown from NodeManager + */ + @Public + @Unstable + ContainerUpdateResponse updateContainer(ContainerUpdateRequest request) + throws YarnException, IOException; + SignalContainerResponse signalToContainer(SignalContainerRequest request) throws YarnException, IOException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ContainerUpdateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ContainerUpdateRequest.java new file mode 100644 index 00000000000..0242c7483ec --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ContainerUpdateRequest.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ContainerManagementProtocol; +import org.apache.hadoop.yarn.api.records.NMToken; +import org.apache.hadoop.yarn.api.records.Token; +import org.apache.hadoop.yarn.util.Records; + +import java.util.List; + +/** + *
<p>
The request sent by Application Master to the + * Node Manager to change the resource quota of a container.
</p>
+ * + * @see ContainerManagementProtocol#updateContainer(ContainerUpdateRequest) + */ +@Public +@Unstable +public abstract class ContainerUpdateRequest { + + @Public + @Unstable + public static ContainerUpdateRequest newInstance( + List containersToIncrease) { + ContainerUpdateRequest request = + Records.newRecord(ContainerUpdateRequest.class); + request.setContainersToUpdate(containersToIncrease); + return request; + } + + /** + * Get a list of container tokens to be used for authorization during + * container resource update. + *
<p>
+ * Note: {@link NMToken} will be used for authenticating communication with + * {@code NodeManager}. + * @return the list of container tokens to be used for authorization during + * container resource update. + * @see NMToken + */ + @Public + @Unstable + public abstract List getContainersToUpdate(); + + /** + * Set container tokens to be used during container resource increase. + * The token is acquired from + * AllocateResponse.getUpdatedContainers. + * The token contains the container id and resource capability required for + * container resource update. + * @param containersToUpdate the list of container tokens to be used + * for container resource increase. + */ + @Public + @Unstable + public abstract void setContainersToUpdate( + List containersToUpdate); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ContainerUpdateResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ContainerUpdateResponse.java new file mode 100644 index 00000000000..aa132f4da07 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ContainerUpdateResponse.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.ContainerManagementProtocol; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.SerializedException; +import org.apache.hadoop.yarn.util.Records; + +import java.util.List; +import java.util.Map; + +/** + *
<p>
+ * The response sent by the NodeManager to the + * ApplicationMaster when asked to update container resource. + *
</p>
+ * + * @see ContainerManagementProtocol#updateContainer(ContainerUpdateRequest) + */ +@Public +@Unstable +public abstract class ContainerUpdateResponse { + + public static ContainerUpdateResponse newInstance( + List successfullyUpdatedContainers, + Map failedRequests) { + ContainerUpdateResponse response = + Records.newRecord(ContainerUpdateResponse.class); + response.setSuccessfullyUpdatedContainers( + successfullyUpdatedContainers); + response.setFailedRequests(failedRequests); + return response; + } + + /** + * Get the list of containerIds of containers whose resource + * have been successfully update. + * + * @return the list of containerIds of containers whose resource have + * been successfully updated. + */ + @Public + @Unstable + public abstract List getSuccessfullyUpdatedContainers(); + + /** + * Set the list of containerIds of containers whose resource have + * been successfully updated. + * @param succeedUpdatedContainers Containers whose update request were + * successfully completed. + */ + @Private + @Unstable + public abstract void setSuccessfullyUpdatedContainers( + List succeedUpdatedContainers); + + /** + * Get the containerId-to-exception map in which the exception indicates + * error from each container for failed requests. + * @return map of containerId-to-exception + */ + @Public + @Unstable + public abstract Map getFailedRequests(); + + /** + * Set the containerId-to-exception map in which the exception indicates + * error from each container for failed requests. + * @param failedRequests Containers whose update request were failed + */ + @Private + @Unstable + public abstract void setFailedRequests( + Map failedRequests); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java index 133b377c0b5..528b6421a3f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java @@ -18,7 +18,9 @@ package org.apache.hadoop.yarn.conf; -import com.google.common.annotations.VisibleForTesting; +import java.net.InetSocketAddress; +import java.util.Collection; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -27,8 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import java.net.InetSocketAddress; -import java.util.Collection; +import com.google.common.annotations.VisibleForTesting; @InterfaceAudience.Private public class HAUtil { @@ -44,6 +45,29 @@ public class HAUtil { throw new YarnRuntimeException(BAD_CONFIG_MESSAGE_PREFIX + msg); } + /** + * Returns true if Federation is configured. + * + * @param conf Configuration + * @return true if federation is configured in the configuration; else false. + */ + public static boolean isFederationEnabled(Configuration conf) { + return conf.getBoolean(YarnConfiguration.FEDERATION_ENABLED, + YarnConfiguration.DEFAULT_FEDERATION_ENABLED); + } + + /** + * Returns true if RM failover is enabled in a Federation setting. + * + * @param conf Configuration + * @return if RM failover is enabled in conjunction with Federation in the + * configuration; else false. 
+ */ + public static boolean isFederationFailoverEnabled(Configuration conf) { + return conf.getBoolean(YarnConfiguration.FEDERATION_FAILOVER_ENABLED, + YarnConfiguration.DEFAULT_FEDERATION_FAILOVER_ENABLED); + } + /** * Returns true if Resource Manager HA is configured. * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 93437e31eb3..86f45b81fe3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.ActiveStandbyElector; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; @@ -87,7 +88,17 @@ public class YarnConfiguration extends Configuration { }); Configuration.addDeprecations(new DeprecationDelta[] { new DeprecationDelta(RM_SYSTEM_METRICS_PUBLISHER_ENABLED, - SYSTEM_METRICS_PUBLISHER_ENABLED) + SYSTEM_METRICS_PUBLISHER_ENABLED), + new DeprecationDelta(RM_ZK_ACL, CommonConfigurationKeys.ZK_ACL), + new DeprecationDelta(RM_ZK_AUTH, CommonConfigurationKeys.ZK_AUTH), + new DeprecationDelta(RM_ZK_ADDRESS, + CommonConfigurationKeys.ZK_ADDRESS), + new DeprecationDelta(RM_ZK_NUM_RETRIES, + CommonConfigurationKeys.ZK_NUM_RETRIES), + new DeprecationDelta(RM_ZK_TIMEOUT_MS, + CommonConfigurationKeys.ZK_TIMEOUT_MS), + new DeprecationDelta(RM_ZK_RETRY_INTERVAL_MS, + CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS), }); } @@ -143,6 +154,9 @@ public class YarnConfiguration extends Configuration { public static final String RM_HOSTNAME = RM_PREFIX + "hostname"; + public static final String RM_EPOCH = RM_PREFIX + "epoch"; + public static final long DEFAULT_RM_EPOCH = 0L; + /** The address of the applications manager interface in the RM.*/ public static final String RM_ADDRESS = RM_PREFIX + "address"; @@ -153,6 +167,10 @@ public class YarnConfiguration extends Configuration { public static final String RM_APPLICATION_MASTER_SERVICE_PROCESSORS = RM_PREFIX + "application-master-service.processors"; + public static final String RM_AUTO_UPDATE_CONTAINERS = + RM_PREFIX + "auto-update.containers"; + public static final boolean DEFAULT_RM_AUTO_UPDATE_CONTAINERS = false; + /** The actual bind address for the RM.*/ public static final String RM_BIND_HOST = RM_PREFIX + "bind-host"; @@ -2066,6 +2084,10 @@ public class YarnConfiguration extends Configuration { = TIMELINE_SERVICE_PREFIX + "entity-file.fs-support-append"; + public static final String + TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR = + TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "with-user-dir"; + /** * Settings for timeline service v2.0 */ @@ -2555,6 +2577,172 @@ public class YarnConfiguration extends Configuration { SHARED_CACHE_PREFIX + "nm.uploader.thread-count"; public static final int DEFAULT_SHARED_CACHE_NM_UPLOADER_THREAD_COUNT = 20; + //////////////////////////////// + // Federation Configs + //////////////////////////////// + + public static final String FEDERATION_PREFIX = YARN_PREFIX + 
"federation."; + + public static final String FEDERATION_ENABLED = FEDERATION_PREFIX + "enabled"; + public static final boolean DEFAULT_FEDERATION_ENABLED = false; + + public static final String FEDERATION_FAILOVER_ENABLED = + FEDERATION_PREFIX + "failover.enabled"; + public static final boolean DEFAULT_FEDERATION_FAILOVER_ENABLED = true; + + public static final String FEDERATION_STATESTORE_CLIENT_CLASS = + FEDERATION_PREFIX + "state-store.class"; + + public static final String DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS = + "org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore"; + + public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS = + FEDERATION_PREFIX + "cache-ttl.secs"; + + // 5 minutes + public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60; + + public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS = + FEDERATION_PREFIX + "state-store.heartbeat-interval-secs"; + + // 5 minutes + public static final int + DEFAULT_FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS = + 5 * 60; + + public static final String FEDERATION_MACHINE_LIST = + FEDERATION_PREFIX + "machine-list"; + + public static final String FEDERATION_CLUSTER_RESOLVER_CLASS = + FEDERATION_PREFIX + "subcluster-resolver.class"; + public static final String DEFAULT_FEDERATION_CLUSTER_RESOLVER_CLASS = + "org.apache.hadoop.yarn.server.federation.resolver." + + "DefaultSubClusterResolverImpl"; + + public static final String DEFAULT_FEDERATION_POLICY_KEY = "*"; + + public static final String FEDERATION_POLICY_MANAGER = FEDERATION_PREFIX + + "policy-manager"; + + public static final String DEFAULT_FEDERATION_POLICY_MANAGER = "org.apache" + + ".hadoop.yarn.server.federation.policies" + + ".manager.UniformBroadcastPolicyManager"; + + public static final String FEDERATION_POLICY_MANAGER_PARAMS = + FEDERATION_PREFIX + "policy-manager-params"; + + public static final String DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS = ""; + + public static final String FEDERATION_STATESTORE_ZK_PREFIX = + FEDERATION_PREFIX + "zk-state-store."; + /** Parent znode path under which ZKRMStateStore will create znodes. 
*/ + public static final String FEDERATION_STATESTORE_ZK_PARENT_PATH = + FEDERATION_STATESTORE_ZK_PREFIX + "parent-path"; + public static final String DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH = + "/federationstore"; + + private static final String FEDERATION_STATESTORE_SQL_PREFIX = + FEDERATION_PREFIX + "state-store.sql."; + + public static final String FEDERATION_STATESTORE_SQL_USERNAME = + FEDERATION_STATESTORE_SQL_PREFIX + "username"; + + public static final String FEDERATION_STATESTORE_SQL_PASSWORD = + FEDERATION_STATESTORE_SQL_PREFIX + "password"; + + public static final String FEDERATION_STATESTORE_SQL_URL = + FEDERATION_STATESTORE_SQL_PREFIX + "url"; + + public static final String FEDERATION_STATESTORE_SQL_JDBC_CLASS = + FEDERATION_STATESTORE_SQL_PREFIX + "jdbc-class"; + + public static final String DEFAULT_FEDERATION_STATESTORE_SQL_JDBC_CLASS = + "org.hsqldb.jdbc.JDBCDataSource"; + + public static final String FEDERATION_STATESTORE_SQL_MAXCONNECTIONS = + FEDERATION_STATESTORE_SQL_PREFIX + "max-connections"; + + public static final int DEFAULT_FEDERATION_STATESTORE_SQL_MAXCONNECTIONS = 1; + + public static final String ROUTER_PREFIX = YARN_PREFIX + "router."; + + public static final String ROUTER_BIND_HOST = ROUTER_PREFIX + "bind-host"; + + public static final String ROUTER_CLIENTRM_PREFIX = + ROUTER_PREFIX + "clientrm."; + + public static final String ROUTER_CLIENTRM_ADDRESS = + ROUTER_CLIENTRM_PREFIX + "address"; + public static final int DEFAULT_ROUTER_CLIENTRM_PORT = 8050; + public static final String DEFAULT_ROUTER_CLIENTRM_ADDRESS = + "0.0.0.0:" + DEFAULT_ROUTER_CLIENTRM_PORT; + + public static final String ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE = + ROUTER_CLIENTRM_PREFIX + "interceptor-class.pipeline"; + public static final String DEFAULT_ROUTER_CLIENTRM_INTERCEPTOR_CLASS = + "org.apache.hadoop.yarn.server.router.clientrm." + + "DefaultClientRequestInterceptor"; + + public static final String ROUTER_PIPELINE_CACHE_MAX_SIZE = + ROUTER_PREFIX + "pipeline.cache-max-size"; + public static final int DEFAULT_ROUTER_PIPELINE_CACHE_MAX_SIZE = 25; + + public static final String ROUTER_RMADMIN_PREFIX = ROUTER_PREFIX + "rmadmin."; + + public static final String ROUTER_RMADMIN_ADDRESS = + ROUTER_RMADMIN_PREFIX + "address"; + public static final int DEFAULT_ROUTER_RMADMIN_PORT = 8052; + public static final String DEFAULT_ROUTER_RMADMIN_ADDRESS = + "0.0.0.0:" + DEFAULT_ROUTER_RMADMIN_PORT; + + public static final String ROUTER_RMADMIN_INTERCEPTOR_CLASS_PIPELINE = + ROUTER_RMADMIN_PREFIX + "interceptor-class.pipeline"; + public static final String DEFAULT_ROUTER_RMADMIN_INTERCEPTOR_CLASS = + "org.apache.hadoop.yarn.server.router.rmadmin." + + "DefaultRMAdminRequestInterceptor"; + + /** + * The number of retries for GetNewApplication and SubmitApplication in + * {@code FederationClientInterceptor}. + */ + public static final String ROUTER_CLIENTRM_SUBMIT_RETRY = + ROUTER_PREFIX + "submit.retry"; + public static final int DEFAULT_ROUTER_CLIENTRM_SUBMIT_RETRY = 3; + + public static final String ROUTER_WEBAPP_PREFIX = ROUTER_PREFIX + "webapp."; + + /** The address of the Router web application. */ + public static final String ROUTER_WEBAPP_ADDRESS = + ROUTER_WEBAPP_PREFIX + "address"; + + public static final int DEFAULT_ROUTER_WEBAPP_PORT = 8089; + public static final String DEFAULT_ROUTER_WEBAPP_ADDRESS = + "0.0.0.0:" + DEFAULT_ROUTER_WEBAPP_PORT; + + /** The https address of the Router web application. 
*/ + public static final String ROUTER_WEBAPP_HTTPS_ADDRESS = + ROUTER_WEBAPP_PREFIX + "https.address"; + + public static final int DEFAULT_ROUTER_WEBAPP_HTTPS_PORT = 8091; + public static final String DEFAULT_ROUTER_WEBAPP_HTTPS_ADDRESS = + "0.0.0.0:" + DEFAULT_ROUTER_WEBAPP_HTTPS_PORT; + + public static final String ROUTER_WEBAPP_INTERCEPTOR_CLASS_PIPELINE = + ROUTER_WEBAPP_PREFIX + "interceptor-class.pipeline"; + public static final String DEFAULT_ROUTER_WEBAPP_INTERCEPTOR_CLASS = + "org.apache.hadoop.yarn.server.router.webapp." + + "DefaultRequestInterceptorREST"; + + /** + * The interceptor class used in FederationInterceptorREST to communicate with + * each SubCluster. + */ + public static final String ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS = + ROUTER_WEBAPP_PREFIX + "default-interceptor-class"; + public static final String DEFAULT_ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS = + "org.apache.hadoop.yarn.server.router.webapp." + + "DefaultRequestInterceptorREST"; + //////////////////////////////// // Other Configs //////////////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/containermanagement_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/containermanagement_protocol.proto index 7c53d2eff15..22b440693d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/containermanagement_protocol.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/containermanagement_protocol.proto @@ -36,6 +36,7 @@ service ContainerManagementProtocolService { rpc stopContainers(StopContainersRequestProto) returns (StopContainersResponseProto); rpc getContainerStatuses(GetContainerStatusesRequestProto) returns (GetContainerStatusesResponseProto); rpc increaseContainersResource(IncreaseContainersResourceRequestProto) returns (IncreaseContainersResourceResponseProto); + rpc updateContainer(ContainerUpdateRequestProto) returns (ContainerUpdateResponseProto); rpc signalToContainer(SignalContainerRequestProto) returns (SignalContainerResponseProto); rpc localize(ResourceLocalizationRequestProto) returns (ResourceLocalizationResponseProto); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto index 0e148966c87..b92c46e945e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto @@ -368,6 +368,15 @@ message IncreaseContainersResourceResponseProto { repeated ContainerExceptionMapProto failed_requests = 2; } +message ContainerUpdateRequestProto { + repeated hadoop.common.TokenProto update_container_token = 1; +} + +message ContainerUpdateResponseProto { + repeated ContainerIdProto succeeded_requests = 1; + repeated ContainerExceptionMapProto failed_requests = 2; +} + ////////////////////////////////////////////////////// /////// Application_History_Protocol ///////////////// ////////////////////////////////////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java index 3da4babb8a7..c40c2c52ea0 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java @@ -68,6 +68,52 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase { .YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL); configurationPropsToSkipCompare.add(YarnConfiguration.CURATOR_LEADER_ELECTOR); + // Federation default configs to be ignored + configurationPropsToSkipCompare + .add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS); + configurationPropsToSkipCompare + .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED); + configurationPropsToSkipCompare + .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS); + configurationPropsToSkipCompare + .add(YarnConfiguration.RM_EPOCH); + configurationPropsToSkipCompare + .add(YarnConfiguration.ROUTER_CLIENTRM_ADDRESS); + configurationPropsToSkipCompare + .add(YarnConfiguration.ROUTER_RMADMIN_ADDRESS); + configurationPropsToSkipCompare + .add(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS); + + // Federation policies configs to be ignored + configurationPropsToSkipCompare + .add(YarnConfiguration.FEDERATION_POLICY_MANAGER); + configurationPropsToSkipCompare + .add(YarnConfiguration.FEDERATION_POLICY_MANAGER_PARAMS); + configurationPropsToSkipCompare + .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY); + configurationPropsToSkipCompare + .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER); + configurationPropsToSkipCompare + .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS); + + // Federation StateStore ZK implementation configs to be ignored + configurationPropsToSkipCompare.add( + YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH); + + // Federation StateStore SQL implementation configs to be ignored + configurationPropsToSkipCompare + .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS); + configurationPropsToSkipCompare + .add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_SQL_JDBC_CLASS); + configurationPropsToSkipCompare + .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_USERNAME); + configurationPropsToSkipCompare + .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_PASSWORD); + configurationPropsToSkipCompare + .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_URL); + configurationPropsToSkipCompare + .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_MAXCONNECTIONS); + // Ignore blacklisting nodes for AM failures feature since it is still a // "work in progress" configurationPropsToSkipCompare.add(YarnConfiguration. 
@@ -81,6 +127,15 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase { configurationPropsToSkipCompare .add(YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED); + // skip deprecated ZooKeeper settings + configurationPropsToSkipCompare.add(YarnConfiguration.RM_ZK_ADDRESS); + configurationPropsToSkipCompare.add(YarnConfiguration.RM_ZK_NUM_RETRIES); + configurationPropsToSkipCompare.add(YarnConfiguration.RM_ZK_TIMEOUT_MS); + configurationPropsToSkipCompare.add( + YarnConfiguration.RM_ZK_RETRY_INTERVAL_MS); + configurationPropsToSkipCompare.add(YarnConfiguration.RM_ZK_AUTH); + configurationPropsToSkipCompare.add(YarnConfiguration.RM_ZK_ACL); + // Used as Java command line properties, not XML configurationPrefixToSkipCompare.add("yarn.app.container"); @@ -100,6 +155,11 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase { configurationPrefixToSkipCompare .add(YarnConfiguration.NM_CPU_RESOURCE_ENABLED); + // Ignore all Router Federation variables + + configurationPrefixToSkipCompare + .add(YarnConfiguration.ROUTER_CLIENTRM_SUBMIT_RETRY); + // Set by container-executor.cfg configurationPrefixToSkipCompare.add(YarnConfiguration.NM_USER_HOME_DIR); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java index c81d448f8b4..8171de228aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java @@ -34,17 +34,17 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse; -import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; -import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; - import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse; + import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -239,12 +239,12 @@ public class NMClientImpl extends NMClient { container.getNodeId().toString(), container.getId()); List increaseTokens = new ArrayList<>(); increaseTokens.add(container.getContainerToken()); - IncreaseContainersResourceRequest increaseRequest = - IncreaseContainersResourceRequest - .newInstance(increaseTokens); - IncreaseContainersResourceResponse response = - 
proxy.getContainerManagementProtocol() - .increaseContainersResource(increaseRequest); + + ContainerUpdateRequest request = + ContainerUpdateRequest.newInstance(increaseTokens); + ContainerUpdateResponse response = + proxy.getContainerManagementProtocol().updateContainer(request); + if (response.getFailedRequests() != null && response.getFailedRequests().containsKey(container.getId())) { Throwable t = response.getFailedRequests().get(container.getId()) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java new file mode 100644 index 00000000000..e3f91557ee4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java @@ -0,0 +1,223 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
<p>
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
<p>
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.client; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ha.HAServiceProtocol; +import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.MiniYARNCluster; +import org.apache.hadoop.yarn.server.federation.failover.FederationProxyProviderUtil; +import org.apache.hadoop.yarn.server.federation.failover.FederationRMFailoverProxyProvider; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Unit tests for FederationRMFailoverProxyProvider. 
+ */ +public class TestFederationRMFailoverProxyProvider { + + private Configuration conf; + private FederationStateStore stateStore; + private final String dummyCapability = "cap"; + + @Before + public void setUp() throws IOException, YarnException { + conf = new YarnConfiguration(); + stateStore = new MemoryFederationStateStore(); + stateStore.init(conf); + FederationStateStoreFacade.getInstance().reinitialize(stateStore, conf); + } + + @After + public void tearDown() throws Exception { + stateStore.close(); + stateStore = null; + } + + @Test + public void testFederationRMFailoverProxyProvider() throws Exception { + final SubClusterId subClusterId = SubClusterId.newInstance("SC-1"); + final MiniYARNCluster cluster = new MiniYARNCluster( + "testFederationRMFailoverProxyProvider", 3, 0, 1, 1); + + conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true); + conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false); + conf.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1"); + conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3"); + + conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, + 2000); + + HATestUtil.setRpcAddressForRM("rm1", 10000, conf); + HATestUtil.setRpcAddressForRM("rm2", 20000, conf); + HATestUtil.setRpcAddressForRM("rm3", 30000, conf); + conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true); + + cluster.init(conf); + cluster.start(); + + // Transition rm3 to active; + makeRMActive(subClusterId, cluster, 2); + + ApplicationClientProtocol client = FederationProxyProviderUtil + .createRMProxy(conf, ApplicationClientProtocol.class, subClusterId, + UserGroupInformation.getCurrentUser()); + + // client will retry until the rm becomes active. + GetClusterMetricsResponse response = + client.getClusterMetrics(GetClusterMetricsRequest.newInstance()); + + // validate response + checkResponse(response); + + // transition rm3 to standby + cluster.getResourceManager(2).getRMContext().getRMAdminService() + .transitionToStandby(new HAServiceProtocol.StateChangeRequestInfo( + HAServiceProtocol.RequestSource.REQUEST_BY_USER)); + + // Transition rm2 to active; + makeRMActive(subClusterId, cluster, 1); + response = client.getClusterMetrics(GetClusterMetricsRequest.newInstance()); + + // validate response + checkResponse(response); + + cluster.stop(); + } + + private void checkResponse(GetClusterMetricsResponse response) { + Assert.assertNotNull(response.getClusterMetrics()); + Assert.assertEquals(0, + response.getClusterMetrics().getNumActiveNodeManagers()); + } + + private void makeRMActive(final SubClusterId subClusterId, + final MiniYARNCluster cluster, final int index) { + try { + System.out.println("Transition rm" + (index + 1) + " to active"); + String dummyAddress = "host:" + index; + cluster.getResourceManager(index).getRMContext().getRMAdminService() + .transitionToActive(new HAServiceProtocol.StateChangeRequestInfo( + HAServiceProtocol.RequestSource.REQUEST_BY_USER)); + ResourceManager rm = cluster.getResourceManager(index); + InetSocketAddress amRMAddress = + rm.getApplicationMasterService().getBindAddress(); + InetSocketAddress clientRMAddress = + rm.getClientRMService().getBindAddress(); + SubClusterRegisterRequest request = SubClusterRegisterRequest + .newInstance(SubClusterInfo.newInstance(subClusterId, + amRMAddress.getAddress().getHostAddress() + ":" + + amRMAddress.getPort(), + clientRMAddress.getAddress().getHostAddress() + ":" + + clientRMAddress.getPort(), + dummyAddress, dummyAddress, SubClusterState.SC_NEW, 1, + dummyCapability)); + 
stateStore.registerSubCluster(request); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + @Test + public void testUGIForProxyCreation() + throws IOException, InterruptedException { + conf.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1"); + + UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); + UserGroupInformation user1 = + UserGroupInformation.createProxyUser("user1", currentUser); + UserGroupInformation user2 = + UserGroupInformation.createProxyUser("user2", currentUser); + + final TestableFederationRMFailoverProxyProvider provider = + new TestableFederationRMFailoverProxyProvider(); + + InetSocketAddress addr = + conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); + final ClientRMProxy rmProxy = mock(ClientRMProxy.class); + when(rmProxy.getRMAddress(any(YarnConfiguration.class), any(Class.class))) + .thenReturn(addr); + + user1.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() { + provider.init(conf, rmProxy, ApplicationMasterProtocol.class); + return null; + } + }); + + final ProxyInfo currentProxy = provider.getProxy(); + Assert.assertEquals("user1", provider.getLastProxyUGI().getUserName()); + + user2.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() { + provider.performFailover(currentProxy.proxy); + return null; + } + }); + Assert.assertEquals("user1", provider.getLastProxyUGI().getUserName()); + + provider.close(); + } + + protected static class TestableFederationRMFailoverProxyProvider + extends FederationRMFailoverProxyProvider { + + private UserGroupInformation lastProxyUGI = null; + + @Override + protected T createRMProxy(InetSocketAddress rmAddress) throws IOException { + lastProxyUGI = UserGroupInformation.getCurrentUser(); + return super.createRMProxy(rmAddress); + } + + public UserGroupInformation getLastProxyUGI() { + return lastProxyUGI; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java index fbb56b0e072..1c39fa75a0f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.client.api.impl; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; @@ -36,6 +37,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.TreeSet; @@ -142,6 +144,10 @@ public class TestAMRMClient { // set the minimum allocation so that resource decrease can go under 1024 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512); conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1); + conf.setBoolean( + YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true); + conf.setInt( + YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 10); yarnCluster = new 
MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1); yarnCluster.init(conf); yarnCluster.start(); @@ -816,7 +822,7 @@ public class TestAMRMClient { assertEquals(3, amClient.blacklistAdditions.size()); assertEquals(0, amClient.blacklistRemovals.size()); - // Add nodes[1] and nodes[2] to removal list, + // Add nodes[1] and nodes[2] to removal list, // Verify addition list remove these two nodes. List nodeList12 = new ArrayList(); nodeList12.add(nodes[1]); @@ -825,7 +831,7 @@ public class TestAMRMClient { assertEquals(1, amClient.blacklistAdditions.size()); assertEquals(2, amClient.blacklistRemovals.size()); - // Add nodes[1] again to addition list, + // Add nodes[1] again to addition list, // Verify removal list will remove this node. List nodeList1 = new ArrayList(); nodeList1.add(nodes[1]); @@ -924,8 +930,8 @@ public class TestAMRMClient { // add exp=x to ANY client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, 1), null, null, Priority.UNDEFINED, true, "x")); - Assert.assertEquals(1, client.ask.size()); - Assert.assertEquals("x", client.ask.iterator().next() + assertEquals(1, client.ask.size()); + assertEquals("x", client.ask.iterator().next() .getNodeLabelExpression()); // add exp=x then add exp=a to ANY in same priority, only exp=a should kept @@ -933,8 +939,8 @@ public class TestAMRMClient { 1), null, null, Priority.UNDEFINED, true, "x")); client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, 1), null, null, Priority.UNDEFINED, true, "a")); - Assert.assertEquals(1, client.ask.size()); - Assert.assertEquals("a", client.ask.iterator().next() + assertEquals(1, client.ask.size()); + assertEquals("a", client.ask.iterator().next() .getNodeLabelExpression()); // add exp=x to ANY, rack and node, only resource request has ANY resource @@ -943,10 +949,10 @@ public class TestAMRMClient { client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, 1), null, null, Priority.UNDEFINED, true, "y")); - Assert.assertEquals(1, client.ask.size()); + assertEquals(1, client.ask.size()); for (ResourceRequest req : client.ask) { if (ResourceRequest.ANY.equals(req.getResourceName())) { - Assert.assertEquals("y", req.getNodeLabelExpression()); + assertEquals("y", req.getNodeLabelExpression()); } else { Assert.assertNull(req.getNodeLabelExpression()); } @@ -957,7 +963,7 @@ public class TestAMRMClient { new String[] { "node1", "node2" }, Priority.UNDEFINED, true, "y")); for (ResourceRequest req : client.ask) { if (ResourceRequest.ANY.equals(req.getResourceName())) { - Assert.assertEquals("y", req.getNodeLabelExpression()); + assertEquals("y", req.getNodeLabelExpression()); } else { Assert.assertNull(req.getNodeLabelExpression()); } @@ -971,7 +977,7 @@ public class TestAMRMClient { } catch (InvalidContainerRequestException e) { return; } - Assert.fail(); + fail(); } @Test(timeout=30000) @@ -1042,7 +1048,8 @@ public class TestAMRMClient { // get allocations AllocateResponse allocResponse = amClient.allocate(0.1f); List containers = allocResponse.getAllocatedContainers(); - Assert.assertEquals(num, containers.size()); + assertEquals(num, containers.size()); + // build container launch context Credentials ts = new Credentials(); DataOutputBuffer dob = new DataOutputBuffer(); @@ -1083,14 +1090,14 @@ public class TestAMRMClient { private void doContainerResourceChange( final AMRMClient amClient, List containers) throws YarnException, IOException { - Assert.assertEquals(3, containers.size()); + assertEquals(3, containers.size()); // remember the 
container IDs Container container1 = containers.get(0); Container container2 = containers.get(1); Container container3 = containers.get(2); AMRMClientImpl amClientImpl = (AMRMClientImpl) amClient; - Assert.assertEquals(0, amClientImpl.change.size()); + assertEquals(0, amClientImpl.change.size()); // verify newer request overwrites older request for the container1 amClientImpl.requestContainerUpdate(container1, UpdateContainerRequest.newInstance(container1.getVersion(), @@ -1100,21 +1107,21 @@ public class TestAMRMClient { UpdateContainerRequest.newInstance(container1.getVersion(), container1.getId(), ContainerUpdateType.INCREASE_RESOURCE, Resource.newInstance(4096, 1), null)); - Assert.assertEquals(Resource.newInstance(4096, 1), + assertEquals(Resource.newInstance(4096, 1), amClientImpl.change.get(container1.getId()).getValue().getCapability()); // verify new decrease request cancels old increase request for container1 amClientImpl.requestContainerUpdate(container1, UpdateContainerRequest.newInstance(container1.getVersion(), container1.getId(), ContainerUpdateType.DECREASE_RESOURCE, Resource.newInstance(512, 1), null)); - Assert.assertEquals(Resource.newInstance(512, 1), + assertEquals(Resource.newInstance(512, 1), amClientImpl.change.get(container1.getId()).getValue().getCapability()); // request resource increase for container2 amClientImpl.requestContainerUpdate(container2, UpdateContainerRequest.newInstance(container2.getVersion(), container2.getId(), ContainerUpdateType.INCREASE_RESOURCE, Resource.newInstance(2048, 1), null)); - Assert.assertEquals(Resource.newInstance(2048, 1), + assertEquals(Resource.newInstance(2048, 1), amClientImpl.change.get(container2.getId()).getValue().getCapability()); // verify release request will cancel pending change requests for the same // container @@ -1122,27 +1129,357 @@ public class TestAMRMClient { UpdateContainerRequest.newInstance(container3.getVersion(), container3.getId(), ContainerUpdateType.INCREASE_RESOURCE, Resource.newInstance(2048, 1), null)); - Assert.assertEquals(3, amClientImpl.pendingChange.size()); + assertEquals(3, amClientImpl.pendingChange.size()); amClientImpl.releaseAssignedContainer(container3.getId()); - Assert.assertEquals(2, amClientImpl.pendingChange.size()); + assertEquals(2, amClientImpl.pendingChange.size()); // as of now: container1 asks to decrease to (512, 1) // container2 asks to increase to (2048, 1) // send allocation requests AllocateResponse allocResponse = amClient.allocate(0.1f); - Assert.assertEquals(0, amClientImpl.change.size()); + assertEquals(0, amClientImpl.change.size()); // we should get decrease confirmation right away List updatedContainers = allocResponse.getUpdatedContainers(); - Assert.assertEquals(1, updatedContainers.size()); + assertEquals(1, updatedContainers.size()); // we should get increase allocation after the next NM's heartbeat to RM triggerSchedulingWithNMHeartBeat(); // get allocations allocResponse = amClient.allocate(0.1f); updatedContainers = allocResponse.getUpdatedContainers(); - Assert.assertEquals(1, updatedContainers.size()); + assertEquals(1, updatedContainers.size()); } + @Test(timeout=60000) + public void testAMRMClientWithContainerPromotion() + throws YarnException, IOException { + AMRMClientImpl amClient = + (AMRMClientImpl) AMRMClient + .createAMRMClient(); + //asserting we are not using the singleton instance cache + Assert.assertSame(NMTokenCache.getSingleton(), + amClient.getNMTokenCache()); + amClient.init(conf); + amClient.start(); + + // start am nm client + 
NMClientImpl nmClient = (NMClientImpl) NMClient.createNMClient(); + Assert.assertNotNull(nmClient); + // asserting we are using the singleton instance cache + Assert.assertSame( + NMTokenCache.getSingleton(), nmClient.getNMTokenCache()); + nmClient.init(conf); + nmClient.start(); + assertEquals(STATE.STARTED, nmClient.getServiceState()); + + amClient.registerApplicationMaster("Host", 10000, ""); + // setup container request + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + // START OPPORTUNISTIC Container, Send allocation request to RM + amClient.addContainerRequest( + new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, + true, null, ExecutionTypeRequest + .newInstance(ExecutionType.OPPORTUNISTIC, true))); + + int oppContainersRequestedAny = + amClient.getTable(0).get(priority2, ResourceRequest.ANY, + ExecutionType.OPPORTUNISTIC, capability).remoteRequest + .getNumContainers(); + + assertEquals(1, oppContainersRequestedAny); + assertEquals(1, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + // RM should allocate container within 2 calls to allocate() + int allocatedContainerCount = 0; + Map allocatedOpportContainers = new HashMap<>(); + int iterationsLeft = 50; + + amClient.getNMTokenCache().clearCache(); + assertEquals(0, + amClient.getNMTokenCache().numberOfTokensInCache()); + + AllocateResponse allocResponse = null; + while (allocatedContainerCount < oppContainersRequestedAny + && iterationsLeft-- > 0) { + allocResponse = amClient.allocate(0.1f); + // let NM heartbeat to RM and trigger allocations + //triggerSchedulingWithNMHeartBeat(); + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + allocatedContainerCount += + allocResponse.getAllocatedContainers().size(); + for (Container container : allocResponse.getAllocatedContainers()) { + if (container.getExecutionType() == ExecutionType.OPPORTUNISTIC) { + allocatedOpportContainers.put(container.getId(), container); + } + } + if (allocatedContainerCount < oppContainersRequestedAny) { + // sleep to let NM's heartbeat to RM and trigger allocations + sleep(100); + } + } + + assertEquals(oppContainersRequestedAny, allocatedContainerCount); + assertEquals(oppContainersRequestedAny, allocatedOpportContainers.size()); + + startContainer(allocResponse, nmClient); + + // SEND PROMOTION REQUEST TO RM + try { + Container c = allocatedOpportContainers.values().iterator().next(); + amClient.requestContainerUpdate( + c, UpdateContainerRequest.newInstance(c.getVersion(), + c.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, + null, ExecutionType.OPPORTUNISTIC)); + fail("Should throw Exception.."); + } catch (IllegalArgumentException e) { + System.out.println("## " + e.getMessage()); + assertTrue(e.getMessage().contains( + "target should be GUARANTEED and original should be OPPORTUNISTIC")); + } + + Container c = allocatedOpportContainers.values().iterator().next(); + amClient.requestContainerUpdate( + c, UpdateContainerRequest.newInstance(c.getVersion(), + c.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, + null, ExecutionType.GUARANTEED)); + iterationsLeft = 120; + Map updatedContainers = new HashMap<>(); + // do a few iterations to ensure RM is not going to send new containers + while (iterationsLeft-- > 0 && updatedContainers.isEmpty()) { + // inform RM of rejection + allocResponse = amClient.allocate(0.1f); + // RM did not send new containers because AM does not need any + if (allocResponse.getUpdatedContainers() != null) { + for 
(UpdatedContainer updatedContainer : allocResponse + .getUpdatedContainers()) { + System.out.println("Got update.."); + updatedContainers.put(updatedContainer.getContainer().getId(), + updatedContainer); + } + } + if (iterationsLeft > 0) { + // sleep to make sure NM's heartbeat + sleep(100); + } + } + assertEquals(1, updatedContainers.size()); + + for (ContainerId cId : allocatedOpportContainers.keySet()) { + Container orig = allocatedOpportContainers.get(cId); + UpdatedContainer updatedContainer = updatedContainers.get(cId); + assertNotNull(updatedContainer); + assertEquals(ExecutionType.GUARANTEED, + updatedContainer.getContainer().getExecutionType()); + assertEquals(orig.getResource(), + updatedContainer.getContainer().getResource()); + assertEquals(orig.getNodeId(), + updatedContainer.getContainer().getNodeId()); + assertEquals(orig.getVersion() + 1, + updatedContainer.getContainer().getVersion()); + } + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + // SEND UPDATE EXECTYPE UPDATE TO NM + updateContainerExecType(allocResponse, ExecutionType.GUARANTEED, nmClient); + + amClient.ask.clear(); + } + + @Test(timeout=60000) + public void testAMRMClientWithContainerDemotion() + throws YarnException, IOException { + AMRMClientImpl amClient = + (AMRMClientImpl) AMRMClient + .createAMRMClient(); + //asserting we are not using the singleton instance cache + Assert.assertSame(NMTokenCache.getSingleton(), + amClient.getNMTokenCache()); + amClient.init(conf); + amClient.start(); + + NMClientImpl nmClient = (NMClientImpl) NMClient.createNMClient(); + Assert.assertNotNull(nmClient); + // asserting we are using the singleton instance cache + Assert.assertSame( + NMTokenCache.getSingleton(), nmClient.getNMTokenCache()); + nmClient.init(conf); + nmClient.start(); + assertEquals(STATE.STARTED, nmClient.getServiceState()); + + amClient.registerApplicationMaster("Host", 10000, ""); + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + // START OPPORTUNISTIC Container, Send allocation request to RM + amClient.addContainerRequest( + new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, + true, null, ExecutionTypeRequest + .newInstance(ExecutionType.GUARANTEED, true))); + + int oppContainersRequestedAny = + amClient.getTable(0).get(priority2, ResourceRequest.ANY, + ExecutionType.GUARANTEED, capability).remoteRequest + .getNumContainers(); + + assertEquals(1, oppContainersRequestedAny); + assertEquals(1, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + // RM should allocate container within 2 calls to allocate() + int allocatedContainerCount = 0; + Map allocatedGuaranteedContainers = new HashMap<>(); + int iterationsLeft = 50; + + amClient.getNMTokenCache().clearCache(); + assertEquals(0, + amClient.getNMTokenCache().numberOfTokensInCache()); + + AllocateResponse allocResponse = null; + while (allocatedContainerCount < oppContainersRequestedAny + && iterationsLeft-- > 0) { + allocResponse = amClient.allocate(0.1f); + // let NM heartbeat to RM and trigger allocations + //triggerSchedulingWithNMHeartBeat(); + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + allocatedContainerCount += + allocResponse.getAllocatedContainers().size(); + for (Container container : allocResponse.getAllocatedContainers()) { + if (container.getExecutionType() == ExecutionType.GUARANTEED) { + allocatedGuaranteedContainers.put(container.getId(), container); + } + } + if (allocatedContainerCount 
< oppContainersRequestedAny) { + // sleep to let NM's heartbeat to RM and trigger allocations + sleep(100); + } + } + assertEquals(oppContainersRequestedAny, allocatedContainerCount); + assertEquals(oppContainersRequestedAny, + allocatedGuaranteedContainers.size()); + startContainer(allocResponse, nmClient); + + // SEND DEMOTION REQUEST TO RM + try { + Container c = allocatedGuaranteedContainers.values().iterator().next(); + amClient.requestContainerUpdate( + c, UpdateContainerRequest.newInstance(c.getVersion(), + c.getId(), ContainerUpdateType.DEMOTE_EXECUTION_TYPE, + null, ExecutionType.GUARANTEED)); + fail("Should throw Exception.."); + } catch (IllegalArgumentException e) { + System.out.println("## " + e.getMessage()); + assertTrue(e.getMessage().contains( + "target should be OPPORTUNISTIC and original should be GUARANTEED")); + } + + Container c = allocatedGuaranteedContainers.values().iterator().next(); + amClient.requestContainerUpdate( + c, UpdateContainerRequest.newInstance(c.getVersion(), + c.getId(), ContainerUpdateType.DEMOTE_EXECUTION_TYPE, + null, ExecutionType.OPPORTUNISTIC)); + iterationsLeft = 120; + Map updatedContainers = new HashMap<>(); + // do a few iterations to ensure RM is not going to send new containers + while (iterationsLeft-- > 0 && updatedContainers.isEmpty()) { + // inform RM of rejection + allocResponse = amClient.allocate(0.1f); + // RM did not send new containers because AM does not need any + if (allocResponse.getUpdatedContainers() != null) { + for (UpdatedContainer updatedContainer : allocResponse + .getUpdatedContainers()) { + System.out.println("Got update.."); + updatedContainers.put(updatedContainer.getContainer().getId(), + updatedContainer); + } + } + if (iterationsLeft > 0) { + // sleep to make sure NM's heartbeat + sleep(100); + } + } + assertEquals(1, updatedContainers.size()); + + for (ContainerId cId : allocatedGuaranteedContainers.keySet()) { + Container orig = allocatedGuaranteedContainers.get(cId); + UpdatedContainer updatedContainer = updatedContainers.get(cId); + assertNotNull(updatedContainer); + assertEquals(ExecutionType.OPPORTUNISTIC, + updatedContainer.getContainer().getExecutionType()); + assertEquals(orig.getResource(), + updatedContainer.getContainer().getResource()); + assertEquals(orig.getNodeId(), + updatedContainer.getContainer().getNodeId()); + assertEquals(orig.getVersion() + 1, + updatedContainer.getContainer().getVersion()); + } + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + updateContainerExecType(allocResponse, ExecutionType.OPPORTUNISTIC, + nmClient); + amClient.ask.clear(); + } + + private void updateContainerExecType(AllocateResponse allocResponse, + ExecutionType expectedExecType, NMClientImpl nmClient) + throws IOException, YarnException { + for (UpdatedContainer updatedContainer : allocResponse + .getUpdatedContainers()) { + Container container = updatedContainer.getContainer(); + nmClient.increaseContainerResource(container); + // NodeManager may still need some time to get the stable + // container status + while (true) { + ContainerStatus status = nmClient + .getContainerStatus(container.getId(), container.getNodeId()); + if (status.getExecutionType() == expectedExecType) { + break; + } + sleep(10); + } + } + } + + private void startContainer(AllocateResponse allocResponse, + NMClientImpl nmClient) throws IOException, YarnException { + // START THE CONTAINER IN NM + // build container launch context + Credentials ts = new Credentials(); + DataOutputBuffer dob = new 
DataOutputBuffer(); + ts.writeTokenStorageToStream(dob); + ByteBuffer securityTokens = + ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + // start a process long enough for increase/decrease action to take effect + ContainerLaunchContext clc = BuilderUtils.newContainerLaunchContext( + Collections.emptyMap(), + new HashMap(), Arrays.asList("sleep", "100"), + new HashMap(), securityTokens, + new HashMap()); + // start the containers and make sure they are in RUNNING state + for (Container container : allocResponse.getAllocatedContainers()) { + nmClient.startContainer(container, clc); + // NodeManager may still need some time to get the stable + // container status + while (true) { + ContainerStatus status = nmClient + .getContainerStatus(container.getId(), container.getNodeId()); + if (status.getState() == ContainerState.RUNNING) { + break; + } + sleep(10); + } + } + } + + private void testAllocation(final AMRMClientImpl amClient) throws YarnException, IOException { // setup container request @@ -1172,7 +1509,7 @@ public class TestAMRMClient { Set releases = new TreeSet(); amClient.getNMTokenCache().clearCache(); - Assert.assertEquals(0, amClient.getNMTokenCache().numberOfTokensInCache()); + assertEquals(0, amClient.getNMTokenCache().numberOfTokensInCache()); HashMap receivedNMTokens = new HashMap(); while (allocatedContainerCount < containersRequestedAny @@ -1192,7 +1529,7 @@ public class TestAMRMClient { for (NMToken token : allocResponse.getNMTokens()) { String nodeID = token.getNodeId().toString(); if (receivedNMTokens.containsKey(nodeID)) { - Assert.fail("Received token again for : " + nodeID); + fail("Received token again for : " + nodeID); } receivedNMTokens.put(nodeID, token.getToken()); } @@ -1204,7 +1541,7 @@ public class TestAMRMClient { } // Should receive atleast 1 token - Assert.assertTrue(receivedNMTokens.size() > 0 + assertTrue(receivedNMTokens.size() > 0 && receivedNMTokens.size() <= nodeCount); assertEquals(allocatedContainerCount, containersRequestedAny); @@ -1254,7 +1591,7 @@ public class TestAMRMClient { assertEquals(2, amClient.release.size()); assertEquals(3, amClient.ask.size()); snoopRequest = amClient.ask.iterator().next(); - // verify that the remove request made in between makeRequest and allocate + // verify that the remove request made in between makeRequest and allocate // has not been lost assertEquals(0, snoopRequest.getNumContainers()); @@ -1444,7 +1781,7 @@ public class TestAMRMClient { org.apache.hadoop.security.token.Token amrmToken_1 = getAMRMToken(); Assert.assertNotNull(amrmToken_1); - Assert.assertEquals(amrmToken_1.decodeIdentifier().getKeyId(), + assertEquals(amrmToken_1.decodeIdentifier().getKeyId(), amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId()); // Wait for enough time and make sure the roll_over happens @@ -1459,7 +1796,7 @@ public class TestAMRMClient { org.apache.hadoop.security.token.Token amrmToken_2 = getAMRMToken(); Assert.assertNotNull(amrmToken_2); - Assert.assertEquals(amrmToken_2.decodeIdentifier().getKeyId(), + assertEquals(amrmToken_2.decodeIdentifier().getKeyId(), amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId()); Assert.assertNotEquals(amrmToken_1, amrmToken_2); @@ -1474,7 +1811,7 @@ public class TestAMRMClient { AMRMTokenIdentifierForTest newVersionTokenIdentifier = new AMRMTokenIdentifierForTest(amrmToken_2.decodeIdentifier(), "message"); - Assert.assertEquals("Message is changed after set to newVersionTokenIdentifier", + assertEquals("Message is changed after set to 
newVersionTokenIdentifier", "message", newVersionTokenIdentifier.getMessage()); org.apache.hadoop.security.token.Token newVersionToken = new org.apache.hadoop.security.token.Token ( @@ -1530,10 +1867,10 @@ public class TestAMRMClient { .getBindAddress(), conf); } }).allocate(Records.newRecord(AllocateRequest.class)); - Assert.fail("The old Token should not work"); + fail("The old Token should not work"); } catch (Exception ex) { - Assert.assertTrue(ex instanceof InvalidToken); - Assert.assertTrue(ex.getMessage().contains( + assertTrue(ex instanceof InvalidToken); + assertTrue(ex.getMessage().contains( "Invalid AMRMToken from " + amrmToken_2.decodeIdentifier().getApplicationAttemptId())); } @@ -1560,7 +1897,7 @@ public class TestAMRMClient { org.apache.hadoop.security.token.Token token = iter.next(); if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) { if (result != null) { - Assert.fail("credentials has more than one AMRM token." + fail("credentials has more than one AMRM token." + " token1: " + result + " token2: " + token); } result = (org.apache.hadoop.security.token.Token) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java index 14df94abf35..6a063e68ce4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java @@ -151,13 +151,13 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest { YarnClient rmClient = YarnClient.createYarnClient()) { Configuration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true); - conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1500); - conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1500); - conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 1500); + conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 4500); + conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 4500); + conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 4500); // RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS should be at least // RM_AM_EXPIRY_INTERVAL_MS * 1.5 *3 conf.setInt( - YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 6); + YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 20); cluster.init(conf); cluster.start(); final Configuration yarnConf = cluster.getConfig(); @@ -198,7 +198,7 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest { lastToken = response.getAMRMToken(); // Time slot to be sure the AMRMProxy renew the token - Thread.sleep(1500); + Thread.sleep(4500); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java index 1034f7eacc5..9b79e2d6faf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java @@ -301,7 +301,6 @@ public class TestNMClient { assertTrue("The thrown exception is not expected", e.getMessage().contains("is not 
handled by this NodeManager")); } - // increaseContainerResource shouldn't be called before startContainer, // otherwise, NodeManager cannot find the container try { @@ -398,6 +397,8 @@ public class TestNMClient { "will be Rolled-back", Arrays.asList(new Integer[] {-1000})); testCommitContainer(container.getId(), true); testReInitializeContainer(container.getId(), clc, false); + testGetContainerStatus(container, i, ContainerState.RUNNING, + "will be Re-initialized", Arrays.asList(new Integer[] {-1000})); testCommitContainer(container.getId(), false); } else { testReInitializeContainer(container.getId(), clc, true); @@ -449,24 +450,21 @@ public class TestNMClient { ContainerState state, String diagnostics, List exitStatuses) throws YarnException, IOException { while (true) { - try { - ContainerStatus status = nmClient.getContainerStatus( - container.getId(), container.getNodeId()); - // NodeManager may still need some time to get the stable - // container status - if (status.getState() == state) { - assertEquals(container.getId(), status.getContainerId()); - assertTrue("" + index + ": " + status.getDiagnostics(), - status.getDiagnostics().contains(diagnostics)); - - assertTrue("Exit Statuses are supposed to be in: " + exitStatuses + - ", but the actual exit status code is: " + status.getExitStatus(), - exitStatuses.contains(status.getExitStatus())); - break; - } - Thread.sleep(100); - } catch (InterruptedException e) { - e.printStackTrace(); + sleep(250); + ContainerStatus status = nmClient.getContainerStatus( + container.getId(), container.getNodeId()); + // NodeManager may still need some time to get the stable + // container status + if (status.getState() == state) { + assertEquals(container.getId(), status.getContainerId()); + assertTrue("" + index + ": " + status.getDiagnostics(), + status.getDiagnostics().contains(diagnostics)); + + assertTrue("Exit Statuses are supposed to be in: " + exitStatuses + + ", but the actual exit status code is: " + + status.getExitStatus(), + exitStatuses.contains(status.getExitStatus())); + break; } } } @@ -476,10 +474,10 @@ public class TestNMClient { try { nmClient.increaseContainerResource(container); } catch (YarnException e) { - // NM container will only be in SCHEDULED state, so expect the increase - // action to fail. + // NM container increase container resource should fail without a version + // increase action to fail. if (!e.getMessage().contains( - "can only be changed when a container is in RUNNING state")) { + container.getId() + " has update version ")) { throw (AssertionError) (new AssertionError("Exception is not expected: " + e) .initCause(e)); @@ -559,9 +557,7 @@ public class TestNMClient { ContainerLaunchContext clc, boolean autoCommit) throws YarnException, IOException { try { - sleep(250); nmClient.reInitializeContainer(containerId, clc, autoCommit); - sleep(250); } catch (YarnException e) { // NM container will only be in SCHEDULED state, so expect the increase // action to fail. 
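[Editor's note: for context, the container-update flow that the client tests above exercise can be condensed as follows. This is a minimal, illustrative sketch assuming an AM that already holds an allocated OPPORTUNISTIC container; the variable names and surrounding setup are hypothetical, while the calls themselves (requestContainerUpdate, UpdateContainerRequest.newInstance, getUpdatedContainers, NMClient#increaseContainerResource) are the ones introduced or used by this patch.

    // Ask the RM to promote an allocated OPPORTUNISTIC container to GUARANTEED.
    amrmClient.requestContainerUpdate(container,
        UpdateContainerRequest.newInstance(container.getVersion(),
            container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
            null, ExecutionType.GUARANTEED));

    // The promotion arrives on a later allocate() heartbeat as an UpdatedContainer
    // carrying a new container token with a bumped container version.
    AllocateResponse response = amrmClient.allocate(0.1f);
    for (UpdatedContainer updated : response.getUpdatedContainers()) {
      // Hand the updated token to the NM so it applies the new execution type;
      // in this patch the NMClient still surfaces this via increaseContainerResource().
      nmClient.increaseContainerResource(updated.getContainer());
    }

Demotion follows the same pattern with ContainerUpdateType.DEMOTE_EXECUTION_TYPE and ExecutionType.OPPORTUNISTIC as the target type.]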
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index 7d7272a5818..41ef404be6b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -489,9 +489,8 @@ public class TestYarnClient { } reports = client.getApplications(appTypes, appStates); - Assert.assertEquals(reports.size(), 1); - Assert - .assertTrue((reports.get(0).getApplicationType().equals("NON-YARN"))); + Assert.assertEquals(1, reports.size()); + Assert.assertEquals("NON-YARN", reports.get(0).getApplicationType()); for (ApplicationReport report : reports) { Assert.assertTrue(expectedReports.contains(report)); } @@ -1501,8 +1500,8 @@ public class TestYarnClient { ReservationRequests reservationRequests = response.getReservationAllocationState().get(0) .getReservationDefinition().getReservationRequests(); - Assert.assertTrue( - reservationRequests.getInterpreter().toString().equals("R_ALL")); + Assert.assertEquals("R_ALL", + reservationRequests.getInterpreter().toString()); Assert.assertTrue(reservationRequests.getReservationResources().get(0) .getDuration() == duration); } finally { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index 505e20f509c..f17cf8c500b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -341,4 +341,26 @@ + + + + java9 + + 9 + + + + + maven-compiler-plugin + + + src/main/java/org/apache/hadoop/yarn/webapp/hamlet/** + src/main/test/org/apache/haodop/yarn/webapp/hamlet/** + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java index 873dcb780bc..7e471f34de3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagementProtocolPBClientImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; @@ -45,10 +47,10 @@ import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest; import 
org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CommitResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ContainerUpdateRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ContainerUpdateResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReInitializeContainerRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReInitializeContainerResponsePBImpl; @@ -56,8 +58,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ResourceLocalizationRe import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ResourceLocalizationResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RestartContainerResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb - .RollbackResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RollbackResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersRequestPBImpl; @@ -71,8 +72,8 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.proto.YarnProtos; import org.apache.hadoop.yarn.proto.YarnServiceProtos; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.IncreaseContainersResourceRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.ResourceLocalizationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SignalContainerRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersRequestProto; @@ -161,14 +162,35 @@ public class ContainerManagementProtocolPBClientImpl implements ContainerManagem } @Override + @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( IncreaseContainersResourceRequest request) throws YarnException, IOException { - IncreaseContainersResourceRequestProto requestProto = - ((IncreaseContainersResourceRequestPBImpl)request).getProto(); try { - return new IncreaseContainersResourceResponsePBImpl( - proxy.increaseContainersResource(null, requestProto)); + ContainerUpdateRequest req = + ContainerUpdateRequest.newInstance(request.getContainersToIncrease()); + ContainerUpdateRequestProto reqProto = + ((ContainerUpdateRequestPBImpl) req).getProto(); + ContainerUpdateResponse resp = new ContainerUpdateResponsePBImpl( + proxy.updateContainer(null, reqProto)); + return IncreaseContainersResourceResponse + .newInstance(resp.getSuccessfullyUpdatedContainers(), + resp.getFailedRequests()); + + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public ContainerUpdateResponse updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { + 
ContainerUpdateRequestProto requestProto = + ((ContainerUpdateRequestPBImpl)request).getProto(); + try { + return new ContainerUpdateResponsePBImpl( + proxy.updateContainer(null, requestProto)); } catch (ServiceException e) { RPCUtil.unwrapAndThrowException(e); return null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagementProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagementProtocolPBServiceImpl.java index fb8eead247b..68e164582d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagementProtocolPBServiceImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagementProtocolPBServiceImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerResponse; @@ -34,6 +36,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CommitResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ContainerUpdateRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ContainerUpdateResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesRequestPBImpl; @@ -74,6 +78,8 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponsePro import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.CommitResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateResponseProto; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -137,8 +143,12 @@ public class ContainerManagementProtocolPBServiceImpl implements ContainerManage IncreaseContainersResourceRequestPBImpl request = new IncreaseContainersResourceRequestPBImpl(proto); try { + ContainerUpdateResponse resp = real.updateContainer(ContainerUpdateRequest + .newInstance(request.getContainersToIncrease())); IncreaseContainersResourceResponse response = - real.increaseContainersResource(request); + IncreaseContainersResourceResponse + .newInstance(resp.getSuccessfullyUpdatedContainers(), + 
resp.getFailedRequests()); return ((IncreaseContainersResourceResponsePBImpl)response).getProto(); } catch (YarnException e) { throw new ServiceException(e); @@ -147,6 +157,22 @@ public class ContainerManagementProtocolPBServiceImpl implements ContainerManage } } + @Override + public ContainerUpdateResponseProto updateContainer( + RpcController controller, ContainerUpdateRequestProto proto) + throws ServiceException { + ContainerUpdateRequestPBImpl request = + new ContainerUpdateRequestPBImpl(proto); + try { + ContainerUpdateResponse response = real.updateContainer(request); + return ((ContainerUpdateResponsePBImpl)response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public SignalContainerResponseProto signalToContainer(RpcController arg0, SignalContainerRequestProto proto) throws ServiceException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ContainerUpdateRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ContainerUpdateRequestPBImpl.java new file mode 100644 index 00000000000..2736c8a14c8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ContainerUpdateRequestPBImpl.java @@ -0,0 +1,171 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import com.google.protobuf.TextFormat; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.records.Token; +import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateRequestProtoOrBuilder; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +/** + *
+ * An implementation of ContainerUpdateRequest.
+ * + * @see ContainerUpdateRequest + */ +@Private +@Unstable +public class ContainerUpdateRequestPBImpl extends ContainerUpdateRequest { + private ContainerUpdateRequestProto proto = + ContainerUpdateRequestProto.getDefaultInstance(); + private ContainerUpdateRequestProto.Builder builder = null; + private boolean viaProto = false; + + private List containersToUpdate = null; + + public ContainerUpdateRequestPBImpl() { + builder = ContainerUpdateRequestProto.newBuilder(); + } + + public ContainerUpdateRequestPBImpl(ContainerUpdateRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public List getContainersToUpdate() { + if (containersToUpdate != null) { + return containersToUpdate; + } + ContainerUpdateRequestProtoOrBuilder p = viaProto ? proto : builder; + List list = p.getUpdateContainerTokenList(); + containersToUpdate = new ArrayList<>(); + for (TokenProto c : list) { + containersToUpdate.add(convertFromProtoFormat(c)); + } + return containersToUpdate; + } + + @Override + public void setContainersToUpdate(List containersToUpdate) { + maybeInitBuilder(); + if (containersToUpdate == null) { + builder.clearUpdateContainerToken(); + } + this.containersToUpdate = containersToUpdate; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + public ContainerUpdateRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private Token convertFromProtoFormat(TokenProto p) { + return new TokenPBImpl(p); + } + + private TokenProto convertToProtoFormat(Token t) { + return ((TokenPBImpl) t).getProto(); + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ContainerUpdateRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.containersToUpdate != null) { + addUpdateContainersToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void addUpdateContainersToProto() { + maybeInitBuilder(); + builder.clearUpdateContainerToken(); + if (this.containersToUpdate == null) { + return; + } + Iterable iterable = new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + private Iterator iter = containersToUpdate.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public TokenProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + }; + builder.addAllUpdateContainerToken(iterable); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ContainerUpdateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ContainerUpdateResponsePBImpl.java new file mode 100644 index 00000000000..78e74ec035c --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ContainerUpdateResponsePBImpl.java @@ -0,0 +1,241 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import com.google.protobuf.TextFormat; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.SerializedException; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerExceptionMapProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateResponseProtoOrBuilder; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + + +/** + *
+ * An implementation of ContainerUpdateResponse.
+ * + * @see ContainerUpdateResponse + */ +@Private +@Unstable +public class ContainerUpdateResponsePBImpl extends ContainerUpdateResponse { + private ContainerUpdateResponseProto proto = + ContainerUpdateResponseProto.getDefaultInstance(); + private ContainerUpdateResponseProto.Builder builder = null; + private boolean viaProto = false; + private List succeededRequests = null; + private Map failedRequests = null; + + public ContainerUpdateResponsePBImpl() { + builder = ContainerUpdateResponseProto.newBuilder(); + } + + public ContainerUpdateResponsePBImpl(ContainerUpdateResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public List getSuccessfullyUpdatedContainers() { + initSucceededRequests(); + return this.succeededRequests; + } + + @Override + public void setSuccessfullyUpdatedContainers(List succeeded) { + maybeInitBuilder(); + if (succeeded == null) { + builder.clearSucceededRequests(); + } + this.succeededRequests = succeeded; + } + + @Override + public Map getFailedRequests() { + initFailedRequests(); + return this.failedRequests; + } + + @Override + public void setFailedRequests( + Map failedRequests) { + maybeInitBuilder(); + if (failedRequests == null) { + builder.clearFailedRequests(); + } + this.failedRequests = failedRequests; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + public ContainerUpdateResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void initSucceededRequests() { + if (this.succeededRequests != null) { + return; + } + ContainerUpdateResponseProtoOrBuilder p = viaProto ? proto : builder; + List list = p.getSucceededRequestsList(); + this.succeededRequests = new ArrayList(); + for (ContainerIdProto c : list) { + this.succeededRequests.add(convertFromProtoFormat(c)); + } + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ContainerUpdateResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void initFailedRequests() { + if (this.failedRequests != null) { + return; + } + ContainerUpdateResponseProtoOrBuilder p = viaProto ? 
proto : builder; + List protoList = p.getFailedRequestsList(); + this.failedRequests = new HashMap(); + for (ContainerExceptionMapProto ce : protoList) { + this.failedRequests.put(convertFromProtoFormat(ce.getContainerId()), + convertFromProtoFormat(ce.getException())); + } + } + + private void mergeLocalToBuilder() { + if (this.succeededRequests != null) { + addSucceededRequestsToProto(); + } + if (this.failedRequests != null) { + addFailedRequestsToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void addSucceededRequestsToProto() { + maybeInitBuilder(); + builder.clearSucceededRequests(); + if (this.succeededRequests == null) { + return; + } + Iterable iterable = new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + private Iterator iter = succeededRequests.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public ContainerIdProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + }; + builder.addAllSucceededRequests(iterable); + } + + private void addFailedRequestsToProto() { + maybeInitBuilder(); + builder.clearFailedRequests(); + if (this.failedRequests == null) { + return; + } + List protoList = + new ArrayList(); + + for (Map.Entry entry : this.failedRequests + .entrySet()) { + protoList.add(ContainerExceptionMapProto.newBuilder() + .setContainerId(convertToProtoFormat(entry.getKey())) + .setException(convertToProtoFormat(entry.getValue())).build()); + } + builder.addAllFailedRequests(protoList); + } + + private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) { + return new ContainerIdPBImpl(p); + } + + private ContainerIdProto convertToProtoFormat(ContainerId t) { + return ((ContainerIdPBImpl) t).getProto(); + } + + private SerializedExceptionPBImpl convertFromProtoFormat( + SerializedExceptionProto p) { + return new SerializedExceptionPBImpl(p); + } + + private SerializedExceptionProto convertToProtoFormat(SerializedException t) { + return ((SerializedExceptionPBImpl) t).getProto(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java index 0232debb352..5b028e1925f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java @@ -84,7 +84,7 @@ public class ClientRMProxy extends RMProxy { @Private @Override - protected InetSocketAddress getRMAddress(YarnConfiguration conf, + public InetSocketAddress getRMAddress(YarnConfiguration conf, Class protocol) throws IOException { if (protocol == ApplicationClientProtocol.class) { return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS, @@ -111,7 +111,7 @@ public class ClientRMProxy extends RMProxy { @Private @Override - protected void checkAllowedProtocols(Class protocol) { + public void checkAllowedProtocols(Class protocol) { Preconditions.checkArgument( protocol.isAssignableFrom(ClientRMProtocols.class), "RM does not support this client protocol"); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java index 8aa4107a98b..f7cb47a9dc8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java @@ -71,14 +71,14 @@ public class RMProxy { * Verify the passed protocol is supported. */ @Private - protected void checkAllowedProtocols(Class protocol) {} + public void checkAllowedProtocols(Class protocol) {} /** * Get the ResourceManager address from the provided Configuration for the * given protocol. */ @Private - protected InetSocketAddress getRMAddress( + public InetSocketAddress getRMAddress( YarnConfiguration conf, Class protocol) throws IOException { throw new UnsupportedOperationException("This method should be invoked " + "from an instance of ClientRMProxy or ServerRMProxy"); @@ -97,7 +97,8 @@ public class RMProxy { YarnConfiguration conf = (configuration instanceof YarnConfiguration) ? (YarnConfiguration) configuration : new YarnConfiguration(configuration); - RetryPolicy retryPolicy = createRetryPolicy(conf, HAUtil.isHAEnabled(conf)); + RetryPolicy retryPolicy = createRetryPolicy(conf, + (HAUtil.isHAEnabled(conf) || HAUtil.isFederationFailoverEnabled(conf))); return newProxyInstance(conf, protocol, instance, retryPolicy); } @@ -123,7 +124,7 @@ public class RMProxy { private static T newProxyInstance(final YarnConfiguration conf, final Class protocol, RMProxy instance, RetryPolicy retryPolicy) throws IOException{ - if (HAUtil.isHAEnabled(conf)) { + if (HAUtil.isHAEnabled(conf) || HAUtil.isFederationEnabled(conf)) { RMFailoverProxyProvider provider = instance.createRMFailoverProxyProvider(conf, protocol); return (T) RetryProxy.create(protocol, provider, retryPolicy); @@ -140,7 +141,7 @@ public class RMProxy { * RetryProxy. 
*/ @Private - T getProxy(final Configuration conf, + public T getProxy(final Configuration conf, final Class protocol, final InetSocketAddress rmAddress) throws IOException { return user.doAs( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java index fc3385bf202..b7bb48ebda3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java @@ -145,9 +145,12 @@ public class FileSystemTimelineWriter extends TimelineWriter{ new LogFDsCache(flushIntervalSecs, cleanIntervalSecs, ttl, timerTaskTTL); - this.isAppendSupported = - conf.getBoolean( - YarnConfiguration.TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND, true); + this.isAppendSupported = conf.getBoolean( + YarnConfiguration.TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND, true); + + boolean storeInsideUserDir = conf.getBoolean( + YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR, + false); objMapper = createObjectMapper(); @@ -157,8 +160,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{ YarnConfiguration .DEFAULT_TIMELINE_SERVICE_CLIENT_INTERNAL_ATTEMPT_DIR_CACHE_SIZE); - attemptDirCache = - new AttemptDirCache(attemptDirCacheSize, fs, activePath); + attemptDirCache = new AttemptDirCache(attemptDirCacheSize, fs, activePath, + authUgi, storeInsideUserDir); if (LOG.isDebugEnabled()) { StringBuilder debugMSG = new StringBuilder(); @@ -171,6 +174,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{ + "=" + ttl + ", " + YarnConfiguration.TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND + "=" + isAppendSupported + ", " + + YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR + + "=" + storeInsideUserDir + ", " + YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR + "=" + activePath); @@ -946,8 +951,11 @@ public class FileSystemTimelineWriter extends TimelineWriter{ private final Map attemptDirCache; private final FileSystem fs; private final Path activePath; + private final UserGroupInformation authUgi; + private final boolean storeInsideUserDir; - public AttemptDirCache(int cacheSize, FileSystem fs, Path activePath) { + public AttemptDirCache(int cacheSize, FileSystem fs, Path activePath, + UserGroupInformation ugi, boolean storeInsideUserDir) { this.attemptDirCacheSize = cacheSize; this.attemptDirCache = new LinkedHashMap( @@ -961,6 +969,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{ }; this.fs = fs; this.activePath = activePath; + this.authUgi = ugi; + this.storeInsideUserDir = storeInsideUserDir; } public Path getAppAttemptDir(ApplicationAttemptId attemptId) @@ -993,8 +1003,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{ } private Path createApplicationDir(ApplicationId appId) throws IOException { - Path appDir = - new Path(activePath, appId.toString()); + Path appRootDir = getAppRootDir(authUgi.getShortUserName()); + Path appDir = new Path(appRootDir, appId.toString()); if (FileSystem.mkdirs(fs, appDir, new FsPermission(APP_LOG_DIR_PERMISSIONS))) { if (LOG.isDebugEnabled()) { @@ -1003,5 +1013,19 @@ public class FileSystemTimelineWriter extends TimelineWriter{ } return appDir; } + + 
private Path getAppRootDir(String user) throws IOException { + if (!storeInsideUserDir) { + return activePath; + } + Path userDir = new Path(activePath, user); + if (FileSystem.mkdirs(fs, userDir, + new FsPermission(APP_LOG_DIR_PERMISSIONS))) { + if (LOG.isDebugEnabled()) { + LOG.debug("New user directory created - " + userDir); + } + } + return userDir; + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AsyncCallback.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AsyncCallback.java new file mode 100644 index 00000000000..b4f75c9e267 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AsyncCallback.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.util; + +/** + * Generic interface that can be used for calling back when a corresponding + * asynchronous operation completes. + * + * @param parameter type for the callback + */ +public interface AsyncCallback { + /** + * This method is called back when the corresponding asynchronous operation + * completes. + * + * @param response response of the callback + */ + void callback(T response); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LRUCacheHashMap.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LRUCacheHashMap.java new file mode 100644 index 00000000000..7cb4e1b5015 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/LRUCacheHashMap.java @@ -0,0 +1,49 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.util; + +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * LRU cache with a configurable maximum cache size and access order. 
+ */ +public class LRUCacheHashMap extends LinkedHashMap { + + private static final long serialVersionUID = 1L; + + // Maximum size of the cache + private int maxSize; + + /** + * Constructor. + * + * @param maxSize max size of the cache + * @param accessOrder true for access-order, false for insertion-order + */ + public LRUCacheHashMap(int maxSize, boolean accessOrder) { + super(maxSize, 0.75f, accessOrder); + this.maxSize = maxSize; + } + + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + return size() > maxSize; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java index b04bc5dd1be..94063ed222b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java @@ -75,12 +75,12 @@ public class ResponseInfo implements Iterable { return about; } - public ResponseInfo _(String key, Object value) { + public ResponseInfo __(String key, Object value) { items.add(Item.of(key, value, false)); return this; } - public ResponseInfo _(String key, String url, Object anchor) { + public ResponseInfo __(String key, String url, Object anchor) { if (url == null) { items.add(Item.of(key, anchor, false)); } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java index 9b5cbd111c6..4376b6e6fcb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/HelloWorld.java @@ -41,11 +41,11 @@ public class HelloWorld { } public static class HelloView extends HtmlPage { - @Override protected void render(Page.HTML<_> html) { + @Override protected void render(Page.HTML<__> html) { html. // produces valid html 4.01 strict title($("title")). p("#hello-for-css"). - _($("title"))._()._(); + __($("title")).__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java index 3973f68dcf3..e13a883541b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/example/MyApp.java @@ -60,12 +60,12 @@ public class MyApp { public static class MyView extends HtmlPage { // You can inject the app in views if needed. @Override - public void render(Page.HTML<_> html) { + public void render(Page.HTML<__> html) { html. title("My App"). p("#content_id_for_css_styling"). - _("You can have", $("anything"))._()._(); - // Note, there is no _(); (to parent element) method at root level. + __("You can have", $("anything")).__().__(); + // Note, there is no __(); (to parent element) method at root level. // and IDE provides instant feedback on what level you're on in // the auto-completion drop-downs. 
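The two utility classes added earlier in this patch, org.apache.hadoop.yarn.util.AsyncCallback and org.apache.hadoop.yarn.util.LRUCacheHashMap, are small building blocks rather than complete features, so a usage sketch may help. The example below is illustrative only and is not part of the patch: the CachedLookupExample class, its key/value types, and the executor wiring are assumptions; only the AsyncCallback and LRUCacheHashMap APIs come from the new files above.

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

import org.apache.hadoop.yarn.util.AsyncCallback;
import org.apache.hadoop.yarn.util.LRUCacheHashMap;

// Hypothetical caller of the two new utility classes; not part of the patch.
public class CachedLookupExample {
  // Keep at most 100 entries; with accessOrder = true the least recently
  // accessed entry is evicted once the limit is exceeded.
  private final Map<String, String> cache =
      Collections.synchronizedMap(new LRUCacheHashMap<String, String>(100, true));
  private final Executor executor = Executors.newSingleThreadExecutor();

  public void lookup(final String key, final AsyncCallback<String> callback) {
    String cached = cache.get(key);
    if (cached != null) {
      callback.callback(cached);          // complete immediately from the cache
      return;
    }
    executor.execute(new Runnable() {
      @Override
      public void run() {
        String value = "value-for-" + key;  // stand-in for the real lookup
        cache.put(key, value);
        callback.callback(value);           // report completion to the caller
      }
    });
  }
}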
} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java index 58d9066b39c..72138653612 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/Hamlet.java @@ -29,6 +29,10 @@ import java.util.EnumSet; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.yarn.webapp.SubView; +/** + * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. + */ +@Deprecated @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) public class Hamlet extends HamletImpl implements HamletSpec._Html { public Hamlet(PrintWriter out, int nestLevel, boolean wasInline) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java index 5acb3f3ee03..8a2db8f9812 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletGen.java @@ -43,7 +43,9 @@ import org.slf4j.LoggerFactory; /** * Generates a specific hamlet implementation class from a spec class * using a generic hamlet implementation class. + * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. */ +@Deprecated @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) public class HamletGen { static final Logger LOG = LoggerFactory.getLogger(HamletGen.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java index d792d31c38c..289ad704cc5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java @@ -43,7 +43,9 @@ import org.apache.hadoop.yarn.webapp.WebAppException; * optimized to use a thread-local element pool. * * Prints HTML as it builds. So the order is important. + * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. */ +@Deprecated @InterfaceAudience.Private public class HamletImpl extends HamletSpec { private static final String INDENT_CHARS = " "; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java index 081516cb14b..e3bb6d1add8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletSpec.java @@ -43,7 +43,9 @@ import org.apache.hadoop.yarn.webapp.SubView; * UPPERCASE, corresponding to an element definition in the DTD. 
$lowercase is * used as attribute builder methods to differentiate from element builder * methods. + * @deprecated Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. */ +@Deprecated @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) public class HamletSpec { // The enum values are lowercase for better compression, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java index 3286ab551ed..4d0cf4950c5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/package-info.java @@ -15,6 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + +/** + * Deprecated. + * Use org.apache.hadoop.yarn.webapp.hamlet2 package instead. + */ +@Deprecated @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) package org.apache.hadoop.yarn.webapp.hamlet; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java new file mode 100644 index 00000000000..05e1b79c357 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/Hamlet.java @@ -0,0 +1,30557 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +// Generated by HamletGen. Do NOT edit! 
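The @Deprecated markers above exist because the original hamlet package uses a bare underscore as an identifier (the "_" methods and the "_" terminal type), which is no longer a legal identifier from Java 9 onward; the new hamlet2 package renames these to double underscores. A minimal before/after sketch of a view migration, modeled on the HelloWorld and MyApp changes in this patch (the render method shown here is hypothetical, not taken from the patch):

// Before, against the deprecated org.apache.hadoop.yarn.webapp.hamlet package:
//
//   @Override protected void render(Page.HTML<_> html) {
//     html.
//       title("Status").
//       p("#status").
//         _("All services running")._()._();
//   }

// After, against org.apache.hadoop.yarn.webapp.hamlet2: every "_" becomes "__".
@Override protected void render(Page.HTML<__> html) {
  html.
    title("Status").
    p("#status").
      __("All services running").__().__();
}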
+package org.apache.hadoop.yarn.webapp.hamlet2; +import static java.util.EnumSet.of; +import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.EOpt.ENDTAG; +import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.EOpt.INLINE; +import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.EOpt.PRE; + +import java.io.PrintWriter; +import java.util.EnumSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.yarn.webapp.SubView; + +@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) +public class Hamlet extends HamletImpl implements HamletSpec._Html { + public Hamlet(PrintWriter out, int nestLevel, boolean wasInline) { + super(out, nestLevel, wasInline); + } + + static EnumSet opt(boolean endTag, boolean inline, boolean pre) { + EnumSet opts = of(ENDTAG); + if (!endTag) opts.remove(ENDTAG); + if (inline) opts.add(INLINE); + if (pre) opts.add(PRE); + return opts; + } + + public class HTML extends EImp implements HamletSpec.HTML { + public HTML(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public HTML $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public HTML $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public HEAD> head() { + closeAttrs(); + return head_(this, false); + } + + @Override + public BODY> body() { + closeAttrs(); + return body_(this, false); + } + + @Override + public BODY> body(String selector) { + return setSelector(body(), selector); + } + + @Override + public BASE> base() { + closeAttrs(); + return base_(this, false); + } + + @Override + public HTML base(String href) { + return base().$href(href).__(); + } + + @Override + public TITLE> title() { + closeAttrs(); + return title_(this, false); + } + + @Override + public HTML title(String cdata) { + return title().__(cdata).__(); + } + + @Override + public STYLE> style() { + closeAttrs(); + return style_(this, false); + } + + @Override + public HTML style(Object... 
lines) { + return style().$type("text/css").__(lines).__(); + } + + @Override + public LINK> link() { + closeAttrs(); + return link_(this, false); + } + + @Override + public HTML link(String href) { + return setLinkHref(link(), href).__(); + } + + @Override + public META> meta() { + closeAttrs(); + return meta_(this, false); + } + + @Override + public HTML meta(String name, String content) { + return meta().$name(name).$content(content).__(); + } + + @Override + public HTML meta_http(String header, String content) { + return meta().$http_equiv(header).$content(content).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, false); + } + + @Override + public HTML script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public HTML address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public HTML __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public HTML hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public HTML h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public HTML h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public HTML h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public HTML h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public HTML h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public HTML h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public HTML h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public HTML h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public 
HTML h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public HTML h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public HTML h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public HTML h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public HTML ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public HTML del(String cdata) { + return del().__(cdata).__(); + } + } + + private HEAD head_(T e, boolean inline) { + return new HEAD("head", e, opt(true, inline, false)); } + + private BODY body_(T e, boolean inline) { + return new BODY("body", e, opt(true, inline, false)); } + + public class SCRIPT extends EImp implements HamletSpec.SCRIPT { + public SCRIPT(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public SCRIPT $type(String value) { + addAttr("type", value); + return this; + } + + @Override + public SCRIPT $src(String value) { + addAttr("src", value); + return this; + } + + @Override + public SCRIPT $charset(String value) { + addAttr("charset", value); + return this; + } + + @Override + public SCRIPT $defer(String value) { + addAttr("defer", value); + return this; + } + + @Override + public SCRIPT __(Object... lines) { + _p(false, lines); + return this; + } + } + + public class STYLE extends EImp implements HamletSpec.STYLE { + public STYLE(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public STYLE $type(String value) { + addAttr("type", value); + return this; + } + + @Override + public STYLE $media(EnumSet value) { + addMediaAttr("media", value); + return this; + } + + @Override + public STYLE $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public STYLE $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public STYLE $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public STYLE __(Object... 
lines) { + _p(false, lines); + return this; + } + } + + public class META extends EImp implements HamletSpec.META { + public META(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public META $http_equiv(String value) { + addAttr("http-equiv", value); + return this; + } + + @Override + public META $name(String value) { + addAttr("name", value); + return this; + } + + @Override + public META $content(String value) { + addAttr("content", value); + return this; + } + + @Override + public META $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public META $dir(Dir value) { + addAttr("dir", value); + return this; + } + } + + public class BASE extends EImp implements HamletSpec.BASE { + public BASE(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public BASE $href(String value) { + addAttr("href", value); + return this; + } + } + + public class TITLE extends EImp implements HamletSpec.TITLE { + public TITLE(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public TITLE $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public TITLE $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public TITLE __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public TITLE _r(Object... lines) { + _p(false, lines); + return this; + } + } + + public class HEAD extends EImp implements HamletSpec.HEAD { + public HEAD(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public HEAD $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public HEAD $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public BASE> base() { + closeAttrs(); + return base_(this, false); + } + + @Override + public HEAD base(String href) { + return base().$href(href).__(); + } + + @Override + public TITLE> title() { + closeAttrs(); + return title_(this, false); + } + + @Override + public HEAD title(String cdata) { + return title().__(cdata).__(); + } + + @Override + public STYLE> style() { + closeAttrs(); + return style_(this, false); + } + + @Override + public HEAD style(Object... 
lines) { + return style().$type("text/css").__(lines).__(); + } + + @Override + public LINK> link() { + closeAttrs(); + return link_(this, false); + } + + @Override + public HEAD link(String href) { + return setLinkHref(link(), href).__(); + } + + @Override + public META> meta() { + closeAttrs(); + return meta_(this, false); + } + + @Override + public HEAD meta(String name, String content) { + return meta().$name(name).$content(content).__(); + } + + @Override + public HEAD meta_http(String header, String content) { + return meta().$http_equiv(header).$content(content).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, false); + } + + @Override + public HEAD script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + } + + private BASE base_(T e, boolean inline) { + return new BASE("base", e, opt(false, inline, false)); } + + private TITLE title_(T e, boolean inline) { + return new TITLE("title", e, opt(true, inline, false)); } + + public class TD extends EImp implements HamletSpec.TD { + public TD(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public TD $headers(String value) { + addAttr("headers", value); + return this; + } + + @Override + public TD $scope(Scope value) { + addAttr("scope", value); + return this; + } + + @Override + public TD $rowspan(int value) { + addAttr("rowspan", value); + return this; + } + + @Override + public TD $colspan(int value) { + addAttr("colspan", value); + return this; + } + + @Override + public TD $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public TD $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public TD $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public TD $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public TD $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public TD $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public TD $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public TD $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public TD $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public TD $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public TD $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public TD $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public TD $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public TD $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public TD $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public TD $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public TD address(String cdata) { + 
return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public TD __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public TD hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public TD h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public TD h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public TD h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public TD h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public TD h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public TD h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public TD h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public TD h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public TD h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public TD h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public TD h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public TD h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return 
fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public TD __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public TD _r(Object... lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public TD b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public TD b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public TD i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public TD i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public TD small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public TD small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public TD em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public TD em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public TD strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public TD strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public TD dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public TD dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public TD code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public TD code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public TD samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public TD samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public TD kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public TD kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public TD var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public TD var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public TD cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public TD cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + 
+ @Override + public TD abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public TD abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public TD a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public TD a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public TD img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public TD sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public TD sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public TD sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public TD sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public TD q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public TD q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public TD br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public TD bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public TD span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public TD span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public TD script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public TD ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public TD del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public TD label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, 
true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public TD textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public TD button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class TH extends EImp implements HamletSpec.TH { + public TH(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public TH $headers(String value) { + addAttr("headers", value); + return this; + } + + @Override + public TH $scope(Scope value) { + addAttr("scope", value); + return this; + } + + @Override + public TH $rowspan(int value) { + addAttr("rowspan", value); + return this; + } + + @Override + public TH $colspan(int value) { + addAttr("colspan", value); + return this; + } + + @Override + public TH $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public TH $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public TH $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public TH $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public TH $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public TH $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public TH $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public TH $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public TH $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public TH $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public TH $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public TH $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public TH $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public TH $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public TH $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public TH $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public TH address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + 
public TH __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public TH hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public TH h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public TH h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public TH h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public TH h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public TH h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public TH h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public TH h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public TH h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public TH h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public TH h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public TH h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public TH h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public TH __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public TH _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public TH b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public TH b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public TH i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public TH i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public TH small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public TH small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public TH em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public TH em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public TH strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public TH strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public TH dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public TH dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public TH code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public TH code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public TH samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public TH samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public TH kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public TH kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public TH var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public TH var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public TH cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public TH cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public TH abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public TH abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public TH a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public TH a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public TH img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public TH sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public TH sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public TH sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public TH sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public TH q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public TH q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public TH br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public TH bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public TH span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public TH span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public TH script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public TH ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public TH del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public TH label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
+      return setSelector(textarea(), selector);
+    }
+
+    @Override
+    public TEXTAREA<TH<T>> textarea() {
+      closeAttrs();
+      return textarea_(this, true);
+    }
+
+    @Override
+    public TH<T> textarea(String selector, String cdata) {
+      return setSelector(textarea(), selector).__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<TH<T>> button() {
+      closeAttrs();
+      return button_(this, true);
+    }
+
+    @Override
+    public BUTTON<TH<T>> button(String selector) {
+      return setSelector(button(), selector);
+    }
+
+    @Override
+    public TH<T> button(String selector, String cdata) {
+      return setSelector(button(), selector).__(cdata).__();
+    }
+  }
+
+  public class TR<T extends __> extends EImp<T> implements HamletSpec.TR {
+    public TR(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public TR<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public TR<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public TH<TR<T>> th() {
+      closeAttrs();
+      return th_(this, false);
+    }
+
+    @Override
+    public TR<T> th(String cdata) {
+      return th().__(cdata).__();
+    }
+
+    @Override
+    public TR<T> th(String selector, String cdata) {
+      return setSelector(th(), selector).__(cdata).__();
+    }
+
+    public TR<T> th(String selector, String title, String cdata) {
+      return setSelector(th(), selector).$title(title).__(cdata).__();
+    }
+
+    @Override
+    public TD<TR<T>> td() {
+      closeAttrs();
+      return td_(this, false);
+    }
+
+    @Override
+    public TR<T> td(String cdata) {
+      return td().__(cdata).__();
+    }
+
+    @Override
+    public TR<T> td(String selector, String cdata) {
+      return setSelector(td(), selector).__(cdata).__();
+    }
+  }
+
+  private <T extends __> TH<T> th_(T e, boolean inline) {
+    return new TH<T>("th", e, opt(true, inline, false)); }
+
+  private <T extends __> TD<T> td_(T e, boolean inline) {
+    return new TD<T>("td", e, opt(true, inline, false)); }
+
+  public class COL<T extends __> extends EImp<T> implements HamletSpec.COL {
+    public COL(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public COL<T> $span(int value) {
+      addAttr("span", value);
+      return this;
+    }
+
+    @Override
+    public COL<T> $id(String value) {
+      addAttr("id", value);
+      return
this; + } + + @Override + public COL $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public COL $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public COL $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public COL $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public COL $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public COL $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public COL $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public COL $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public COL $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public COL $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public COL $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public COL $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public COL $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public COL $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public COL $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + } + + public class COLGROUP extends EImp implements HamletSpec.COLGROUP { + public COLGROUP(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public COLGROUP $span(int value) { + addAttr("span", value); + return this; + } + + @Override + public COLGROUP $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public COLGROUP $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public COLGROUP $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public COLGROUP $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public COLGROUP $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public COLGROUP $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public COLGROUP $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public COLGROUP $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public COLGROUP $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public COLGROUP $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public COLGROUP $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public COLGROUP $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public COLGROUP $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public COLGROUP $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public COLGROUP $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public COLGROUP $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public COL> col() { + closeAttrs(); + return 
col_(this, false); + } + + @Override + public COLGROUP col(String selector) { + return setSelector(col(), selector).__(); + } + } + + public class TBODY extends EImp implements HamletSpec.TBODY { + public TBODY(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public TBODY $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public TBODY $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public TBODY $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public TBODY $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public TBODY $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public TBODY $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public TBODY $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public TBODY $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public TBODY $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public TBODY $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public TBODY $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public TBODY $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public TBODY $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public TBODY $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public TBODY $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public TBODY $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TR> tr() { + closeAttrs(); + return tr_(this, false); + } + + @Override + public TR> tr(String selector) { + return setSelector(tr(), selector); + } + } + + public class TFOOT extends EImp implements HamletSpec.TFOOT { + public TFOOT(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public TFOOT $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public TFOOT $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public TFOOT $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public TFOOT $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public TFOOT $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public TFOOT $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public TFOOT $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public TFOOT $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public TFOOT $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public TFOOT $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public TFOOT $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public TFOOT $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public TFOOT $onmouseout(String value) { + addAttr("onmouseout", 
value); + return this; + } + + @Override + public TFOOT $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public TFOOT $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public TFOOT $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TR> tr() { + closeAttrs(); + return tr_(this, false); + } + + @Override + public TR> tr(String selector) { + return setSelector(tr(), selector); + } + } + + public class THEAD extends EImp implements HamletSpec.THEAD { + public THEAD(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public THEAD $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public THEAD $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public THEAD $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public THEAD $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public THEAD $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public THEAD $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public THEAD $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public THEAD $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public THEAD $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public THEAD $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public THEAD $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public THEAD $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public THEAD $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public THEAD $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public THEAD $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public THEAD $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TR> tr() { + closeAttrs(); + return tr_(this, false); + } + + @Override + public TR> tr(String selector) { + return setSelector(tr(), selector); + } + } + + public class CAPTION extends EImp implements HamletSpec.CAPTION { + public CAPTION(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public CAPTION $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public CAPTION $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public CAPTION $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public CAPTION $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public CAPTION $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public CAPTION $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public CAPTION $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public CAPTION $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public CAPTION $onmousedown(String value) { + addAttr("onmousedown", value); + return 
this; + } + + @Override + public CAPTION $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public CAPTION $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public CAPTION $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public CAPTION $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public CAPTION $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public CAPTION $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public CAPTION $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public CAPTION __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public CAPTION _r(Object... lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public CAPTION b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public CAPTION b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public CAPTION i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public CAPTION i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public CAPTION small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public CAPTION small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public CAPTION em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public CAPTION em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public CAPTION strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public CAPTION strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public CAPTION dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public CAPTION dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public CAPTION code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public CAPTION code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public CAPTION samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public CAPTION samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public CAPTION kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public CAPTION kbd(String selector, 
String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public CAPTION var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public CAPTION var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public CAPTION cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public CAPTION cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public CAPTION abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public CAPTION abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public CAPTION a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public CAPTION a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public CAPTION img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public CAPTION sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public CAPTION sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public CAPTION sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public CAPTION sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public CAPTION q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public CAPTION q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public CAPTION br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public CAPTION bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public CAPTION span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public CAPTION span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + 
return script_(this, true); + } + + @Override + public CAPTION script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public CAPTION ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public CAPTION del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public CAPTION label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public CAPTION textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public CAPTION button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class TABLE extends EImp implements HamletSpec.TABLE { + public TABLE(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public TABLE $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public TABLE $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public TABLE $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public TABLE $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public TABLE $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public TABLE $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public TABLE $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public TABLE $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public TABLE $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public TABLE $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public TABLE $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public TABLE $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public TABLE $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public TABLE $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public TABLE $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public TABLE $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public 
TABLE caption(String cdata) { + return caption().__(cdata).__(); + } + + @Override + public CAPTION> caption() { + closeAttrs(); + return caption_(this, false); + } + + @Override + public COLGROUP> colgroup() { + closeAttrs(); + return colgroup_(this, false); + } + + @Override + public THEAD> thead(String selector) { + return setSelector(thead(), selector); + } + + @Override + public THEAD> thead() { + closeAttrs(); + return thead_(this, false); + } + + @Override + public TFOOT> tfoot() { + closeAttrs(); + return tfoot_(this, false); + } + + @Override + public TFOOT> tfoot(String selector) { + return setSelector(tfoot(), selector); + } + + @Override + public TBODY> tbody() { + closeAttrs(); + return tbody_(this, false); + } + + @Override + public TBODY> tbody(String selector) { + return setSelector(tbody(), selector); + } + + @Override + public TR> tr() { + closeAttrs(); + return tr_(this, false); + } + + @Override + public TR> tr(String selector) { + return setSelector(tr(), selector); + } + + @Override + public COL> col() { + closeAttrs(); + return col_(this, false); + } + + @Override + public TABLE col(String selector) { + return setSelector(col(), selector).__(); + } + } + + private CAPTION caption_(T e, boolean inline) { + return new CAPTION("caption", e, opt(true, inline, false)); } + + private COLGROUP colgroup_(T e, boolean inline) { + return new COLGROUP("colgroup", e, opt(false, inline, false)); } + + private THEAD thead_(T e, boolean inline) { + return new THEAD("thead", e, opt(true, inline, false)); } + + private TFOOT tfoot_(T e, boolean inline) { + return new TFOOT("tfoot", e, opt(true, inline, false)); } + + private TBODY tbody_(T e, boolean inline) { + return new TBODY("tbody", e, opt(true, inline, false)); } + + private COL col_(T e, boolean inline) { + return new COL("col", e, opt(false, inline, false)); } + + private TR tr_(T e, boolean inline) { + return new TR("tr", e, opt(true, inline, false)); } + + public class BUTTON extends EImp implements HamletSpec.BUTTON { + public BUTTON(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public BUTTON $type(ButtonType value) { + addAttr("type", value); + return this; + } + + @Override + public BUTTON $name(String value) { + addAttr("name", value); + return this; + } + + @Override + public BUTTON $value(String value) { + addAttr("value", value); + return this; + } + + @Override + public BUTTON $disabled() { + addAttr("disabled", null); + return this; + } + + @Override + public BUTTON $tabindex(int value) { + addAttr("tabindex", value); + return this; + } + + @Override + public BUTTON $accesskey(String value) { + addAttr("accesskey", value); + return this; + } + + @Override + public BUTTON $onfocus(String value) { + addAttr("onfocus", value); + return this; + } + + @Override + public BUTTON $onblur(String value) { + addAttr("onblur", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public BUTTON address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public BUTTON __(Class cls) { + _v(cls); + return this; + } + + @Override + public 
HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public BUTTON hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BUTTON h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public BUTTON h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public BUTTON h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public BUTTON h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public BUTTON h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public BUTTON h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public BUTTON h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public BUTTON h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public BUTTON h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public BUTTON h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public BUTTON h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public BUTTON h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public BUTTON __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public BUTTON _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public BUTTON b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public BUTTON b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public BUTTON i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public BUTTON i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public BUTTON small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public BUTTON small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public BUTTON em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public BUTTON em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public BUTTON strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public BUTTON strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public BUTTON dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public BUTTON dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public BUTTON code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public BUTTON code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public BUTTON samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public BUTTON samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public BUTTON kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public BUTTON kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public BUTTON var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public BUTTON var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public BUTTON cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public BUTTON cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public BUTTON abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public BUTTON abbr(String selector, String cdata) { + return 
setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public BUTTON q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public BUTTON q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public BUTTON br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public BUTTON bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public BUTTON span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public BUTTON span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public BUTTON script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public BUTTON ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public BUTTON del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public BUTTON img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public BUTTON sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public BUTTON sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public BUTTON sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public BUTTON sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public BUTTON $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public BUTTON $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public BUTTON $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public BUTTON $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public BUTTON $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public BUTTON $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public BUTTON $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public BUTTON $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public BUTTON $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + 
@Override + public BUTTON $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public BUTTON $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public BUTTON $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public BUTTON $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public BUTTON $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public BUTTON $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public BUTTON $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + } + + public class LEGEND extends EImp implements HamletSpec.LEGEND { + public LEGEND(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public LEGEND $accesskey(String value) { + addAttr("accesskey", value); + return this; + } + + @Override + public LEGEND $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public LEGEND $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public LEGEND $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public LEGEND $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public LEGEND $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public LEGEND $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public LEGEND $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public LEGEND $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public LEGEND $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public LEGEND $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public LEGEND $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public LEGEND $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public LEGEND $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public LEGEND $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public LEGEND $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public LEGEND $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public LEGEND __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public LEGEND _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public LEGEND b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public LEGEND b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public LEGEND i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public LEGEND i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public LEGEND small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public LEGEND small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public LEGEND em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public LEGEND em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public LEGEND strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public LEGEND strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public LEGEND dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public LEGEND dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public LEGEND code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public LEGEND code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public LEGEND samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public LEGEND samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public LEGEND kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public LEGEND kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public LEGEND var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public LEGEND var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public LEGEND cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public LEGEND cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public LEGEND abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public LEGEND abbr(String selector, String cdata) { + return 
setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public LEGEND a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public LEGEND a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public LEGEND img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public LEGEND sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public LEGEND sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public LEGEND sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public LEGEND sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public LEGEND q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public LEGEND q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public LEGEND br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public LEGEND bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public LEGEND span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public LEGEND span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public LEGEND script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public LEGEND ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public LEGEND del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public LEGEND label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return 
select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public LEGEND textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public LEGEND button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class FIELDSET extends EImp implements HamletSpec.FIELDSET { + public FIELDSET(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public FIELDSET $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public FIELDSET $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public FIELDSET $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public FIELDSET $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public FIELDSET $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public FIELDSET $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public FIELDSET $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public FIELDSET $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public FIELDSET $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public FIELDSET $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public FIELDSET $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public FIELDSET $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public FIELDSET $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public FIELDSET $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public FIELDSET $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public FIELDSET $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public LEGEND> legend() { + closeAttrs(); + return legend_(this, false); + } + + @Override + public FIELDSET legend(String cdata) { + return legend().__(cdata).__(); + } + + @Override + public FIELDSET __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public FIELDSET _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public FIELDSET address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public FIELDSET __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public FIELDSET hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public FIELDSET h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public FIELDSET h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public FIELDSET h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public FIELDSET h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public FIELDSET h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public FIELDSET h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public FIELDSET h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public FIELDSET h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public FIELDSET h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public FIELDSET h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public FIELDSET h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public FIELDSET h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + 
return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public FIELDSET b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public FIELDSET b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public FIELDSET i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public FIELDSET i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public FIELDSET small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public FIELDSET small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public FIELDSET em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public FIELDSET em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public FIELDSET strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public FIELDSET strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public FIELDSET dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public FIELDSET dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public FIELDSET code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public FIELDSET code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public FIELDSET samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public FIELDSET samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public FIELDSET kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public FIELDSET kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public FIELDSET var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public FIELDSET var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); 
+ return cite_(this, true); + } + + @Override + public FIELDSET cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public FIELDSET cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public FIELDSET abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public FIELDSET abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public FIELDSET a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public FIELDSET a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public FIELDSET img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public FIELDSET sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public FIELDSET sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public FIELDSET sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public FIELDSET sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public FIELDSET q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public FIELDSET q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public FIELDSET br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public FIELDSET bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public FIELDSET span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public FIELDSET span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public FIELDSET script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public FIELDSET ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public 
FIELDSET del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public FIELDSET label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public FIELDSET textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public FIELDSET button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + private LEGEND legend_(T e, boolean inline) { + return new LEGEND("legend", e, opt(true, inline, false)); } + + public class TEXTAREA extends EImp implements HamletSpec.TEXTAREA { + public TEXTAREA(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public TEXTAREA $name(String value) { + addAttr("name", value); + return this; + } + + @Override + public TEXTAREA $disabled() { + addAttr("disabled", null); + return this; + } + + @Override + public TEXTAREA $tabindex(int value) { + addAttr("tabindex", value); + return this; + } + + @Override + public TEXTAREA $accesskey(String value) { + addAttr("accesskey", value); + return this; + } + + @Override + public TEXTAREA $onfocus(String value) { + addAttr("onfocus", value); + return this; + } + + @Override + public TEXTAREA $onblur(String value) { + addAttr("onblur", value); + return this; + } + + @Override + public TEXTAREA $rows(int value) { + addAttr("rows", value); + return this; + } + + @Override + public TEXTAREA $cols(int value) { + addAttr("cols", value); + return this; + } + + @Override + public TEXTAREA $readonly() { + addAttr("readonly", null); + return this; + } + + @Override + public TEXTAREA $onselect(String value) { + addAttr("onselect", value); + return this; + } + + @Override + public TEXTAREA $onchange(String value) { + addAttr("onchange", value); + return this; + } + + @Override + public TEXTAREA $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public TEXTAREA $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public TEXTAREA $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public TEXTAREA $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public TEXTAREA $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public TEXTAREA $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public TEXTAREA $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public TEXTAREA $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public 
TEXTAREA $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public TEXTAREA $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public TEXTAREA $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public TEXTAREA $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public TEXTAREA $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public TEXTAREA $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public TEXTAREA $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public TEXTAREA $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TEXTAREA __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public TEXTAREA _r(Object... lines) { + _p(false, lines); + return this; + } + } + + public class OPTION extends EImp implements HamletSpec.OPTION { + public OPTION(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public OPTION $value(String value) { + addAttr("value", value); + return this; + } + + @Override + public OPTION $disabled() { + addAttr("disabled", null); + return this; + } + + @Override + public OPTION $selected() { + addAttr("selected", null); + return this; + } + + @Override + public OPTION $label(String value) { + addAttr("label", value); + return this; + } + + @Override + public OPTION $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public OPTION $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public OPTION $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public OPTION $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public OPTION $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public OPTION $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public OPTION $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public OPTION $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public OPTION $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public OPTION $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public OPTION $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public OPTION $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public OPTION $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public OPTION $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public OPTION $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public OPTION $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public OPTION __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public OPTION _r(Object... 
lines) { + _p(false, lines); + return this; + } + } + + public class OPTGROUP extends EImp implements HamletSpec.OPTGROUP { + public OPTGROUP(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public OPTGROUP $disabled() { + addAttr("disabled", null); + return this; + } + + @Override + public OPTGROUP $label(String value) { + addAttr("label", value); + return this; + } + + @Override + public OPTGROUP $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public OPTGROUP $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public OPTGROUP $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public OPTGROUP $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public OPTGROUP $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public OPTGROUP $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public OPTGROUP $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public OPTGROUP $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public OPTGROUP $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public OPTGROUP $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public OPTGROUP $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public OPTGROUP $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public OPTGROUP $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public OPTGROUP $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public OPTGROUP $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public OPTGROUP $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public OPTION> option() { + closeAttrs(); + return option_(this, false); + } + + @Override + public OPTGROUP option(String cdata) { + return option().__(cdata).__(); + } + } + + private OPTGROUP optgroup_(T e, boolean inline) { + return new OPTGROUP("optgroup", e, opt(true, inline, false)); } + + public class SELECT extends EImp implements HamletSpec.SELECT { + public SELECT(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public SELECT $name(String value) { + addAttr("name", value); + return this; + } + + @Override + public SELECT $disabled() { + addAttr("disabled", null); + return this; + } + + @Override + public SELECT $tabindex(int value) { + addAttr("tabindex", value); + return this; + } + + @Override + public SELECT $onfocus(String value) { + addAttr("onfocus", value); + return this; + } + + @Override + public SELECT $onblur(String value) { + addAttr("onblur", value); + return this; + } + + @Override + public SELECT $onchange(String value) { + addAttr("onchange", value); + return this; + } + + @Override + public OPTGROUP> optgroup() { + closeAttrs(); + return optgroup_(this, false); + } + + @Override + public SELECT $size(int value) { + addAttr("size", value); + return this; + } + + @Override + public SELECT $multiple() { + addAttr("multiple", null); + return this; + } + + @Override + public SELECT $id(String value) { + addAttr("id", value); + return this; + } + + 
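// A minimal usage sketch (not part of this change) of the fluent pattern implemented by the
// element classes in this file: $-prefixed methods set attributes and return the same element,
// child methods such as fieldset()/select()/option() open a nested element, and __() closes it
// and returns the parent. "div" is assumed to be any block-level Hamlet element (for example
// the DIV returned by html.div("#content") in a web view); the selectors and values below are
// illustrative only.
//
//   div.form("#job-form").$action("/submit").
//     fieldset().
//       label("queue", "Queue").
//       select("#queue").$name("queue").
//         option().$value("default").__("default").__().
//         option().$value("gpu").__("gpu").__().
//       __().
//     __().
//   __();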
@Override + public SELECT $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public SELECT $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public SELECT $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public SELECT $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public SELECT $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public SELECT $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public SELECT $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public SELECT $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public SELECT $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public SELECT $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public SELECT $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public SELECT $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public SELECT $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public SELECT $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public SELECT $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public OPTION> option() { + closeAttrs(); + return option_(this, false); + } + + @Override + public SELECT option(String cdata) { + return option().__(cdata).__(); + } + } + + private OPTION option_(T e, boolean inline) { + return new OPTION("option", e, opt(false, inline, false)); } + + public class INPUT extends EImp implements HamletSpec.INPUT { + public INPUT(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public INPUT $type(InputType value) { + addAttr("type", value); + return this; + } + + @Override + public INPUT $src(String value) { + addAttr("src", value); + return this; + } + + @Override + public INPUT $name(String value) { + addAttr("name", value); + return this; + } + + @Override + public INPUT $value(String value) { + addAttr("value", value); + return this; + } + + @Override + public INPUT $disabled() { + addAttr("disabled", null); + return this; + } + + @Override + public INPUT $tabindex(int value) { + addAttr("tabindex", value); + return this; + } + + @Override + public INPUT $accesskey(String value) { + addAttr("accesskey", value); + return this; + } + + @Override + public INPUT $onfocus(String value) { + addAttr("onfocus", value); + return this; + } + + @Override + public INPUT $onblur(String value) { + addAttr("onblur", value); + return this; + } + + @Override + public INPUT $readonly() { + addAttr("readonly", null); + return this; + } + + @Override + public INPUT $onselect(String value) { + addAttr("onselect", value); + return this; + } + + @Override + public INPUT $onchange(String value) { + addAttr("onchange", value); + return this; + } + + @Override + public INPUT $size(String value) { + addAttr("size", value); + return this; + } + + @Override + public INPUT $checked() { + addAttr("checked", null); + return this; + } + + @Override + public INPUT $maxlength(int value) { + addAttr("maxlength", value); + return this; + } + + @Override + public INPUT $alt(String value) { + 
addAttr("alt", value); + return this; + } + + @Override + public INPUT $ismap() { + addAttr("ismap", null); + return this; + } + + @Override + public INPUT $accept(String value) { + addAttr("accept", value); + return this; + } + + @Override + public INPUT $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public INPUT $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public INPUT $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public INPUT $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public INPUT $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public INPUT $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public INPUT $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public INPUT $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public INPUT $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public INPUT $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public INPUT $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public INPUT $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public INPUT $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public INPUT $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public INPUT $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public INPUT $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + } + + public class LABEL extends EImp implements HamletSpec.LABEL { + public LABEL(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public LABEL $accesskey(String value) { + addAttr("accesskey", value); + return this; + } + + @Override + public LABEL $onfocus(String value) { + addAttr("onfocus", value); + return this; + } + + @Override + public LABEL $onblur(String value) { + addAttr("onblur", value); + return this; + } + + @Override + public LABEL $for(String value) { + addAttr("for", value); + return this; + } + + @Override + public LABEL $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public LABEL $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public LABEL $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public LABEL $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public LABEL $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public LABEL $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public LABEL $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public LABEL $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public LABEL $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public LABEL $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public LABEL $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + 
public LABEL $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public LABEL $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public LABEL $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public LABEL $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public LABEL $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public LABEL __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public LABEL _r(Object... lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public LABEL b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public LABEL b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public LABEL i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public LABEL i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public LABEL small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public LABEL small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public LABEL em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public LABEL em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public LABEL strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public LABEL strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public LABEL dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public LABEL dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public LABEL code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public LABEL code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public LABEL samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public LABEL samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public LABEL kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public LABEL kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public LABEL var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public LABEL var(String selector, String 
cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public LABEL cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public LABEL cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public LABEL abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public LABEL abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public LABEL a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public LABEL a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public LABEL img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public LABEL sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public LABEL sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public LABEL sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public LABEL sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public LABEL q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public LABEL q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public LABEL br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public LABEL bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public LABEL span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public LABEL span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public LABEL script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public LABEL ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + 
closeAttrs(); + return del_(this, true); + } + + @Override + public LABEL del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public LABEL textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public LABEL button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class FORM extends EImp implements HamletSpec.FORM { + public FORM(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public FORM $name(String value) { + addAttr("name", value); + return this; + } + + @Override + public FORM $accept(String value) { + addAttr("accept", value); + return this; + } + + @Override + public FORM $action(String value) { + addAttr("action", value); + return this; + } + + @Override + public FORM $method(Method value) { + addAttr("method", value); + return this; + } + + @Override + public FORM $enctype(String value) { + addAttr("enctype", value); + return this; + } + + @Override + public FORM $onsubmit(String value) { + addAttr("onsubmit", value); + return this; + } + + @Override + public FORM $onreset(String value) { + addAttr("onreset", value); + return this; + } + + @Override + public FORM $accept_charset(String value) { + addAttr("accept-charset", value); + return this; + } + + @Override + public FORM $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public FORM $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public FORM $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public FORM $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public FORM $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public FORM $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public FORM $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public FORM $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public FORM $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public FORM $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public FORM $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public FORM $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public FORM $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public FORM $onkeypress(String value) { + addAttr("onkeypress", value); + 
return this; + } + + @Override + public FORM $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public FORM $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public FORM script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public FORM address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public FORM __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public FORM hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public FORM h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public FORM h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public FORM h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public FORM h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public FORM h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public FORM h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public FORM h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public FORM h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public FORM h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public FORM h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public FORM h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public FORM h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + 
public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + } + + public class LI extends EImp implements HamletSpec.LI { + public LI(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public LI $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public LI $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public LI $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public LI $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public LI $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public LI $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public LI $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public LI $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public LI $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public LI $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public LI $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public LI $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public LI $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public LI $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public LI $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public LI $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public LI address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public LI __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public LI hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> 
blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public LI h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public LI h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public LI h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public LI h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public LI h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public LI h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public LI h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public LI h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public LI h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public LI h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public LI h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public LI h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public LI __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public LI _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public LI b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public LI b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public LI i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public LI i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public LI small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public LI small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public LI em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public LI em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public LI strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public LI strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public LI dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public LI dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public LI code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public LI code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public LI samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public LI samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public LI kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public LI kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public LI var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public LI var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public LI cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public LI cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public LI abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public LI abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public LI a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public LI a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public LI img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public LI sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public LI sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public LI sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public LI sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public LI q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public LI q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public LI br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public LI bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public LI span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public LI span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public LI script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public LI ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public LI del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public LI label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{ + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public LI textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public LI button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class UL extends EImp implements HamletSpec.UL { + public UL(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public UL $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public UL $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public UL $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public UL $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public UL $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public UL $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public UL $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public UL $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public UL $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public UL $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public UL $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public UL $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public UL $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public UL $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public UL $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public UL $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public LI> li() { + closeAttrs(); + return li_(this, false); + } + + @Override + public UL li(String cdata) { + return li().__(cdata).__(); + } + } + + public class OL extends EImp implements HamletSpec.OL { + public OL(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public OL $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public OL $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public OL $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public OL $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public OL $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public OL $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public OL $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public OL $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public OL $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public OL 
$onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public OL $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public OL $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public OL $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public OL $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public OL $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public OL $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public LI> li() { + closeAttrs(); + return li_(this, false); + } + + @Override + public OL li(String cdata) { + return li().__(cdata).__(); + } + } + + private LI li_(T e, boolean inline) { + return new LI("li", e, opt(false, inline, false)); } + + public class DD extends EImp implements HamletSpec.DD { + public DD(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public DD $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public DD $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public DD $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public DD $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public DD $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public DD $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public DD $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public DD $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public DD $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public DD $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public DD $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public DD $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public DD $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public DD $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public DD $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public DD $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public DD address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public DD __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public DD hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return 
setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public DD h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public DD h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public DD h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public DD h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public DD h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public DD h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public DD h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public DD h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public DD h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public DD h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public DD h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public DD h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public DD __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public DD _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public DD b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public DD b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public DD i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public DD i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public DD small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public DD small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public DD em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public DD em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public DD strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public DD strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public DD dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public DD dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public DD code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public DD code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public DD samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public DD samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public DD kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public DD kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public DD var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public DD var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public DD cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public DD cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public DD abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public DD abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public DD a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public DD a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public DD img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public DD sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public DD sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public DD sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public DD sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public DD q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public DD q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public DD br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public DD bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public DD span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public DD span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public DD script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public DD ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public DD del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public DD label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{ + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public DD textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public DD button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class DT extends EImp implements HamletSpec.DT { + public DT(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public DT $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public DT $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public DT $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public DT $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public DT $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public DT $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public DT $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public DT $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public DT $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public DT $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public DT $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public DT $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public DT $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public DT $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public DT $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public DT $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public DT __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public DT _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public DT b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public DT b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public DT i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public DT i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public DT small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public DT small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public DT em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public DT em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public DT strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public DT strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public DT dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public DT dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public DT code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public DT code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public DT samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public DT samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public DT kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public DT kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public DT var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public DT var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public DT cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public DT cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public DT abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public DT abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public DT a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public DT a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public DT img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public DT sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public DT sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public DT sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public DT sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public DT q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public DT q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public DT br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public DT bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public DT span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public DT span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public DT script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public DT ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public DT del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public DT label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{ + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public DT textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public DT button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class DL extends EImp implements HamletSpec.DL { + public DL(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public DL $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public DL $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public DL $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public DL $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public DL $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public DL $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public DL $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public DL $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public DL $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public DL $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public DL $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public DL $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public DL $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public DL $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public DL $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public DL $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public DT> dt() { + closeAttrs(); + return dt_(this, false); + } + + @Override + public DL dt(String cdata) { + return dt().__(cdata).__(); + } + + @Override + public DD> dd() { + closeAttrs(); + return dd_(this, false); + } + + @Override + public DL dd(String cdata) { + return dd().__(cdata).__(); + } + } + + private DT dt_(T e, boolean inline) { + return new DT("dt", e, opt(false, inline, false)); } + + private DD dd_(T e, boolean inline) { + return new DD("dd", e, opt(false, inline, false)); } + + public class DEL extends EImp implements HamletSpec.DEL { + public DEL(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public DEL $cite(String value) { + addAttr("cite", value); + return this; + } + + @Override + public DEL $datetime(String value) { + addAttr("datetime", value); + return this; + } + + @Override + public DEL $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public DEL $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public DEL $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public DEL $style(String value) { + 
addAttr("style", value); + return this; + } + + @Override + public DEL $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public DEL $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public DEL $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public DEL $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public DEL $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public DEL $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public DEL $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public DEL $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public DEL $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public DEL $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public DEL $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public DEL $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public DEL address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public DEL __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public DEL hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public DEL h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public DEL h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public DEL h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public DEL h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public DEL h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public DEL h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + 
@Override + public DEL h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public DEL h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public DEL h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public DEL h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public DEL h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public DEL h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public DEL __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public DEL _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public DEL b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public DEL b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public DEL i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public DEL i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public DEL small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public DEL small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public DEL em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public DEL em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public DEL strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public DEL strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public DEL dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public DEL dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public DEL code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public DEL code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public DEL samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public DEL samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public DEL kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public DEL kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public DEL var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public DEL var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public DEL cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public DEL cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public DEL abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public DEL abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + 
closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public DEL a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public DEL a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public DEL img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public DEL sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public DEL sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public DEL sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public DEL sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public DEL q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public DEL q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public DEL br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public DEL bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public DEL span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public DEL span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public DEL script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public DEL ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public DEL del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public DEL label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + 
public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public DEL textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public DEL button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class INS extends EImp implements HamletSpec.INS { + public INS(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public INS $cite(String value) { + addAttr("cite", value); + return this; + } + + @Override + public INS $datetime(String value) { + addAttr("datetime", value); + return this; + } + + @Override + public INS $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public INS $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public INS $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public INS $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public INS $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public INS $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public INS $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public INS $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public INS $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public INS $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public INS $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public INS $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public INS $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public INS $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public INS $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public INS $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public INS address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public INS __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public INS hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return 
dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public INS h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public INS h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public INS h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public INS h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public INS h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public INS h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public INS h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public INS h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public INS h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public INS h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public INS h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public INS h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public INS __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public INS _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public INS b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public INS b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public INS i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public INS i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public INS small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public INS small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public INS em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public INS em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public INS strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public INS strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public INS dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public INS dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public INS code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public INS code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public INS samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public INS samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public INS kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public INS kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public INS var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public INS var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public INS cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public INS cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public INS abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public INS abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + 
closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public INS a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public INS a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public INS img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public INS sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public INS sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public INS sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public INS sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public INS q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public INS q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public INS br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public INS bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public INS span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public INS span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public INS script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public INS ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public INS del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public INS label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + 
public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public INS textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public INS button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class BLOCKQUOTE extends EImp implements HamletSpec.BLOCKQUOTE { + public BLOCKQUOTE(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public BLOCKQUOTE $cite(String value) { + addAttr("cite", value); + return this; + } + + @Override + public BLOCKQUOTE $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public BLOCKQUOTE $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public BLOCKQUOTE $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public BLOCKQUOTE $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public BLOCKQUOTE $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public BLOCKQUOTE $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public BLOCKQUOTE $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public BLOCKQUOTE $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public BLOCKQUOTE $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public BLOCKQUOTE $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public BLOCKQUOTE $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public BLOCKQUOTE $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public BLOCKQUOTE $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public BLOCKQUOTE $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public BLOCKQUOTE $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public BLOCKQUOTE $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public BLOCKQUOTE address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public BLOCKQUOTE __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public BLOCKQUOTE hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + 
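+
+    // Illustrative only: selector overloads such as dl("#terms.compact") are
+    // shorthand for setSelector(dl(), "#terms.compact"), i.e. the element is
+    // opened with id="terms" and class="compact" already applied (assuming
+    // the usual Hamlet "#id.class" selector convention). A hypothetical chain:
+    //
+    //   blockquote().$cite("http://example.org/quote").
+    //     dl("#terms").
+    //       dt("Term").dd("Definition").
+    //     __().
+    //   __();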
@Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public BLOCKQUOTE h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public BLOCKQUOTE h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public BLOCKQUOTE h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public BLOCKQUOTE h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public BLOCKQUOTE h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public BLOCKQUOTE h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public BLOCKQUOTE h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public BLOCKQUOTE h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public BLOCKQUOTE h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public BLOCKQUOTE h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public BLOCKQUOTE h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public BLOCKQUOTE script(String src) { + return setScriptSrc(script(), src).__(); + } + } + + public class Q extends EImp implements HamletSpec.Q { + public Q(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public Q $cite(String value) { + addAttr("cite", value); + 
return this; + } + + @Override + public Q $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public Q $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public Q $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public Q $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public Q $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public Q $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public Q $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public Q $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public Q $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public Q $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public Q $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public Q $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public Q $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public Q $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public Q $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public Q $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public Q __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public Q _r(Object... lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public Q b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public Q b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public Q i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public Q i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public Q small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public Q small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public Q em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public Q em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public Q strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public Q strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public Q dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public Q dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + 
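+
+    // Illustrative only: each inline child element is generated in three
+    // shapes, e.g. for em():
+    //   em()                      opens <em> and returns EM for further chaining
+    //   em("text")                is em().__("text").__()  (open, write, close)
+    //   em("#id.class", "text")   applies setSelector() first, then does the same
+    // so a hypothetical caller could write, for example:
+    //   q().$cite("http://example.org/src").em("quoted phrase").__();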
@Override + public Q code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public Q code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public Q samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public Q samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public Q kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public Q kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public Q var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public Q var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public Q cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public Q cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public Q abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public Q abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public Q a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public Q a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public Q img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public Q sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public Q sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public Q sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public Q sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public Q q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public Q q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public Q br(String 
selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public Q bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public Q span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public Q span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public Q script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public Q ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public Q del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public Q label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public Q textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public Q button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class PRE extends EImp implements HamletSpec.PRE { + public PRE(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public PRE $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public PRE $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public PRE $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public PRE $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public PRE $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public PRE $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public PRE $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public PRE $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public PRE $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public PRE $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public PRE $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public PRE 
$onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public PRE $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public PRE $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public PRE $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public PRE $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public PRE __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public PRE _r(Object... lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public PRE b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public PRE b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public PRE i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public PRE i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public PRE em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public PRE em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public PRE strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public PRE strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public PRE dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public PRE dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public PRE code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public PRE code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public PRE samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public PRE samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public PRE kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public PRE kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public PRE var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public PRE var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public PRE cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public PRE cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + 
@Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public PRE abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public PRE abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public PRE a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public PRE a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public PRE q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public PRE q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public PRE br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public PRE bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public PRE span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public PRE span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public PRE script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public PRE ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public PRE del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public PRE label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public PRE textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public PRE button(String selector, String cdata) { + return 
setSelector(button(), selector).__(cdata).__();
+    }
+  }
+
+  public class H6<T extends __> extends EImp<T> implements HamletSpec.H6 {
+    public H6(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public H6<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public H6<T> __(Object... lines) {
+      _p(true, lines);
+      return this;
+    }
+
+    @Override
+    public H6<T> _r(Object...
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public H6 b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public H6 b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public H6 i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public H6 i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public H6 small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public H6 small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public H6 em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public H6 em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public H6 strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public H6 strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public H6 dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public H6 dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public H6 code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public H6 code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public H6 samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public H6 samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public H6 kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public H6 kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public H6 var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public H6 var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public H6 cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public H6 cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public H6 abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public H6 abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public H6 a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public H6 a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public H6 img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public H6 sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public H6 sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public H6 sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public H6 sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public H6 q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public H6 q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public H6 br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public H6 bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public H6 span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public H6 span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public H6 script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public H6 ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public H6 del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public H6 label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{
+      return setSelector(textarea(), selector);
+    }
+
+    @Override
+    public TEXTAREA<H6<T>> textarea() {
+      closeAttrs();
+      return textarea_(this, true);
+    }
+
+    @Override
+    public H6<T> textarea(String selector, String cdata) {
+      return setSelector(textarea(), selector).__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<H6<T>> button() {
+      closeAttrs();
+      return button_(this, true);
+    }
+
+    @Override
+    public BUTTON<H6<T>> button(String selector) {
+      return setSelector(button(), selector);
+    }
+
+    @Override
+    public H6<T> button(String selector, String cdata) {
+      return setSelector(button(), selector).__(cdata).__();
+    }
+  }
+
+  public class H5<T extends __> extends EImp<T> implements HamletSpec.H5 {
+    public H5(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public H5<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public H5<T> __(Object... lines) {
+      _p(true, lines);
+      return this;
+    }
+
+    @Override
+    public H5<T> _r(Object...
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public H5 b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public H5 b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public H5 i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public H5 i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public H5 small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public H5 small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public H5 em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public H5 em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public H5 strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public H5 strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public H5 dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public H5 dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public H5 code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public H5 code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public H5 samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public H5 samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public H5 kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public H5 kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public H5 var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public H5 var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public H5 cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public H5 cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public H5 abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public H5 abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public H5 a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public H5 a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public H5 img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public H5 sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public H5 sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public H5 sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public H5 sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public H5 q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public H5 q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public H5 br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public H5 bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public H5 span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public H5 span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public H5 script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public H5 ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public H5 del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public H5 label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{ + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public H5 textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public H5 button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class H4 extends EImp implements HamletSpec.H4 { + public H4(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public H4 $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public H4 $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public H4 $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public H4 $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public H4 $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public H4 $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public H4 $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public H4 $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public H4 $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public H4 $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public H4 $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public H4 $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public H4 $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public H4 $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public H4 $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public H4 $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public H4 __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public H4 _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public H4 b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public H4 b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public H4 i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public H4 i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public H4 small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public H4 small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public H4 em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public H4 em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public H4 strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public H4 strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public H4 dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public H4 dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public H4 code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public H4 code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public H4 samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public H4 samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public H4 kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public H4 kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public H4 var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public H4 var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public H4 cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public H4 cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public H4 abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public H4 abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public H4 a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public H4 a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public H4 img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public H4 sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public H4 sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public H4 sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public H4 sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public H4 q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public H4 q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public H4 br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public H4 bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public H4 span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public H4 span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public H4 script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public H4 ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public H4 del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public H4 label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{ + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public H4 textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public H4 button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class H3 extends EImp implements HamletSpec.H3 { + public H3(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public H3 $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public H3 $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public H3 $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public H3 $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public H3 $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public H3 $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public H3 $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public H3 $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public H3 $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public H3 $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public H3 $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public H3 $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public H3 $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public H3 $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public H3 $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public H3 $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public H3 __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public H3 _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public H3 b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public H3 b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public H3 i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public H3 i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public H3 small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public H3 small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public H3 em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public H3 em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public H3 strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public H3 strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public H3 dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public H3 dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public H3 code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public H3 code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public H3 samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public H3 samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public H3 kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public H3 kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public H3 var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public H3 var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public H3 cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public H3 cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public H3 abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public H3 abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public H3 a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public H3 a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public H3 img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public H3 sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public H3 sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public H3 sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public H3 sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public H3 q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public H3 q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public H3 br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public H3 bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public H3 span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public H3 span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public H3 script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public H3 ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public H3 del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public H3 label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{ + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public H3 textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public H3 button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class H2 extends EImp implements HamletSpec.H2 { + public H2(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public H2 $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public H2 $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public H2 $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public H2 $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public H2 $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public H2 $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public H2 $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public H2 $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public H2 $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public H2 $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public H2 $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public H2 $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public H2 $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public H2 $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public H2 $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public H2 $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public H2 __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public H2 _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public H2 b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public H2 b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public H2 i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public H2 i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public H2 small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public H2 small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public H2 em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public H2 em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public H2 strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public H2 strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public H2 dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public H2 dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public H2 code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public H2 code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public H2 samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public H2 samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public H2 kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public H2 kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public H2 var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public H2 var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public H2 cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public H2 cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public H2 abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public H2 abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public H2 a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public H2 a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public H2 img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public H2 sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public H2 sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public H2 sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public H2 sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public H2 q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public H2 q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public H2 br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public H2 bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public H2 span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public H2 span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public H2 script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public H2 ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public H2 del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public H2 label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{ + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public H2 textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public H2 button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class H1 extends EImp implements HamletSpec.H1 { + public H1(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public H1 $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public H1 $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public H1 $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public H1 $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public H1 $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public H1 $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public H1 $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public H1 $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public H1 $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public H1 $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public H1 $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public H1 $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public H1 $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public H1 $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public H1 $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public H1 $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public H1 __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public H1 _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public H1 b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public H1 b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public H1 i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public H1 i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public H1 small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public H1 small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public H1 em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public H1 em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public H1 strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public H1 strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public H1 dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public H1 dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public H1 code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public H1 code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public H1 samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public H1 samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public H1 kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public H1 kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public H1 var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public H1 var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public H1 cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public H1 cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public H1 abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public H1 abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public H1 a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public H1 a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public H1 img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public H1 sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public H1 sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public H1 sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public H1 sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public H1 q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public H1 q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public H1 br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public H1 bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public H1 span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public H1 span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public H1 script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public H1 ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public H1 del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public H1 label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{ + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public H1 textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public H1 button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class P extends EImp implements HamletSpec.P { + public P(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public P $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public P $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public P $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public P $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public P $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public P $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public P $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public P $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public P $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public P $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public P $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public P $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public P $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public P $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public P $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public P $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public P __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public P _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public P b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public P b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public P i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public P i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public P small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public P small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public P em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public P em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public P strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public P strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public P dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public P dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public P code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public P code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public P samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public P samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public P kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public P kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public P var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public P var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public P cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public P cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public P abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public P abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + 
@Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public P a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public P a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public P img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public P sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public P sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public P sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public P sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public P q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public P q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public P br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public P bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public P span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public P span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public P script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public P ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public P del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public P label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), 
selector);
+    }
+
+    @Override
+    public TEXTAREA<P<T>> textarea() {
+      closeAttrs();
+      return textarea_(this, true);
+    }
+
+    @Override
+    public P<T> textarea(String selector, String cdata) {
+      return setSelector(textarea(), selector).__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<P<T>> button() {
+      closeAttrs();
+      return button_(this, true);
+    }
+
+    @Override
+    public BUTTON<P<T>> button(String selector) {
+      return setSelector(button(), selector);
+    }
+
+    @Override
+    public P<T> button(String selector, String cdata) {
+      return setSelector(button(), selector).__(cdata).__();
+    }
+  }
+
+  public class HR<T extends __> extends EImp<T> implements HamletSpec.HR {
+    public HR(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public HR<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public HR<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+  }
+
+  public class PARAM<T extends __> extends EImp<T> implements HamletSpec.PARAM {
+    public PARAM(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public PARAM<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public PARAM<T> $name(String value) {
+      addAttr("name", value);
+      return this;
+    }
+
+    @Override
+    public PARAM<T> $value(String value) {
+      addAttr("value", value);
+      return this;
+    }
+  }
+
+  public class OBJECT<T extends __> extends EImp<T> implements HamletSpec.OBJECT {
+    public OBJECT(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public OBJECT<T> $type(String value) {
+      addAttr("type", value);
+      return this;
+    }
+
+    @Override
+    public OBJECT<T> $name(String value) {
+      addAttr("name", value);
+      return this;
+    }
+
+    @Override
+    public OBJECT<T> $tabindex(int value) {
+      addAttr("tabindex", value);
+      return this;
+    }
+
+    @Override
+    public OBJECT<T> $data(String value) {
+      addAttr("data", value);
+      return this;
+    }
+
+    @Override
+    public OBJECT<T> $height(String value) {
+      addAttr("height", value);
+      return this;
+    }
+
+    @Override
+    public OBJECT<T> $height(int value) {
+      addAttr("height", value);
+      return this;
+    }
+
+    @Override
+    public OBJECT<T> $width(int value) {
addAttr("width", value); + return this; + } + + @Override + public OBJECT $width(String value) { + addAttr("width", value); + return this; + } + + @Override + public OBJECT $usemap(String value) { + addAttr("usemap", value); + return this; + } + + @Override + public OBJECT $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public OBJECT $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public OBJECT $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public OBJECT $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public OBJECT $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public OBJECT $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public OBJECT $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public OBJECT $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public OBJECT $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public OBJECT $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public OBJECT $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public OBJECT $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public OBJECT $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public OBJECT $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public OBJECT $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public OBJECT $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public PARAM> param() { + closeAttrs(); + return param_(this, false); + } + + @Override + public OBJECT param(String name, String value) { + return param().$name(name).$value(value).__(); + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public OBJECT address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public OBJECT __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public OBJECT hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public OBJECT h1(String cdata) { + return h1().__(cdata).__(); + } + + 
@Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public OBJECT h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public OBJECT h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public OBJECT h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public OBJECT h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public OBJECT h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public OBJECT h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public OBJECT h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public OBJECT h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public OBJECT h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public OBJECT h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public OBJECT h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public OBJECT __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public OBJECT _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public OBJECT b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public OBJECT b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public OBJECT i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public OBJECT i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public OBJECT small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public OBJECT small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public OBJECT em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public OBJECT em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public OBJECT strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public OBJECT strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public OBJECT dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public OBJECT dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public OBJECT code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public OBJECT code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public OBJECT samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public OBJECT samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public OBJECT kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public OBJECT kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public OBJECT var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public OBJECT var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public OBJECT cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public OBJECT cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public OBJECT abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public OBJECT abbr(String selector, String cdata) { + return 
setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public OBJECT a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public OBJECT a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public OBJECT img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public OBJECT sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public OBJECT sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public OBJECT sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public OBJECT sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public OBJECT q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public OBJECT q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public OBJECT br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public OBJECT bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public OBJECT span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public OBJECT span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public OBJECT script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public OBJECT ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public OBJECT del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public OBJECT label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return 
select_(this, true);
+    }
+
+    @Override
+    public SELECT<OBJECT<T>> select(String selector) {
+      return setSelector(select(), selector);
+    }
+
+    @Override
+    public TEXTAREA<OBJECT<T>> textarea(String selector) {
+      return setSelector(textarea(), selector);
+    }
+
+    @Override
+    public TEXTAREA<OBJECT<T>> textarea() {
+      closeAttrs();
+      return textarea_(this, true);
+    }
+
+    @Override
+    public OBJECT<T> textarea(String selector, String cdata) {
+      return setSelector(textarea(), selector).__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<OBJECT<T>> button() {
+      closeAttrs();
+      return button_(this, true);
+    }
+
+    @Override
+    public BUTTON<OBJECT<T>> button(String selector) {
+      return setSelector(button(), selector);
+    }
+
+    @Override
+    public OBJECT<T> button(String selector, String cdata) {
+      return setSelector(button(), selector).__(cdata).__();
+    }
+  }
+
+  private <T extends __> PARAM<T> param_(T e, boolean inline) {
+    return new PARAM<T>("param", e, opt(false, inline, false)); }
+
+  public class IMG<T extends __> extends EImp<T> implements HamletSpec.IMG {
+    public IMG(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public IMG<T> $src(String value) {
+      addAttr("src", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $alt(String value) {
+      addAttr("alt", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $ismap() {
+      addAttr("ismap", null);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $height(String value) {
+      addAttr("height", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $height(int value) {
+      addAttr("height", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $width(int value) {
+      addAttr("width", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $width(String value) {
+      addAttr("width", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $usemap(String value) {
+      addAttr("usemap", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public IMG<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+  }
+
+  public class LINK<T extends __> extends EImp<T> implements HamletSpec.LINK {
+    public LINK(String name, T parent, EnumSet<EOpt> opts) {
+      super(name,
parent, opts); + } + + @Override + public LINK $rel(EnumSet value) { + addRelAttr("rel", value); + return this; + } + + @Override + public LINK $rel(String value) { + addAttr("rel", value); + return this; + } + + @Override + public LINK $href(String value) { + addAttr("href", value); + return this; + } + + @Override + public LINK $type(String value) { + addAttr("type", value); + return this; + } + + @Override + public LINK $media(EnumSet value) { + addMediaAttr("media", value); + return this; + } + + @Override + public LINK $media(String value) { + addAttr("media", value); + return this; + } + + @Override + public LINK $hreflang(String value) { + addAttr("hreflang", value); + return this; + } + + @Override + public LINK $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public LINK $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public LINK $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public LINK $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public LINK $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public LINK $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public LINK $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public LINK $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public LINK $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public LINK $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public LINK $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public LINK $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public LINK $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public LINK $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public LINK $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public LINK $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + } + + public class AREA extends EImp implements HamletSpec.AREA { + public AREA(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public AREA $href(String value) { + addAttr("href", value); + return this; + } + + @Override + public AREA $tabindex(int value) { + addAttr("tabindex", value); + return this; + } + + @Override + public AREA $accesskey(String value) { + addAttr("accesskey", value); + return this; + } + + @Override + public AREA $onfocus(String value) { + addAttr("onfocus", value); + return this; + } + + @Override + public AREA $onblur(String value) { + addAttr("onblur", value); + return this; + } + + @Override + public AREA $alt(String value) { + addAttr("alt", value); + return this; + } + + @Override + public AREA $shape(Shape value) { + addAttr("shape", value); + return this; + } + + @Override + public AREA $coords(String value) { + addAttr("coords", value); + return this; + } + + @Override + public AREA $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public AREA $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public AREA $title(String value) { + addAttr("title", value); + return this; + 
} + + @Override + public AREA $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public AREA $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public AREA $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public AREA $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public AREA $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public AREA $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public AREA $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public AREA $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public AREA $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public AREA $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public AREA $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public AREA $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public AREA $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + } + + private AREA area_(T e, boolean inline) { + return new AREA("area", e, opt(false, inline, false)); } + + public class MAP extends EImp implements HamletSpec.MAP { + public MAP(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public MAP $name(String value) { + addAttr("name", value); + return this; + } + + @Override + public AREA> area() { + closeAttrs(); + return area_(this, false); + } + + @Override + public AREA> area(String selector) { + return setSelector(area(), selector); + } + + @Override + public MAP $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public MAP $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public MAP $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public MAP $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public MAP $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public MAP $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public MAP $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public MAP $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public MAP $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public MAP $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public MAP $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public MAP $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public MAP $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public MAP $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public MAP $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public MAP $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return 
table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public MAP address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public MAP __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public MAP hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public MAP h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public MAP h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public MAP h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public MAP h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public MAP h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public MAP h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public MAP h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public MAP h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public MAP h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public MAP h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public MAP h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public MAP h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return 
form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + } + + public class A extends EImp implements HamletSpec.A { + public A(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public A $rel(EnumSet value) { + addRelAttr("rel", value); + return this; + } + + @Override + public A $rel(String value) { + addAttr("rel", value); + return this; + } + + @Override + public A $href(String value) { + addAttr("href", value); + return this; + } + + @Override + public A $type(String value) { + addAttr("type", value); + return this; + } + + @Override + public A $tabindex(int value) { + addAttr("tabindex", value); + return this; + } + + @Override + public A $accesskey(String value) { + addAttr("accesskey", value); + return this; + } + + @Override + public A $onfocus(String value) { + addAttr("onfocus", value); + return this; + } + + @Override + public A $onblur(String value) { + addAttr("onblur", value); + return this; + } + + @Override + public A $hreflang(String value) { + addAttr("hreflang", value); + return this; + } + + @Override + public A $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public A $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public A $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public A $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public A $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public A $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public A $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public A $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public A $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public A $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public A $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public A $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public A $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public A $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public A $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public A $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public A __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public A _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public A b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public A b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public A i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public A i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public A small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public A small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public A em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public A em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public A strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public A strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public A dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public A dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public A code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public A code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public A samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public A samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public A kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public A kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public A var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public A var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public A cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public A cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public A abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public A abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } 
+ + @Override + public A img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public A q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public A q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public A br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public A bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public A span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public A span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public A script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public A ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public A del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public A sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public A sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public A sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public A sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public A label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public A textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + 
} + + @Override + public A button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class DIV extends EImp implements HamletSpec.DIV { + public DIV(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public DIV $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public DIV $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public DIV $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public DIV $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public DIV $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public DIV $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public DIV $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public DIV $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public DIV $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public DIV $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public DIV $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public DIV $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public DIV $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public DIV $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public DIV $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public DIV $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public DIV address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public DIV __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public DIV hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public DIV h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public DIV h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public DIV h2(String 
cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public DIV h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public DIV h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public DIV h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public DIV h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public DIV h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public DIV h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public DIV h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public DIV h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public DIV h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public DIV __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public DIV _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public DIV b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public DIV b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public DIV i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public DIV i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public DIV small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public DIV small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public DIV em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public DIV em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public DIV strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public DIV strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public DIV dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public DIV dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public DIV code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public DIV code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public DIV samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public DIV samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public DIV kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public DIV kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public DIV var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public DIV var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public DIV cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public DIV cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public DIV abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public DIV abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + 
closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public DIV a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public DIV a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public DIV img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public DIV sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public DIV sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public DIV sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public DIV sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public DIV q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public DIV q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public DIV br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public DIV bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public DIV span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public DIV span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public DIV script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public DIV ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public DIV del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public DIV label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + 
public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public DIV textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public DIV button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class ADDRESS extends EImp implements HamletSpec.ADDRESS { + public ADDRESS(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public ADDRESS $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public ADDRESS $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public ADDRESS $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public ADDRESS $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public ADDRESS $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public ADDRESS $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public ADDRESS $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public ADDRESS $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public ADDRESS $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public ADDRESS $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public ADDRESS $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public ADDRESS $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public ADDRESS $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public ADDRESS $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public ADDRESS $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public ADDRESS $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public ADDRESS __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public ADDRESS _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public ADDRESS b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public ADDRESS b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public ADDRESS i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public ADDRESS i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public ADDRESS small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public ADDRESS small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public ADDRESS em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public ADDRESS em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public ADDRESS strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public ADDRESS strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public ADDRESS dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public ADDRESS dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public ADDRESS code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public ADDRESS code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public ADDRESS samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public ADDRESS samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public ADDRESS kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public ADDRESS kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public ADDRESS var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public ADDRESS var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public ADDRESS cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public ADDRESS cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public ADDRESS abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public ADDRESS abbr(String selector, String cdata) 
{ + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public ADDRESS a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public ADDRESS a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public ADDRESS img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public ADDRESS sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public ADDRESS sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public ADDRESS sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public ADDRESS sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public ADDRESS q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public ADDRESS q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public ADDRESS br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public ADDRESS bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public ADDRESS span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public ADDRESS span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public ADDRESS script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public ADDRESS ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public ADDRESS del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public ADDRESS label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + 
closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public ADDRESS textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public ADDRESS button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class BODY extends EImp implements HamletSpec.BODY { + public BODY(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public BODY $onload(String value) { + addAttr("onload", value); + return this; + } + + @Override + public BODY $onunload(String value) { + addAttr("onunload", value); + return this; + } + + @Override + public BODY $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public BODY $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public BODY $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public BODY $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public BODY $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public BODY $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public BODY $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public BODY $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public BODY $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public BODY $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public BODY $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public BODY $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public BODY $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public BODY $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public BODY $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public BODY $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public TABLE> table() { + closeAttrs(); + return table_(this, false); + } + + @Override + public TABLE> table(String selector) { + return setSelector(table(), selector); + } + + @Override + public BODY address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS> address() { + closeAttrs(); + return address_(this, false); + } + + @Override + public P> p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P> p() { + closeAttrs(); + return p_(this, false); + } + + @Override + public BODY __(Class cls) { + _v(cls); + return this; + } + + @Override + public HR> hr() { + closeAttrs(); + return hr_(this, false); + } + + @Override + public BODY hr(String selector) { + 
return setSelector(hr(), selector).__(); + } + + @Override + public DL> dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL> dl() { + closeAttrs(); + return dl_(this, false); + } + + @Override + public DIV> div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV> div() { + closeAttrs(); + return div_(this, false); + } + + @Override + public BLOCKQUOTE> blockquote() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE> bq() { + closeAttrs(); + return blockquote_(this, false); + } + + @Override + public BODY h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1> h1() { + closeAttrs(); + return h1_(this, false); + } + + @Override + public BODY h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public BODY h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2> h2() { + closeAttrs(); + return h2_(this, false); + } + + @Override + public BODY h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3> h3() { + closeAttrs(); + return h3_(this, false); + } + + @Override + public BODY h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public BODY h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4> h4() { + closeAttrs(); + return h4_(this, false); + } + + @Override + public BODY h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public BODY h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5> h5() { + closeAttrs(); + return h5_(this, false); + } + + @Override + public BODY h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public BODY h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6> h6() { + closeAttrs(); + return h6_(this, false); + } + + @Override + public BODY h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public BODY h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL> ul() { + closeAttrs(); + return ul_(this, false); + } + + @Override + public UL> ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL> ol() { + closeAttrs(); + return ol_(this, false); + } + + @Override + public OL> ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE> pre() { + closeAttrs(); + return pre_(this, false); + } + + @Override + public PRE> pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM> form() { + closeAttrs(); + return form_(this, false); + } + + @Override + public FORM> form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET> fieldset() { + closeAttrs(); + return fieldset_(this, false); + } + + @Override + public FIELDSET> fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, false); + } + + @Override + public BODY script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, false); + } + + @Override + public BODY ins(String cdata) { + return 
ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, false); + } + + @Override + public BODY del(String cdata) { + return del().__(cdata).__(); + } + } + + private TABLE table_(T e, boolean inline) { + return new TABLE("table", e, opt(true, inline, false)); } + + private ADDRESS address_(T e, boolean inline) { + return new ADDRESS("address", e, opt(true, inline, false)); } + + private P p_(T e, boolean inline) { + return new P("p", e, opt(false, inline, false)); } + + private HR hr_(T e, boolean inline) { + return new HR("hr", e, opt(false, inline, false)); } + + private DL dl_(T e, boolean inline) { + return new DL("dl", e, opt(true, inline, false)); } + + private DIV div_(T e, boolean inline) { + return new DIV("div", e, opt(true, inline, false)); } + + private BLOCKQUOTE blockquote_(T e, boolean inline) { + return new BLOCKQUOTE("blockquote", e, opt(true, inline, false)); } + + private BLOCKQUOTE bq_(T e, boolean inline) { + return new BLOCKQUOTE("blockquote", e, opt(true, inline, false)); } + + private FIELDSET fieldset_(T e, boolean inline) { + return new FIELDSET("fieldset", e, opt(true, inline, false)); } + + private FORM form_(T e, boolean inline) { + return new FORM("form", e, opt(true, inline, false)); } + + public class BR extends EImp implements HamletSpec.BR { + public BR(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public BR $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public BR $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public BR $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public BR $style(String value) { + addAttr("style", value); + return this; + } + } + + public class BDO extends EImp implements HamletSpec.BDO { + public BDO(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public BDO $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public BDO $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public BDO $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public BDO $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public BDO $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public BDO $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public BDO __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public BDO _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public BDO b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public BDO b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public BDO i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public BDO i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public BDO small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public BDO small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public BDO em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public BDO em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public BDO strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public BDO strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public BDO dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public BDO dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public BDO code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public BDO code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public BDO samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public BDO samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public BDO kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public BDO kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public BDO var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public BDO var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public BDO cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public BDO cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public BDO abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public BDO abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + 
closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public BDO a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public BDO a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public BDO img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public BDO sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public BDO sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public BDO sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public BDO sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public BDO q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public BDO q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public BDO br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public BDO bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public BDO span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public BDO span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public BDO script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public BDO ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public BDO del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public BDO label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + 
public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public BDO textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public BDO button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class SPAN extends EImp implements HamletSpec.SPAN { + public SPAN(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public SPAN $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public SPAN $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public SPAN $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public SPAN $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public SPAN $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public SPAN $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public SPAN $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public SPAN $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public SPAN $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public SPAN $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public SPAN $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public SPAN $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public SPAN $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public SPAN $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public SPAN $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public SPAN $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public SPAN __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public SPAN _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public SPAN b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public SPAN b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public SPAN i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public SPAN i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public SPAN small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public SPAN small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public SPAN em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public SPAN em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public SPAN strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public SPAN strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public SPAN dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public SPAN dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public SPAN code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public SPAN code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public SPAN samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public SPAN samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public SPAN kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public SPAN kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public SPAN var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public SPAN var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public SPAN cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public SPAN cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public SPAN abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public SPAN abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + 
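  // Illustrative sketch, not part of the generated file: the $-prefixed methods queue
  // attributes on the current element and return the same builder, so attributes are
  // chained before any content is added; closeAttrs() in each child-element method
  // flushes the pending attributes before the child tag is emitted. For example
  // (hypothetical values):
  //
  //   span.$id("health").$class("warn")
  //       .__("3 nodes unhealthy")
  //   .__();   // renders <span id="health" class="warn">3 nodes unhealthy</span>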
@Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public SPAN a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public SPAN a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public SPAN img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public SPAN sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public SPAN sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public SPAN sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public SPAN sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public SPAN q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public SPAN q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public SPAN br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public SPAN bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public SPAN span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public SPAN span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public SPAN script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public SPAN ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public SPAN del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public SPAN label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return 
setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public SPAN textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public SPAN button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class SUP extends EImp implements HamletSpec.SUP { + public SUP(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public SUP $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public SUP $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public SUP $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public SUP $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public SUP $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public SUP $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public SUP $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public SUP $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public SUP $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public SUP $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public SUP $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public SUP $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public SUP $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public SUP $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public SUP $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public SUP $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public SUP __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public SUP _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public SUP b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public SUP b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public SUP i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public SUP i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public SUP small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public SUP small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public SUP em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public SUP em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public SUP strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public SUP strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public SUP dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public SUP dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public SUP code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public SUP code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public SUP samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public SUP samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public SUP kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public SUP kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public SUP var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public SUP var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public SUP cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public SUP cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public SUP abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public SUP abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + 
closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public SUP a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public SUP a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public SUP img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public SUP sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public SUP sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public SUP sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public SUP sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public SUP q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public SUP q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public SUP br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public SUP bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public SUP span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public SUP span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public SUP script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public SUP ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public SUP del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public SUP label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + 
public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public SUP textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public SUP button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class SUB extends EImp implements HamletSpec.SUB { + public SUB(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public SUB $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public SUB $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public SUB $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public SUB $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public SUB $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public SUB $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public SUB $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public SUB $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public SUB $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public SUB $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public SUB $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public SUB $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public SUB $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public SUB $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public SUB $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public SUB $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public SUB __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public SUB _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public SUB b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public SUB b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public SUB i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public SUB i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public SUB small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public SUB small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public SUB em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public SUB em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public SUB strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public SUB strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public SUB dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public SUB dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public SUB code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public SUB code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public SUB samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public SUB samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public SUB kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public SUB kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public SUB var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public SUB var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public SUB cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public SUB cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public SUB abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public SUB abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + 
closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public SUB a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public SUB a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public SUB img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public SUB sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public SUB sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public SUB sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public SUB sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public SUB q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public SUB q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public SUB br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public SUB bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public SUB span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public SUB span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public SUB script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public SUB ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public SUB del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public SUB label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + 
public TEXTAREA<SUB<T>> textarea(String selector) {
+      return setSelector(textarea(), selector);
+    }
+
+    @Override
+    public TEXTAREA<SUB<T>> textarea() {
+      closeAttrs();
+      return textarea_(this, true);
+    }
+
+    @Override
+    public SUB<T> textarea(String selector, String cdata) {
+      return setSelector(textarea(), selector).__(cdata).__();
+    }
+
+    @Override
+    public BUTTON<SUB<T>> button() {
+      closeAttrs();
+      return button_(this, true);
+    }
+
+    @Override
+    public BUTTON<SUB<T>> button(String selector) {
+      return setSelector(button(), selector);
+    }
+
+    @Override
+    public SUB<T> button(String selector, String cdata) {
+      return setSelector(button(), selector).__(cdata).__();
+    }
+  }
+
+  public class ACRONYM<T extends __> extends EImp<T> implements HamletSpec.ACRONYM {
+    public ACRONYM(String name, T parent, EnumSet<EOpt> opts) {
+      super(name, parent, opts);
+    }
+
+    @Override
+    public ACRONYM<T> $id(String value) {
+      addAttr("id", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $class(String value) {
+      addAttr("class", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $title(String value) {
+      addAttr("title", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $style(String value) {
+      addAttr("style", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $lang(String value) {
+      addAttr("lang", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $dir(Dir value) {
+      addAttr("dir", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $onclick(String value) {
+      addAttr("onclick", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $ondblclick(String value) {
+      addAttr("ondblclick", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $onmousedown(String value) {
+      addAttr("onmousedown", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $onmouseup(String value) {
+      addAttr("onmouseup", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $onmouseover(String value) {
+      addAttr("onmouseover", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $onmousemove(String value) {
+      addAttr("onmousemove", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $onmouseout(String value) {
+      addAttr("onmouseout", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $onkeypress(String value) {
+      addAttr("onkeypress", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $onkeydown(String value) {
+      addAttr("onkeydown", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> $onkeyup(String value) {
+      addAttr("onkeyup", value);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> __(Object... lines) {
+      _p(true, lines);
+      return this;
+    }
+
+    @Override
+    public ACRONYM<T> _r(Object...
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public ACRONYM b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public ACRONYM b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public ACRONYM i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public ACRONYM i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public ACRONYM small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public ACRONYM small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public ACRONYM em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public ACRONYM em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public ACRONYM strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public ACRONYM strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public ACRONYM dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public ACRONYM dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public ACRONYM code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public ACRONYM code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public ACRONYM samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public ACRONYM samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public ACRONYM kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public ACRONYM kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public ACRONYM var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public ACRONYM var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public ACRONYM cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public ACRONYM cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public ACRONYM abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public ACRONYM abbr(String selector, String cdata) 
{ + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public ACRONYM a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public ACRONYM a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public ACRONYM img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public ACRONYM sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public ACRONYM sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public ACRONYM sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public ACRONYM sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public ACRONYM q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public ACRONYM q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public ACRONYM br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public ACRONYM bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public ACRONYM span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public ACRONYM span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public ACRONYM script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public ACRONYM ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public ACRONYM del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public ACRONYM label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + 
closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public ACRONYM textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public ACRONYM button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class ABBR extends EImp implements HamletSpec.ABBR { + public ABBR(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public ABBR $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public ABBR $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public ABBR $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public ABBR $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public ABBR $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public ABBR $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public ABBR $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public ABBR $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public ABBR $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public ABBR $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public ABBR $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public ABBR $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public ABBR $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public ABBR $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public ABBR $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public ABBR $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public ABBR __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public ABBR _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public ABBR b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public ABBR b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public ABBR i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public ABBR i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public ABBR small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public ABBR small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public ABBR em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public ABBR em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public ABBR strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public ABBR strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public ABBR dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public ABBR dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public ABBR code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public ABBR code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public ABBR samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public ABBR samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public ABBR kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public ABBR kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public ABBR var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public ABBR var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public ABBR cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public ABBR cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public ABBR abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public ABBR abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + 
@Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public ABBR a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public ABBR a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public ABBR img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public ABBR sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public ABBR sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public ABBR sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public ABBR sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public ABBR q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public ABBR q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public ABBR br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public ABBR bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public ABBR span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public ABBR span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public ABBR script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public ABBR ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public ABBR del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public ABBR label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return 
setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public ABBR textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public ABBR button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class CITE extends EImp implements HamletSpec.CITE { + public CITE(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public CITE $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public CITE $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public CITE $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public CITE $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public CITE $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public CITE $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public CITE $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public CITE $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public CITE $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public CITE $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public CITE $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public CITE $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public CITE $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public CITE $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public CITE $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public CITE $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public CITE __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public CITE _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public CITE b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public CITE b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public CITE i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public CITE i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public CITE small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public CITE small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public CITE em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public CITE em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public CITE strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public CITE strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public CITE dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public CITE dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public CITE code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public CITE code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public CITE samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public CITE samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public CITE kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public CITE kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public CITE var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public CITE var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public CITE cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public CITE cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public CITE abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public CITE abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + 
@Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public CITE a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public CITE a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public CITE img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public CITE sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public CITE sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public CITE sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public CITE sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public CITE q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public CITE q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public CITE br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public CITE bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public CITE span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public CITE span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public CITE script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public CITE ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public CITE del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public CITE label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return 
setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public CITE textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public CITE button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class VAR extends EImp implements HamletSpec.VAR { + public VAR(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public VAR $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public VAR $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public VAR $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public VAR $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public VAR $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public VAR $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public VAR $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public VAR $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public VAR $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public VAR $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public VAR $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public VAR $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public VAR $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public VAR $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public VAR $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public VAR $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public VAR __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public VAR _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public VAR b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public VAR b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public VAR i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public VAR i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public VAR small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public VAR small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public VAR em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public VAR em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public VAR strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public VAR strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public VAR dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public VAR dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public VAR code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public VAR code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public VAR samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public VAR samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public VAR kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public VAR kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public VAR var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public VAR var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public VAR cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public VAR cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public VAR abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public VAR abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + 
closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public VAR a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public VAR a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public VAR img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public VAR sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public VAR sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public VAR sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public VAR sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public VAR q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public VAR q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public VAR br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public VAR bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public VAR span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public VAR span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public VAR script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public VAR ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public VAR del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public VAR label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + 
public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public VAR textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public VAR button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class KBD extends EImp implements HamletSpec.KBD { + public KBD(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public KBD $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public KBD $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public KBD $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public KBD $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public KBD $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public KBD $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public KBD $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public KBD $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public KBD $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public KBD $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public KBD $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public KBD $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public KBD $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public KBD $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public KBD $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public KBD $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public KBD __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public KBD _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public KBD b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public KBD b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public KBD i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public KBD i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public KBD small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public KBD small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public KBD em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public KBD em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public KBD strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public KBD strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public KBD dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public KBD dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public KBD code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public KBD code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public KBD samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public KBD samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public KBD kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public KBD kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public KBD var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public KBD var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public KBD cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public KBD cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public KBD abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public KBD abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + 
closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public KBD a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public KBD a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public KBD img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public KBD sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public KBD sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public KBD sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public KBD sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public KBD q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public KBD q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public KBD br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public KBD bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public KBD span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public KBD span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public KBD script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public KBD ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public KBD del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public KBD label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + 
public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public KBD textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public KBD button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class SAMP extends EImp implements HamletSpec.SAMP { + public SAMP(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public SAMP $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public SAMP $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public SAMP $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public SAMP $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public SAMP $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public SAMP $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public SAMP $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public SAMP $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public SAMP $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public SAMP $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public SAMP $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public SAMP $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public SAMP $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public SAMP $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public SAMP $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public SAMP $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public SAMP __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public SAMP _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public SAMP b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public SAMP b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public SAMP i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public SAMP i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public SAMP small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public SAMP small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public SAMP em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public SAMP em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public SAMP strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public SAMP strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public SAMP dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public SAMP dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public SAMP code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public SAMP code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public SAMP samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public SAMP samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public SAMP kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public SAMP kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public SAMP var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public SAMP var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public SAMP cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public SAMP cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public SAMP abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public SAMP abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + 
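+    /*
+     * Illustrative sketch (added comment, not part of the generated source): each of
+     * these inline-element classes follows the same fluent pattern, so a hypothetical
+     * page writer could emit nested phrase markup along the lines of
+     *
+     *   html.p().__("Run ").code("ls -l").__(" and check the output.").__();
+     *
+     * where code(cdata) opens a <code> child, writes the text, closes it, and returns
+     * the enclosing element so the chain can continue.
+     */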
@Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public SAMP a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public SAMP a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public SAMP img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public SAMP sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public SAMP sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public SAMP sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public SAMP sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public SAMP q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public SAMP q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public SAMP br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public SAMP bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public SAMP span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public SAMP span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public SAMP script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public SAMP ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public SAMP del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public SAMP label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return 
setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public SAMP textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public SAMP button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class CODE extends EImp implements HamletSpec.CODE { + public CODE(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public CODE $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public CODE $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public CODE $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public CODE $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public CODE $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public CODE $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public CODE $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public CODE $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public CODE $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public CODE $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public CODE $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public CODE $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public CODE $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public CODE $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public CODE $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public CODE $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public CODE __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public CODE _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public CODE b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public CODE b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public CODE i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public CODE i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public CODE small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public CODE small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public CODE em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public CODE em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public CODE strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public CODE strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public CODE dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public CODE dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public CODE code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public CODE code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public CODE samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public CODE samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public CODE kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public CODE kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public CODE var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public CODE var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public CODE cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public CODE cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public CODE abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public CODE abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + 
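+    /*
+     * Added note (an assumption inferred from the setSelector(...) calls in this file):
+     * the String "selector" taken by overloads such as code(String selector, String cdata)
+     * is a CSS-like shorthand, e.g. "#example.highlight", which setSelector splits into an
+     * id attribute ("example") and a class attribute ("highlight") on the new element.
+     */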
@Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public CODE a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public CODE a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public CODE img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public CODE sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public CODE sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public CODE sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public CODE sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public CODE q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public CODE q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public CODE br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public CODE bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public CODE span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public CODE span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public CODE script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public CODE ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public CODE del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public CODE label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return 
setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public CODE textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public CODE button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class DFN extends EImp implements HamletSpec.DFN { + public DFN(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public DFN $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public DFN $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public DFN $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public DFN $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public DFN $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public DFN $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public DFN $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public DFN $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public DFN $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public DFN $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public DFN $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public DFN $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public DFN $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public DFN $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public DFN $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public DFN $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public DFN __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public DFN _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public DFN b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public DFN b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public DFN i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public DFN i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public DFN small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public DFN small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public DFN em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public DFN em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public DFN strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public DFN strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public DFN dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public DFN dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public DFN code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public DFN code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public DFN samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public DFN samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public DFN kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public DFN kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public DFN var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public DFN var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public DFN cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public DFN cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public DFN abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public DFN abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + 
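+      // Added comment (not part of the generated source): like the other child-element
+      // factories, a_(this, true) links the new A node back to this DFN as its parent,
+      // so a later __() on the child returns control here for further chaining.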
closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public DFN a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public DFN a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public DFN img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public DFN sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public DFN sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public DFN sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public DFN sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public DFN q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public DFN q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public DFN br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public DFN bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public DFN span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public DFN span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public DFN script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public DFN ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public DFN del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public DFN label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + 
public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public DFN textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public DFN button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class STRONG extends EImp implements HamletSpec.STRONG { + public STRONG(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public STRONG $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public STRONG $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public STRONG $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public STRONG $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public STRONG $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public STRONG $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public STRONG $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public STRONG $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public STRONG $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public STRONG $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public STRONG $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public STRONG $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public STRONG $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public STRONG $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public STRONG $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public STRONG $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public STRONG __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public STRONG _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public STRONG b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public STRONG b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public STRONG i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public STRONG i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public STRONG small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public STRONG small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public STRONG em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public STRONG em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public STRONG strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public STRONG strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public STRONG dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public STRONG dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public STRONG code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public STRONG code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public STRONG samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public STRONG samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public STRONG kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public STRONG kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public STRONG var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public STRONG var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public STRONG cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public STRONG cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public STRONG abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public STRONG abbr(String selector, String cdata) { + return 
setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public STRONG a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public STRONG a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public STRONG img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public STRONG sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public STRONG sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public STRONG sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public STRONG sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public STRONG q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public STRONG q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public STRONG br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public STRONG bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public STRONG span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public STRONG span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public STRONG script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public STRONG ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public STRONG del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public STRONG label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return 
select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public STRONG textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public STRONG button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class EM extends EImp implements HamletSpec.EM { + public EM(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public EM $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public EM $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public EM $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public EM $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public EM $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public EM $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public EM $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public EM $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public EM $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public EM $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public EM $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public EM $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public EM $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public EM $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public EM $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public EM $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public EM __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public EM _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public EM b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public EM b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public EM i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public EM i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public EM small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public EM small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public EM em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public EM em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public EM strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public EM strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public EM dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public EM dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public EM code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public EM code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public EM samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public EM samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public EM kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public EM kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public EM var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public EM var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public EM cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public EM cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public EM abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public EM abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return 
a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public EM a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public EM a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public EM img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public EM sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public EM sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public EM sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public EM sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public EM q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public EM q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public EM br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public EM bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public EM span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public EM span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public EM script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public EM ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public EM del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public EM label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) 
{ + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public EM textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public EM button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class SMALL extends EImp implements HamletSpec.SMALL { + public SMALL(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public SMALL $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public SMALL $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public SMALL $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public SMALL $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public SMALL $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public SMALL $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public SMALL $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public SMALL $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public SMALL $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public SMALL $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public SMALL $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public SMALL $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public SMALL $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public SMALL $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public SMALL $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public SMALL $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public SMALL __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public SMALL _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public SMALL b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public SMALL b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public SMALL i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public SMALL i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public SMALL small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public SMALL small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public SMALL em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public SMALL em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public SMALL strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public SMALL strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public SMALL dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public SMALL dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public SMALL code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public SMALL code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public SMALL samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public SMALL samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public SMALL kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public SMALL kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public SMALL var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public SMALL var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public SMALL cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public SMALL cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public SMALL abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public SMALL abbr(String selector, String cdata) { + return setSelector(abbr(), 
selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + @Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public SMALL a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public SMALL a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public SMALL img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public SMALL sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public SMALL sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public SMALL sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public SMALL sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public SMALL q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public SMALL q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public SMALL br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public SMALL bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public SMALL span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public SMALL span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public SMALL script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public SMALL ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public SMALL del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public SMALL label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public 
SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public SMALL textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public SMALL button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class B extends EImp implements HamletSpec.B { + public B(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public B $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public B $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public B $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public B $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public B $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public B $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public B $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public B $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public B $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public B $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public B $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public B $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public B $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public B $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public B $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public B $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public B __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public B _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public B b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public B b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public B i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public B i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public B small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public B small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public B em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public B em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public B strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public B strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public B dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public B dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public B code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public B code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public B samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public B samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public B kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public B kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public B var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public B var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public B cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public B cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public B abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public B abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + 
@Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public B a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public B a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public B img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public B sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public B sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public B sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public B sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public B q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public B q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public B br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public B bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public B span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public B span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public B script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public B ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public B del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public B label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), 
selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public B textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public B button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + public class I extends EImp implements HamletSpec.I { + public I(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + @Override + public I $id(String value) { + addAttr("id", value); + return this; + } + + @Override + public I $class(String value) { + addAttr("class", value); + return this; + } + + @Override + public I $title(String value) { + addAttr("title", value); + return this; + } + + @Override + public I $style(String value) { + addAttr("style", value); + return this; + } + + @Override + public I $lang(String value) { + addAttr("lang", value); + return this; + } + + @Override + public I $dir(Dir value) { + addAttr("dir", value); + return this; + } + + @Override + public I $onclick(String value) { + addAttr("onclick", value); + return this; + } + + @Override + public I $ondblclick(String value) { + addAttr("ondblclick", value); + return this; + } + + @Override + public I $onmousedown(String value) { + addAttr("onmousedown", value); + return this; + } + + @Override + public I $onmouseup(String value) { + addAttr("onmouseup", value); + return this; + } + + @Override + public I $onmouseover(String value) { + addAttr("onmouseover", value); + return this; + } + + @Override + public I $onmousemove(String value) { + addAttr("onmousemove", value); + return this; + } + + @Override + public I $onmouseout(String value) { + addAttr("onmouseout", value); + return this; + } + + @Override + public I $onkeypress(String value) { + addAttr("onkeypress", value); + return this; + } + + @Override + public I $onkeydown(String value) { + addAttr("onkeydown", value); + return this; + } + + @Override + public I $onkeyup(String value) { + addAttr("onkeyup", value); + return this; + } + + @Override + public I __(Object... lines) { + _p(true, lines); + return this; + } + + @Override + public I _r(Object... 
lines) { + _p(false, lines); + return this; + } + + @Override + public B> b() { + closeAttrs(); + return b_(this, true); + } + + @Override + public I b(String cdata) { + return b().__(cdata).__(); + } + + @Override + public I b(String selector, String cdata) { + return setSelector(b(), selector).__(cdata).__(); + } + + @Override + public I> i() { + closeAttrs(); + return i_(this, true); + } + + @Override + public I i(String cdata) { + return i().__(cdata).__(); + } + + @Override + public I i(String selector, String cdata) { + return setSelector(i(), selector).__(cdata).__(); + } + + @Override + public SMALL> small() { + closeAttrs(); + return small_(this, true); + } + + @Override + public I small(String cdata) { + return small().__(cdata).__(); + } + + @Override + public I small(String selector, String cdata) { + return setSelector(small(), selector).__(cdata).__(); + } + + @Override + public I em(String cdata) { + return em().__(cdata).__(); + } + + @Override + public EM> em() { + closeAttrs(); + return em_(this, true); + } + + @Override + public I em(String selector, String cdata) { + return setSelector(em(), selector).__(cdata).__(); + } + + @Override + public STRONG> strong() { + closeAttrs(); + return strong_(this, true); + } + + @Override + public I strong(String cdata) { + return strong().__(cdata).__(); + } + + @Override + public I strong(String selector, String cdata) { + return setSelector(strong(), selector).__(cdata).__(); + } + + @Override + public DFN> dfn() { + closeAttrs(); + return dfn_(this, true); + } + + @Override + public I dfn(String cdata) { + return dfn().__(cdata).__(); + } + + @Override + public I dfn(String selector, String cdata) { + return setSelector(dfn(), selector).__(cdata).__(); + } + + @Override + public CODE> code() { + closeAttrs(); + return code_(this, true); + } + + @Override + public I code(String cdata) { + return code().__(cdata).__(); + } + + @Override + public I code(String selector, String cdata) { + return setSelector(code(), selector).__(cdata).__(); + } + + @Override + public I samp(String cdata) { + return samp().__(cdata).__(); + } + + @Override + public SAMP> samp() { + closeAttrs(); + return samp_(this, true); + } + + @Override + public I samp(String selector, String cdata) { + return setSelector(samp(), selector).__(cdata).__(); + } + + @Override + public KBD> kbd() { + closeAttrs(); + return kbd_(this, true); + } + + @Override + public I kbd(String cdata) { + return kbd().__(cdata).__(); + } + + @Override + public I kbd(String selector, String cdata) { + return setSelector(kbd(), selector).__(cdata).__(); + } + + @Override + public VAR> var() { + closeAttrs(); + return var_(this, true); + } + + @Override + public I var(String cdata) { + return var().__(cdata).__(); + } + + @Override + public I var(String selector, String cdata) { + return setSelector(var(), selector).__(cdata).__(); + } + + @Override + public CITE> cite() { + closeAttrs(); + return cite_(this, true); + } + + @Override + public I cite(String cdata) { + return cite().__(cdata).__(); + } + + @Override + public I cite(String selector, String cdata) { + return setSelector(cite(), selector).__(cdata).__(); + } + + @Override + public ABBR> abbr() { + closeAttrs(); + return abbr_(this, true); + } + + @Override + public I abbr(String cdata) { + return abbr().__(cdata).__(); + } + + @Override + public I abbr(String selector, String cdata) { + return setSelector(abbr(), selector).__(cdata).__(); + } + + @Override + public A> a() { + closeAttrs(); + return a_(this, true); + } + + 
@Override + public A> a(String selector) { + return setSelector(a(), selector); + } + + @Override + public I a(String href, String anchorText) { + return a().$href(href).__(anchorText).__(); + } + + @Override + public I a(String selector, String href, String anchorText) { + return setSelector(a(), selector).$href(href).__(anchorText).__(); + } + + @Override + public IMG> img() { + closeAttrs(); + return img_(this, true); + } + + @Override + public I img(String src) { + return img().$src(src).__(); + } + + @Override + public OBJECT> object() { + closeAttrs(); + return object_(this, true); + } + + @Override + public OBJECT> object(String selector) { + return setSelector(object(), selector); + } + + @Override + public SUB> sub() { + closeAttrs(); + return sub_(this, true); + } + + @Override + public I sub(String cdata) { + return sub().__(cdata).__(); + } + + @Override + public I sub(String selector, String cdata) { + return setSelector(sub(), selector).__(cdata).__(); + } + + @Override + public SUP> sup() { + closeAttrs(); + return sup_(this, true); + } + + @Override + public I sup(String cdata) { + return sup().__(cdata).__(); + } + + @Override + public I sup(String selector, String cdata) { + return setSelector(sup(), selector).__(cdata).__(); + } + + @Override + public MAP> map() { + closeAttrs(); + return map_(this, true); + } + + @Override + public MAP> map(String selector) { + return setSelector(map(), selector); + } + + @Override + public I q(String cdata) { + return q().__(cdata).__(); + } + + @Override + public I q(String selector, String cdata) { + return setSelector(q(), selector).__(cdata).__(); + } + + @Override + public Q> q() { + closeAttrs(); + return q_(this, true); + } + + @Override + public BR> br() { + closeAttrs(); + return br_(this, true); + } + + @Override + public I br(String selector) { + return setSelector(br(), selector).__(); + } + + @Override + public BDO> bdo() { + closeAttrs(); + return bdo_(this, true); + } + + @Override + public I bdo(Dir dir, String cdata) { + return bdo().$dir(dir).__(cdata).__(); + } + + @Override + public SPAN> span() { + closeAttrs(); + return span_(this, true); + } + + @Override + public I span(String cdata) { + return span().__(cdata).__(); + } + + @Override + public I span(String selector, String cdata) { + return setSelector(span(), selector).__(cdata).__(); + } + + @Override + public SCRIPT> script() { + closeAttrs(); + return script_(this, true); + } + + @Override + public I script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public INS> ins() { + closeAttrs(); + return ins_(this, true); + } + + @Override + public I ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL> del() { + closeAttrs(); + return del_(this, true); + } + + @Override + public I del(String cdata) { + return del().__(cdata).__(); + } + + @Override + public LABEL> label() { + closeAttrs(); + return label_(this, true); + } + + @Override + public I label(String forId, String cdata) { + return label().$for(forId).__(cdata).__(); + } + + @Override + public INPUT> input(String selector) { + return setSelector(input(), selector); + } + + @Override + public INPUT> input() { + closeAttrs(); + return input_(this, true); + } + + @Override + public SELECT> select() { + closeAttrs(); + return select_(this, true); + } + + @Override + public SELECT> select(String selector) { + return setSelector(select(), selector); + } + + @Override + public TEXTAREA> textarea(String selector) { + return setSelector(textarea(), 
selector); + } + + @Override + public TEXTAREA> textarea() { + closeAttrs(); + return textarea_(this, true); + } + + @Override + public I textarea(String selector, String cdata) { + return setSelector(textarea(), selector).__(cdata).__(); + } + + @Override + public BUTTON> button() { + closeAttrs(); + return button_(this, true); + } + + @Override + public BUTTON> button(String selector) { + return setSelector(button(), selector); + } + + @Override + public I button(String selector, String cdata) { + return setSelector(button(), selector).__(cdata).__(); + } + } + + private INPUT input_(T e, boolean inline) { + return new INPUT("input", e, opt(false, inline, false)); } + + private SELECT select_(T e, boolean inline) { + return new SELECT("select", e, opt(true, inline, false)); } + + private TEXTAREA textarea_(T e, boolean inline) { + return new TEXTAREA("textarea", e, opt(true, inline, false)); } + + private BUTTON button_(T e, boolean inline) { + return new BUTTON("button", e, opt(true, inline, false)); } + + private LABEL label_(T e, boolean inline) { + return new LABEL("label", e, opt(true, inline, false)); } + + private MAP map_(T e, boolean inline) { + return new MAP("map", e, opt(true, inline, false)); } + + private Q q_(T e, boolean inline) { + return new Q("q", e, opt(true, inline, false)); } + + private BR br_(T e, boolean inline) { + return new BR("br", e, opt(false, inline, false)); } + + private BDO bdo_(T e, boolean inline) { + return new BDO("bdo", e, opt(true, inline, false)); } + + private SPAN span_(T e, boolean inline) { + return new SPAN("span", e, opt(true, inline, false)); } + + private INS ins_(T e, boolean inline) { + return new INS("ins", e, opt(true, inline, false)); } + + private DEL del_(T e, boolean inline) { + return new DEL("del", e, opt(true, inline, false)); } + + private A a_(T e, boolean inline) { + return new A("a", e, opt(true, inline, false)); } + + private SUB sub_(T e, boolean inline) { + return new SUB("sub", e, opt(true, inline, false)); } + + private SUP sup_(T e, boolean inline) { + return new SUP("sup", e, opt(true, inline, false)); } + + private IMG img_(T e, boolean inline) { + return new IMG("img", e, opt(false, inline, false)); } + + private EM em_(T e, boolean inline) { + return new EM("em", e, opt(true, inline, false)); } + + private STRONG strong_(T e, boolean inline) { + return new STRONG("strong", e, opt(true, inline, false)); } + + private DFN dfn_(T e, boolean inline) { + return new DFN("dfn", e, opt(true, inline, false)); } + + private CODE code_(T e, boolean inline) { + return new CODE("code", e, opt(true, inline, false)); } + + private SAMP samp_(T e, boolean inline) { + return new SAMP("samp", e, opt(true, inline, false)); } + + private KBD kbd_(T e, boolean inline) { + return new KBD("kbd", e, opt(true, inline, false)); } + + private VAR var_(T e, boolean inline) { + return new VAR("var", e, opt(true, inline, false)); } + + private CITE cite_(T e, boolean inline) { + return new CITE("cite", e, opt(true, inline, false)); } + + private ABBR abbr_(T e, boolean inline) { + return new ABBR("abbr", e, opt(true, inline, false)); } + + private B b_(T e, boolean inline) { + return new B("b", e, opt(true, inline, false)); } + + private I i_(T e, boolean inline) { + return new I("i", e, opt(true, inline, false)); } + + private SMALL small_(T e, boolean inline) { + return new SMALL("small", e, opt(true, inline, false)); } + + private PRE pre_(T e, boolean inline) { + return new PRE("pre", e, opt(true, inline, true)); } + + private UL ul_(T e, 
boolean inline) { + return new UL("ul", e, opt(true, inline, false)); } + + private OL ol_(T e, boolean inline) { + return new OL("ol", e, opt(true, inline, false)); } + + private H1 h1_(T e, boolean inline) { + return new H1("h1", e, opt(true, inline, false)); } + + private H2 h2_(T e, boolean inline) { + return new H2("h2", e, opt(true, inline, false)); } + + private H3 h3_(T e, boolean inline) { + return new H3("h3", e, opt(true, inline, false)); } + + private H4 h4_(T e, boolean inline) { + return new H4("h4", e, opt(true, inline, false)); } + + private H5 h5_(T e, boolean inline) { + return new H5("h5", e, opt(true, inline, false)); } + + private H6 h6_(T e, boolean inline) { + return new H6("h6", e, opt(true, inline, false)); } + + private STYLE style_(T e, boolean inline) { + return new STYLE("style", e, opt(true, inline, false)); } + + private LINK link_(T e, boolean inline) { + return new LINK("link", e, opt(false, inline, false)); } + + private META meta_(T e, boolean inline) { + return new META("meta", e, opt(false, inline, false)); } + + private OBJECT object_(T e, boolean inline) { + return new OBJECT("object", e, opt(true, inline, false)); } + + private SCRIPT script_(T e, boolean inline) { + return new SCRIPT("script", e, opt(true, inline, false)); } + + @Override + public HEAD head() { + return head_(this, false); + } + + @Override + public BODY body() { + return body_(this, false); + } + + @Override + public BODY body(String selector) { + return setSelector(body(), selector); + } + + @Override + public BASE base() { + return base_(this, false); + } + + @Override + public Hamlet base(String href) { + return base().$href(href).__(); + } + + @Override + public TITLE title() { + return title_(this, false); + } + + @Override + public Hamlet title(String cdata) { + return title().__(cdata).__(); + } + + @Override + public STYLE style() { + return style_(this, false); + } + + @Override + public Hamlet style(Object... 
lines) { + return style().$type("text/css").__(lines).__(); + } + + @Override + public LINK link() { + return link_(this, false); + } + + @Override + public Hamlet link(String href) { + return setLinkHref(link(), href).__(); + } + + @Override + public META meta() { + return meta_(this, false); + } + + @Override + public Hamlet meta(String name, String content) { + return meta().$name(name).$content(content).__(); + } + + @Override + public Hamlet meta_http(String header, String content) { + return meta().$http_equiv(header).$content(content).__(); + } + + @Override + public SCRIPT script() { + return script_(this, false); + } + + @Override + public Hamlet script(String src) { + return setScriptSrc(script(), src).__(); + } + + @Override + public OBJECT object() { + return object_(this, true); + } + + @Override + public OBJECT object(String selector) { + return setSelector(object(), selector); + } + + @Override + public TABLE table() { + return table_(this, false); + } + + @Override + public TABLE table(String selector) { + return setSelector(table(), selector); + } + + @Override + public Hamlet address(String cdata) { + return address().__(cdata).__(); + } + + @Override + public ADDRESS address() { + return address_(this, false); + } + + @Override + public P p(String selector) { + return setSelector(p(), selector); + } + + @Override + public P p() { + return p_(this, false); + } + + @Override + public Hamlet __(Class cls) { + subView(cls); + return this; + } + + @Override + public HR hr() { + return hr_(this, false); + } + + @Override + public Hamlet hr(String selector) { + return setSelector(hr(), selector).__(); + } + + @Override + public DL dl(String selector) { + return setSelector(dl(), selector); + } + + @Override + public DL dl() { + return dl_(this, false); + } + + @Override + public DIV div(String selector) { + return setSelector(div(), selector); + } + + @Override + public DIV div() { + return div_(this, false); + } + + @Override + public BLOCKQUOTE blockquote() { + return blockquote_(this, false); + } + + @Override + public BLOCKQUOTE bq() { + return blockquote_(this, false); + } + + @Override + public Hamlet h1(String cdata) { + return h1().__(cdata).__(); + } + + @Override + public H1 h1() { + return h1_(this, false); + } + + @Override + public Hamlet h1(String selector, String cdata) { + return setSelector(h1(), selector).__(cdata).__(); + } + + @Override + public Hamlet h2(String cdata) { + return h2().__(cdata).__(); + } + + @Override + public H2 h2() { + return h2_(this, false); + } + + @Override + public Hamlet h2(String selector, String cdata) { + return setSelector(h2(), selector).__(cdata).__(); + } + + @Override + public H3 h3() { + return h3_(this, false); + } + + @Override + public Hamlet h3(String cdata) { + return h3().__(cdata).__(); + } + + @Override + public Hamlet h3(String selector, String cdata) { + return setSelector(h3(), selector).__(cdata).__(); + } + + @Override + public H4 h4() { + return h4_(this, false); + } + + @Override + public Hamlet h4(String cdata) { + return h4().__(cdata).__(); + } + + @Override + public Hamlet h4(String selector, String cdata) { + return setSelector(h4(), selector).__(cdata).__(); + } + + @Override + public H5 h5() { + return h5_(this, false); + } + + @Override + public Hamlet h5(String cdata) { + return h5().__(cdata).__(); + } + + @Override + public Hamlet h5(String selector, String cdata) { + return setSelector(h5(), selector).__(cdata).__(); + } + + @Override + public H6 h6() { + return h6_(this, false); + } + + @Override 
+ public Hamlet h6(String cdata) { + return h6().__(cdata).__(); + } + + @Override + public Hamlet h6(String selector, String cdata) { + return setSelector(h6(), selector).__(cdata).__(); + } + + @Override + public UL ul() { + return ul_(this, false); + } + + @Override + public UL ul(String selector) { + return setSelector(ul(), selector); + } + + @Override + public OL ol() { + return ol_(this, false); + } + + @Override + public OL ol(String selector) { + return setSelector(ol(), selector); + } + + @Override + public PRE pre() { + return pre_(this, false); + } + + @Override + public PRE pre(String selector) { + return setSelector(pre(), selector); + } + + @Override + public FORM form() { + return form_(this, false); + } + + @Override + public FORM form(String selector) { + return setSelector(form(), selector); + } + + @Override + public FIELDSET fieldset() { + return fieldset_(this, false); + } + + @Override + public FIELDSET fieldset(String selector) { + return setSelector(fieldset(), selector); + } + + @Override + public INS ins() { + return ins_(this, false); + } + + @Override + public Hamlet ins(String cdata) { + return ins().__(cdata).__(); + } + + @Override + public DEL del() { + return del_(this, false); + } + + @Override + public Hamlet del(String cdata) { + return del().__(cdata).__(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java new file mode 100644 index 00000000000..c6ca93c597c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletGen.java @@ -0,0 +1,449 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.webapp.hamlet2; + +import com.google.common.collect.Sets; + +import java.io.IOException; +import java.io.PrintWriter; +import java.lang.annotation.Annotation; +import java.lang.reflect.Method; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.Set; +import java.util.regex.Pattern; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Options; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.webapp.WebAppException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Generates a specific hamlet implementation class from a spec class + * using a generic hamlet implementation class. 
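For orientation, a hedged sketch of driving this generator programmatically through the generate() entry point defined just below; the spec/impl classes mirror the defaults handled in main() at the end of this file, while the output class and package names here are illustrative only, not part of the patch:

    // Hypothetical driver: regenerate the concrete builder from the spec and the
    // generic implementation. generate() writes "<outputName>.java" into the
    // current working directory.
    public class RegenerateHamlet {
      public static void main(String[] args) throws Exception {
        new HamletGen().generate(
            HamletSpec.class,                          // spec interfaces
            HamletImpl.class,                          // generic implementation
            "Hamlet",                                  // output class simple name
            "org.apache.hadoop.yarn.webapp.hamlet2");  // output package
      }
    }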
+ */ +@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) +public class HamletGen { + static final Logger LOG = LoggerFactory.getLogger(HamletGen.class); + static final Options opts = new Options(); + static { + opts.addOption("h", "help", false, "Print this help message"). + addOption("s", "spec-class", true, + "The class that holds the spec interfaces. e.g. HamletSpec"). + addOption("i", "impl-class", true, + "An implementation class. e.g. HamletImpl"). + addOption("o", "output-class", true, "Output class name"). + addOption("p", "output-package", true, "Output package name"); + }; + + static final Pattern elementRegex = Pattern.compile("^[A-Z][A-Z0-9]*$"); + + int bytes = 0; + PrintWriter out; + final Set endTagOptional = Sets.newHashSet(); + final Set inlineElements = Sets.newHashSet(); + Class top; // html top-level interface + String hamlet; // output class simple name; + boolean topMode; + + /** + * Generate a specific Hamlet implementation from a spec. + * @param specClass holds hamlet interfaces. e.g. {@link HamletSpec} + * @param implClass a generic hamlet implementation. e.g. {@link HamletImpl} + * @param outputName name of the output class. e.g. {@link Hamlet} + * @param outputPkg package name of the output class. + * @throws IOException + */ + public void generate(Class specClass, Class implClass, + String outputName, String outputPkg) throws IOException { + LOG.info("Generating {} using {} and {}", new Object[]{outputName, + specClass, implClass}); + out = new PrintWriter(outputName +".java", "UTF-8"); + hamlet = basename(outputName); + String pkg = pkgName(outputPkg, implClass.getPackage().getName()); + puts(0, "// Generated by HamletGen. Do NOT edit!\n", + "package ", pkg, ";\n", + "import java.io.PrintWriter;\n", + "import java.util.EnumSet;\n", + "import static java.util.EnumSet.*;\n", + "import static ", implClass.getName(), ".EOpt.*;\n", + "import org.apache.hadoop.yarn.webapp.SubView;"); + String implClassName = implClass.getSimpleName(); + if (!implClass.getPackage().getName().equals(pkg)) { + puts(0, "import ", implClass.getName(), ';'); + } + puts(0, "\n", + "public class ", hamlet, " extends ", implClassName, + " implements ", specClass.getSimpleName(), "._Html {\n", + " public ", hamlet, "(PrintWriter out, int nestLevel,", + " boolean wasInline) {\n", + " super(out, nestLevel, wasInline);\n", + " }\n\n", // inline is context sensitive + " static EnumSet opt(boolean endTag, boolean inline, ", + "boolean pre) {\n", + " EnumSet opts = of(ENDTAG);\n", + " if (!endTag) opts.remove(ENDTAG);\n", + " if (inline) opts.add(INLINE);\n", + " if (pre) opts.add(PRE);\n", + " return opts;\n", + " }"); + initLut(specClass); + genImpl(specClass, implClassName, 1); + LOG.info("Generating {} methods", hamlet); + genMethods(hamlet, top, 1); + puts(0, "}"); + out.close(); + LOG.info("Wrote {} bytes to {}.java", bytes, outputName); + } + + String basename(String path) { + return path.substring(path.lastIndexOf('/') + 1); + } + + String pkgName(String pkg, String defaultPkg) { + if (pkg == null || pkg.isEmpty()) return defaultPkg; + return pkg; + } + + void initLut(Class spec) { + endTagOptional.clear(); + inlineElements.clear(); + for (Class cls : spec.getClasses()) { + Annotation a = cls.getAnnotation(HamletSpec.Element.class); + if (a != null && !((HamletSpec.Element) a).endTag()) { + endTagOptional.add(cls.getSimpleName()); + } + if (cls.getSimpleName().equals("Inline")) { + for (Method method : cls.getMethods()) { + String retName = method.getReturnType().getSimpleName(); + if 
(isElement(retName)) { + inlineElements.add(retName); + } + } + } + } + } + + void genImpl(Class spec, String implClassName, int indent) { + String specName = spec.getSimpleName(); + for (Class cls : spec.getClasses()) { + String className = cls.getSimpleName(); + if (cls.isInterface()) { + genFactoryMethods(cls, indent); + } + if (isElement(className)) { + LOG.info("Generating class {}", className); + puts(indent, "\n", + "public class ", className, "", + " extends EImp implements ", specName, ".", className, " {\n", + " public ", className, "(String name, T parent,", + " EnumSet opts) {\n", + " super(name, parent, opts);\n", + " }"); + genMethods(className, cls, indent + 1); + puts(indent, "}"); + } else if (className.equals("_Html")) { + top = cls; + } + } + } + + void genFactoryMethods(Class cls, int indent) { + for (Method method : cls.getDeclaredMethods()) { + String retName = method.getReturnType().getSimpleName(); + String methodName = method.getName(); + if (methodName.charAt(0) == '$') continue; + if (isElement(retName) && method.getParameterTypes().length == 0) { + genFactoryMethod(retName, methodName, indent); + } + } + } + + void genMethods(String className, Class cls, int indent) { + topMode = (top != null && cls.equals(top)); + for (Method method : cls.getMethods()) { + String retName = method.getReturnType().getSimpleName(); + if (method.getName().charAt(0) == '$') { + genAttributeMethod(className, method, indent); + } else if (isElement(retName)) { + genNewElementMethod(className, method, indent); + } else { + genCurElementMethod(className, method, indent); + } + } + } + + void genAttributeMethod(String className, Method method, int indent) { + String methodName = method.getName(); + String attrName = methodName.substring(1).replace("__", "-"); + Type[] params = method.getGenericParameterTypes(); + echo(indent, "\n", + "@Override\n", + "public ", className, topMode ? " " : " ", methodName, "("); + if (params.length == 0) { + puts(0, ") {"); + puts(indent, + " addAttr(\"", attrName, "\", null);\n", + " return this;\n", "}"); + } else if (params.length == 1) { + String typeName = getTypeName(params[0]); + puts(0, typeName, " value) {"); + if (typeName.equals("EnumSet")) { + puts(indent, + " addRelAttr(\"", attrName, "\", value);\n", + " return this;\n", "}"); + } else if (typeName.equals("EnumSet")) { + puts(indent, + " addMediaAttr(\"", attrName, "\", value);\n", + " return this;\n", "}"); + } else { + puts(indent, + " addAttr(\"", attrName, "\", value);\n", + " return this;\n", "}"); + } + } else { + throwUnhandled(className, method); + } + } + + String getTypeName(Type type) { + if (type instanceof Class) { + return ((Class)type).getSimpleName(); + } + ParameterizedType pt = (ParameterizedType) type; + return ((Class)pt.getRawType()).getSimpleName() +"<"+ + ((Class)pt.getActualTypeArguments()[0]).getSimpleName() +">"; + } + + void genFactoryMethod(String retName, String methodName, int indent) { + puts(indent, "\n", + "private ", retName, " ", methodName, + "__(T e, boolean inline) {\n", + " return new ", retName, "(\"", StringUtils.toLowerCase(retName), + "\", e, opt(", !endTagOptional.contains(retName), ", inline, ", + retName.equals("PRE"), ")); }"); + } + + void genNewElementMethod(String className, Method method, int indent) { + String methodName = method.getName(); + String retName = method.getReturnType().getSimpleName(); + Class[] params = method.getParameterTypes(); + echo(indent, "\n", + "@Override\n", + "public ", retName, "<", className, topMode ? 
"> " : "> ", + methodName, "("); + if (params.length == 0) { + puts(0, ") {"); + puts(indent, + topMode ? "" : " closeAttrs();\n", + " return ", StringUtils.toLowerCase(retName), "__" + "(this, ", + isInline(className, retName), ");\n", "}"); + } else if (params.length == 1) { + puts(0, "String selector) {"); + puts(indent, + " return setSelector(", methodName, "(), selector);\n", "}"); + } else { + throwUnhandled(className, method); + } + } + + boolean isInline(String container, String className) { + if ((container.equals("BODY") || container.equals(hamlet) || + container.equals("HEAD") || container.equals("HTML")) && + (className.equals("INS") || className.equals("DEL") || + className.equals("SCRIPT"))) { + return false; + } + return inlineElements.contains(className); + } + + void genCurElementMethod(String className, Method method, int indent) { + String methodName = method.getName(); + Class[] params = method.getParameterTypes(); + if (topMode || params.length > 0) { + echo(indent, "\n", + "@Override\n", + "public ", className, topMode ? " " : " ", methodName, "("); + } + if (params.length == 0) { + if (topMode) { + puts(0, ") {"); + puts(indent, " return this;\n", "}"); + } + } else if (params.length == 1) { + if (methodName.equals("base")) { + puts(0, "String href) {"); + puts(indent, + " return base().$href(href).__();\n", "}"); + } else if (methodName.equals("script")) { + puts(0, "String src) {"); + puts(indent, + " return setScriptSrc(script(), src).__();\n", "}"); + } else if (methodName.equals("style")) { + puts(0, "Object... lines) {"); + puts(indent, + " return style().$type(\"text/css\").__(lines).__();\n", "}"); + } else if (methodName.equals("img")) { + puts(0, "String src) {"); + puts(indent, + " return ", methodName, "().$src(src).__();\n", "}"); + } else if (methodName.equals("br") || methodName.equals("hr") || + methodName.equals("col")) { + puts(0, "String selector) {"); + puts(indent, + " return setSelector(", methodName, "(), selector).__();\n", "}"); + } else if (methodName.equals("link")) { + puts(0, "String href) {"); + puts(indent, + " return setLinkHref(", methodName, "(), href).__();\n", "}"); + } else if (methodName.equals("__")) { + if (params[0].getSimpleName().equals("Class")) { + puts(0, "Class cls) {"); + puts(indent, + " ", topMode ? "subView" : "_v", "(cls);\n", + " return this;\n", "}"); + } else { + puts(0, "Object... lines) {"); + puts(indent, + " _p(", needsEscaping(className), ", lines);\n", + " return this;\n", "}"); + } + } else if (methodName.equals("_r")) { + puts(0, "Object... 
lines) {"); + puts(indent, + " _p(false, lines);\n", + " return this;\n", "}"); + } else { + puts(0, "String cdata) {"); + puts(indent, + " return ", methodName, "().__(cdata).__();\n", "}"); + } + } else if (params.length == 2) { + if (methodName.equals("meta")) { + puts(0, "String name, String content) {"); + puts(indent, + " return meta().$name(name).$content(content).__();\n", "}"); + } else if (methodName.equals("meta_http")) { + puts(0, "String header, String content) {"); + puts(indent, + " return meta().$http_equiv(header).$content(content).__();\n", + "}"); + } else if (methodName.equals("a")) { + puts(0, "String href, String anchorText) {"); + puts(indent, + " return a().$href(href).__(anchorText).__();\n", "}"); + } else if (methodName.equals("bdo")) { + puts(0, "Dir dir, String cdata) {"); + puts(indent, " return bdo().$dir(dir).__(cdata).__();\n", "}"); + } else if (methodName.equals("label")) { + puts(0, "String forId, String cdata) {"); + puts(indent, " return label().$for(forId).__(cdata).__();\n", "}"); + } else if (methodName.equals("param")) { + puts(0, "String name, String value) {"); + puts(indent, + " return param().$name(name).$value(value).__();\n", "}"); + } else { + puts(0, "String selector, String cdata) {"); + puts(indent, + " return setSelector(", methodName, + "(), selector).__(cdata).__();\n", "}"); + } + } else if (params.length == 3) { + if (methodName.equals("a")) { + puts(0, "String selector, String href, String anchorText) {"); + puts(indent, + " return setSelector(a(), selector)", + ".$href(href).__(anchorText).__();\n", "}"); + } + } else { + throwUnhandled(className, method); + } + } + + static boolean needsEscaping(String eleName) { + return !eleName.equals("SCRIPT") && !eleName.equals("STYLE"); + } + + static void throwUnhandled(String className, Method method) { + throw new WebAppException("Unhandled " + className + "#" + method); + } + + void echo(int indent, Object... args) { + String prev = null; + for (Object o : args) { + String s = String.valueOf(o); + if (!s.isEmpty() && !s.equals("\n") && + (prev == null || prev.endsWith("\n"))) { + indent(indent); + } + prev = s; + out.print(s); + bytes += s.length(); + } + } + + void indent(int indent) { + for (int i = 0; i < indent; ++i) { + out.print(" "); + bytes += 2; + } + } + + void puts(int indent, Object... 
args) { + echo(indent, args); + out.println(); + ++bytes; + } + + boolean isElement(String s) { + return elementRegex.matcher(s).matches(); + } + + public static void main(String[] args) throws Exception { + CommandLine cmd = new GnuParser().parse(opts, args); + if (cmd.hasOption("help")) { + new HelpFormatter().printHelp("Usage: hbgen [OPTIONS]", opts); + return; + } + // defaults + Class specClass = HamletSpec.class; + Class implClass = HamletImpl.class; + String outputClass = "HamletTmp"; + String outputPackage = implClass.getPackage().getName(); + if (cmd.hasOption("spec-class")) { + specClass = Class.forName(cmd.getOptionValue("spec-class")); + } + if (cmd.hasOption("impl-class")) { + implClass = Class.forName(cmd.getOptionValue("impl-class")); + } + if (cmd.hasOption("output-class")) { + outputClass = cmd.getOptionValue("output-class"); + } + if (cmd.hasOption("output-package")) { + outputPackage = cmd.getOptionValue("output-package"); + } + new HamletGen().generate(specClass, implClass, outputClass, outputPackage); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java new file mode 100644 index 00000000000..995e9fb4912 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java @@ -0,0 +1,385 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.webapp.hamlet2; + +import com.google.common.base.Joiner; +import static com.google.common.base.Preconditions.*; +import com.google.common.base.Splitter; +import com.google.common.collect.Iterables; + +import java.io.PrintWriter; +import java.util.EnumSet; +import static java.util.EnumSet.*; +import java.util.Iterator; + +import static org.apache.commons.lang.StringEscapeUtils.*; +import static org.apache.hadoop.yarn.webapp.hamlet2.HamletImpl.EOpt.*; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.yarn.webapp.SubView; +import org.apache.hadoop.yarn.webapp.WebAppException; + + +/** + * A simple unbuffered generic hamlet implementation. + * + * Zero copy but allocation on every element, which could be + * optimized to use a thread-local element pool. + * + * Prints HTML as it builds. So the order is important. + */ +@InterfaceAudience.Private +public class HamletImpl extends HamletSpec { + private static final String INDENT_CHARS = " "; + private static final Splitter SS = Splitter.on('.'). 
+ omitEmptyStrings().trimResults(); + private static final Joiner SJ = Joiner.on(' '); + private static final Joiner CJ = Joiner.on(", "); + static final int S_ID = 0; + static final int S_CLASS = 1; + + int nestLevel; + int indents; // number of indent() called. mostly for testing. + private final PrintWriter out; + private final StringBuilder sb = new StringBuilder(); // not shared + private boolean wasInline = false; + + /** + * Element options. (whether it needs end tag, is inline etc.) + */ + public enum EOpt { + /** needs end(close) tag */ + ENDTAG, + /** The content is inline */ + INLINE, + /** The content is preformatted */ + PRE + }; + + /** + * The base class for elements + * @param type of the parent (containing) element for the element + */ + public class EImp implements _Child { + private final String name; + private final T parent; // short cut for parent element + private final EnumSet opts; // element options + + private boolean started = false; + private boolean attrsClosed = false; + + EImp(String name, T parent, EnumSet opts) { + this.name = name; + this.parent = parent; + this.opts = opts; + } + + @Override + public T __() { + closeAttrs(); + --nestLevel; + printEndTag(name, opts); + return parent; + } + + protected void _p(boolean quote, Object... args) { + closeAttrs(); + for (Object s : args) { + if (!opts.contains(PRE)) { + indent(opts); + } + out.print(quote ? escapeHtml(String.valueOf(s)) + : String.valueOf(s)); + if (!opts.contains(INLINE) && !opts.contains(PRE)) { + out.println(); + } + } + } + + protected void _v(Class cls) { + closeAttrs(); + subView(cls); + } + + protected void closeAttrs() { + if (!attrsClosed) { + startIfNeeded(); + ++nestLevel; + out.print('>'); + if (!opts.contains(INLINE) && !opts.contains(PRE)) { + out.println(); + } + attrsClosed = true; + } + } + + protected void addAttr(String name, String value) { + checkState(!attrsClosed, "attribute added after content"); + startIfNeeded(); + printAttr(name, value); + } + + protected void addAttr(String name, Object value) { + addAttr(name, String.valueOf(value)); + } + + protected void addMediaAttr(String name, EnumSet media) { + // 6.13 comma-separated list + addAttr(name, CJ.join(media)); + } + + protected void addRelAttr(String name, EnumSet types) { + // 6.12 space-separated list + addAttr(name, SJ.join(types)); + } + + private void startIfNeeded() { + if (!started) { + printStartTag(name, opts); + started = true; + } + } + + protected void _inline(boolean choice) { + if (choice) { + opts.add(INLINE); + } else { + opts.remove(INLINE); + } + } + + protected void _endTag(boolean choice) { + if (choice) { + opts.add(ENDTAG); + } else { + opts.remove(ENDTAG); + } + } + + protected void _pre(boolean choice) { + if (choice) { + opts.add(PRE); + } else { + opts.remove(PRE); + } + } + } + + public class Generic extends EImp implements PCData { + Generic(String name, T parent, EnumSet opts) { + super(name, parent, opts); + } + + public Generic _inline() { + super._inline(true); + return this; + } + + public Generic _noEndTag() { + super._endTag(false); + return this; + } + + public Generic _pre() { + super._pre(true); + return this; + } + + public Generic _attr(String name, String value) { + addAttr(name, value); + return this; + } + + public Generic> _elem(String name, EnumSet opts) { + closeAttrs(); + return new Generic>(name, this, opts); + } + + public Generic> elem(String name) { + return _elem(name, of(ENDTAG)); + } + + @Override + public Generic __(Object... 
lines) { + _p(true, lines); + return this; + } + + @Override + public Generic _r(Object... lines) { + _p(false, lines); + return this; + } + } + + public HamletImpl(PrintWriter out, int nestLevel, boolean wasInline) { + this.out = out; + this.nestLevel = nestLevel; + this.wasInline = wasInline; + } + + public int nestLevel() { + return nestLevel; + } + + public boolean wasInline() { + return wasInline; + } + + public void setWasInline(boolean state) { + wasInline = state; + } + + public PrintWriter getWriter() { + return out; + } + + /** + * Create a root-level generic element. + * Mostly for testing purpose. + * @param type of the parent element + * @param name of the element + * @param opts {@link EOpt element options} + * @return the element + */ + public + Generic root(String name, EnumSet opts) { + return new Generic(name, null, opts); + } + + public Generic root(String name) { + return root(name, of(ENDTAG)); + } + + protected void printStartTag(String name, EnumSet opts) { + indent(opts); + sb.setLength(0); + out.print(sb.append('<').append(name).toString()); // for easier mock test + } + + protected void indent(EnumSet opts) { + if (opts.contains(INLINE) && wasInline) { + return; + } + if (wasInline) { + out.println(); + } + wasInline = opts.contains(INLINE) || opts.contains(PRE); + for (int i = 0; i < nestLevel; ++i) { + out.print(INDENT_CHARS); + } + ++indents; + } + + protected void printEndTag(String name, EnumSet opts) { + if (!opts.contains(ENDTAG)) { + return; + } + if (!opts.contains(PRE)) { + indent(opts); + } else { + wasInline = opts.contains(INLINE); + } + sb.setLength(0); + out.print(sb.append("').toString()); // ditto + if (!opts.contains(INLINE)) { + out.println(); + } + } + + protected void printAttr(String name, String value) { + sb.setLength(0); + sb.append(' ').append(name); + if (value != null) { + sb.append("=\"").append(escapeHtml(value)).append("\""); + } + out.print(sb.toString()); + } + + /** + * Sub-classes should override this to do something interesting. + * @param cls the sub-view class + */ + protected void subView(Class cls) { + indent(of(ENDTAG)); // not an inline view + sb.setLength(0); + out.print(sb.append('[').append(cls.getName()).append(']').toString()); + out.println(); + } + + /** + * Parse selector into id and classes + * @param selector in the form of (#id)?(.class)* + * @return an two element array [id, "space-separated classes"]. + * Either element could be null. + * @throws WebAppException when both are null or syntax error. + */ + public static String[] parseSelector(String selector) { + String[] result = new String[]{null, null}; + Iterable rs = SS.split(selector); + Iterator it = rs.iterator(); + if (it.hasNext()) { + String maybeId = it.next(); + if (maybeId.charAt(0) == '#') { + result[S_ID] = maybeId.substring(1); + if (it.hasNext()) { + result[S_CLASS] = SJ.join(Iterables.skip(rs, 1)); + } + } else { + result[S_CLASS] = SJ.join(rs); + } + return result; + } + throw new WebAppException("Error parsing selector: "+ selector); + } + + /** + * Set id and/or class attributes for an element. 
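Because parseSelector() and setSelector() above are the hinge of the selector convention, here is a small hedged illustration of their documented behaviour; the expected values are derived from the javadoc and the splitting logic above, not captured from a run:

    // "#content.main.small" -> id "content", classes "main small"
    String[] parts = HamletImpl.parseSelector("#content.main.small");
    // parts[0] (S_ID)    == "content"
    // parts[1] (S_CLASS) == "main small"

    // ".warn" -> no id, a single class
    String[] warn = HamletImpl.parseSelector(".warn");
    // warn[0] == null, warn[1] == "warn"

setSelector(e, selector) then applies the two slots to an element as $id(...) and $class(...), as described below.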
+ * @param type of the element + * @param e the element + * @param selector Haml form of "(#id)?(.class)*" + * @return the element + */ + public static E setSelector(E e, String selector) { + String[] res = parseSelector(selector); + if (res[S_ID] != null) { + e.$id(res[S_ID]); + } + if (res[S_CLASS] != null) { + e.$class(res[S_CLASS]); + } + return e; + } + + public static E setLinkHref(E e, String href) { + if (href.endsWith(".css")) { + e.$rel("stylesheet"); // required in html5 + } + e.$href(href); + return e; + } + + public static E setScriptSrc(E e, String src) { + if (src.endsWith(".js")) { + e.$type("text/javascript"); // required in html4 + } + e.$src(src); + return e; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java new file mode 100644 index 00000000000..8aeba93f098 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java @@ -0,0 +1,3101 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.webapp.hamlet2; + +import java.lang.annotation.*; +import java.util.EnumSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.yarn.webapp.SubView; + +/** + * HTML5 compatible HTML4 builder interfaces. + * + *

<p>Generated from HTML 4.01 strict DTD and HTML5 diffs.
+ * <br>cf. http://www.w3.org/TR/html4/
+ * <br>cf. http://www.w3.org/TR/html5-diff/
+ * <p>The omitted attributes and elements (from the 4.01 DTD)
+ * are for HTML5 compatibility.
+ *
+ * <p>Note, the common argument selector uses the same syntax as Haml/Sass:
+ * <pre>  selector ::= (#id)?(.class)*</pre>
+ * cf. http://haml-lang.com/
+ *
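A rough usage sketch of the selector shorthand above (illustrative only, not part of this patch): a single selector string sets both the id and the space-separated classes of an element, as split by parseSelector/setSelector in HamletImpl. The "html" root object and the exact rendered whitespace below are assumptions.

    // Hypothetical Hamlet block; "#nav" -> id="nav", ".menu.active" -> class="menu active".
    html.
      div("#nav.menu.active").
        h2("Links").
        ul().
          li().a("http://example.com", "Example").__().
        __().
      __();
    // Roughly renders:
    // <div id="nav" class="menu active">
    //   <h2>Links</h2>
    //   <ul><li><a href="http://example.com">Example</a></li></ul>
    // </div>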

The naming convention used in this class is slightly different from + * normal classes. A CamelCase interface corresponds to an entity in the DTD. + * _CamelCase is for internal refactoring. An element builder interface is in + * UPPERCASE, corresponding to an element definition in the DTD. $lowercase is + * used as attribute builder methods to differentiate from element builder + * methods. + */ +@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) +public class HamletSpec { + // The enum values are lowercase for better compression, + // while avoiding runtime conversion. + // cf. http://www.w3.org/Protocols/HTTP/Performance/Compression/HTMLCanon.html + // http://www.websiteoptimization.com/speed/tweak/lowercase/ + /** %Shape (case-insensitive) */ + public enum Shape { + /** + * rectangle + */ + rect, + /** + * circle + */ + circle, + /** + * polygon + */ + poly, + /** + * default + */ + Default + }; + + /** Values for the %18n dir attribute (case-insensitive) */ + public enum Dir { + /** + * left to right + */ + ltr, + /** + * right to left + */ + rtl + }; + + /** %MediaDesc (case-sensitive) */ + public enum Media { + /** + * computer screen + */ + screen, + /** + * teletype/terminal + */ + tty, + /** + * television + */ + tv, + /** + * projection + */ + projection, + /** + * mobile device + */ + handheld, + /** + * print media + */ + print, + /** + * braille + */ + braille, + /** + * aural + */ + aural, + /** + * suitable all media + */ + all + }; + + /** %LinkTypes (case-insensitive) */ + public enum LinkType { + /** + * + */ + alternate, + /** + * + */ + stylesheet, + /** + * + */ + start, + /** + * + */ + next, + /** + * + */ + prev, + /** + * + */ + contents, + /** + * + */ + index, + /** + * + */ + glossary, + /** + * + */ + copyright, + /** + * + */ + chapter, + /** + * + */ + section, + /** + * + */ + subsection, + /** + * + */ + appendix, + /** + * + */ + help, + /** + * + */ + bookmark + }; + + /** Values for form methods (case-insensitive) */ + public enum Method { + /** + * HTTP GET + */ + get, + /** + * HTTP POST + */ + post + }; + + /** %InputType (case-insensitive) */ + public enum InputType { + /** + * + */ + text, + /** + * + */ + password, + /** + * + */ + checkbox, + /** + * + */ + radio, + /** + * + */ + submit, + /** + * + */ + reset, + /** + * + */ + file, + /** + * + */ + hidden, + /** + * + */ + image, + /** + * + */ + button + }; + + /** Values for button types */ + public enum ButtonType { + /** + * + */ + button, + /** + * + */ + submit, + /** + * + */ + reset + }; + + /** %Scope (case-insensitive) */ + public enum Scope { + /** + * + */ + row, + /** + * + */ + col, + /** + * + */ + rowgroup, + /** + * + */ + colgroup + }; + + /** + * The element annotation for specifying element options other than + * attributes and allowed child elements + */ + @Target({ElementType.TYPE}) + @Retention(RetentionPolicy.RUNTIME) + public @interface Element { + /** + * Whether the start tag is required for the element. + * @return true if start tag is required + */ + boolean startTag() default true; + + /** + * Whether the end tag is required. + * @return true if end tag is required + */ + boolean endTag() default true; + } + + /** + * + */ + public interface __ {} + + /** + * + */ + public interface _Child extends __ { + /** + * Finish the current element. + * @return the parent element + */ + __ __(); + } + + /** + * + */ + public interface _Script { + /** + * Add a script element. 
+ * @return a script element builder + */ + SCRIPT script(); + + /** + * Add a script element + * @param src uri of the script + * @return the current element builder + */ + _Script script(String src); + } + + /** + * + */ + public interface _Object { + /** + * Add an object element. + * @return an object element builder + */ + OBJECT object(); + + /** + * Add an object element. + * @param selector as #id.class etc. + * @return an object element builder + */ + OBJECT object(String selector); + } + + /** %head.misc */ + public interface HeadMisc extends _Script, _Object { + /** + * Add a style element. + * @return a style element builder + */ + STYLE style(); + + /** + * Add a css style element. + * @param lines content of the style sheet + * @return the current element builder + */ + HeadMisc style(Object... lines); + + /** + * Add a meta element. + * @return a meta element builder + */ + META meta(); + + /** + * Add a meta element. + * Shortcut of meta().$name(name).$content(content).__(); + * @param name of the meta element + * @param content of the meta element + * @return the current element builder + */ + HeadMisc meta(String name, String content); + + /** + * Add a meta element with http-equiv attribute. + * Shortcut of
+ * meta().$http_equiv(header).$content(content).__(); + * @param header for the http-equiv attribute + * @param content of the header + * @return the current element builder + */ + HeadMisc meta_http(String header, String content); + + /** + * Add a link element. + * @return a link element builder + */ + LINK link(); + + /** + * Add a link element. + * Implementation should try to figure out type by the suffix of href. + * So link("style.css"); is a shortcut of + * link().$rel("stylesheet").$type("text/css").$href("style.css").__(); + * + * @param href of the link + * @return the current element builder + */ + HeadMisc link(String href); + } + + /** %heading */ + public interface Heading { + /** + * Add an H1 element. + * @return a new H1 element builder + */ + H1 h1(); + + /** + * Add a complete H1 element. + * @param cdata the content of the element + * @return the current element builder + */ + Heading h1(String cdata); + + /** + * Add a complete H1 element + * @param selector the css selector in the form of (#id)?(.class)* + * @param cdata the content of the element + * @return the current element builder + */ + Heading h1(String selector, String cdata); + + /** + * Add an H2 element. + * @return a new H2 element builder + */ + H2 h2(); + + /** + * Add a complete H2 element. + * @param cdata the content of the element + * @return the current element builder + */ + Heading h2(String cdata); + + /** + * Add a complete H1 element + * @param selector the css selector in the form of (#id)?(.class)* + * @param cdata the content of the element + * @return the current element builder + */ + Heading h2(String selector, String cdata); + + /** + * Add an H3 element. + * @return a new H3 element builder + */ + H3 h3(); + + /** + * Add a complete H3 element. + * @param cdata the content of the element + * @return the current element builder + */ + Heading h3(String cdata); + + /** + * Add a complete H1 element + * @param selector the css selector in the form of (#id)?(.class)* + * @param cdata the content of the element + * @return the current element builder + */ + Heading h3(String selector, String cdata); + + /** + * Add an H4 element. + * @return a new H4 element builder + */ + H4 h4(); + + /** + * Add a complete H4 element. + * @param cdata the content of the element + * @return the current element builder + */ + Heading h4(String cdata); + + /** + * Add a complete H4 element + * @param selector the css selector in the form of (#id)?(.class)* + * @param cdata the content of the element + * @return the current element builder + */ + Heading h4(String selector, String cdata); + + /** + * Add an H5 element. + * @return a new H5 element builder + */ + H5 h5(); + + /** + * Add a complete H5 element. + * @param cdata the content of the element + * @return the current element builder + */ + Heading h5(String cdata); + + /** + * Add a complete H5 element + * @param selector the css selector in the form of (#id)?(.class)* + * @param cdata the content of the element + * @return the current element builder + */ + Heading h5(String selector, String cdata); + + /** + * Add an H6 element. + * @return a new H6 element builder + */ + H6 h6(); + + /** + * Add a complete H6 element. + * @param cdata the content of the element + * @return the current element builder + */ + Heading h6(String cdata); + + /** + * Add a complete H6 element. 
+ * @param selector the css selector in the form of (#id)?(.class)* + * @param cdata the content of the element + * @return the current element builder + */ + Heading h6(String selector, String cdata); + } + + /** %list */ + public interface Listing { + + /** + * Add a UL (unordered list) element. + * @return a new UL element builder + */ + UL ul(); + + /** + * Add a UL (unordered list) element. + * @param selector the css selector in the form of (#id)?(.class)* + * @return a new UL element builder + */ + UL ul(String selector); + + /** + * Add a OL (ordered list) element. + * @return a new UL element builder + */ + OL ol(); + + /** + * Add a OL (ordered list) element. + * @param selector the css selector in the form of (#id)?(.class)* + * @return a new UL element builder + */ + OL ol(String selector); + } + + /** % preformatted */ + public interface Preformatted { + + /** + * Add a PRE (preformatted) element. + * @return a new PRE element builder + */ + PRE pre(); + + /** + * Add a PRE (preformatted) element. + * @param selector the css selector in the form of (#id)?(.class)* + * @return a new PRE element builder + */ + PRE pre(String selector); + } + + /** %coreattrs */ + public interface CoreAttrs { + /** document-wide unique id + * @param id the id + * @return the current element builder + */ + CoreAttrs $id(String id); + + /** space-separated list of classes + * @param cls the classes + * @return the current element builder + */ + CoreAttrs $class(String cls); + + /** associated style info + * @param style the style + * @return the current element builder + */ + CoreAttrs $style(String style); + + /** advisory title + * @param title the title + * @return the current element builder + */ + CoreAttrs $title(String title); + } + + /** %i18n */ + public interface I18nAttrs { + /** language code + * @param lang the code + * @return the current element builder + */ + I18nAttrs $lang(String lang); + + /** direction for weak/neutral text + * @param dir the {@link Dir} value + * @return the current element builder + */ + I18nAttrs $dir(Dir dir); + } + + /** %events */ + public interface EventsAttrs { + + /** a pointer button was clicked + * @param onclick the script + * @return the current element builder + */ + EventsAttrs $onclick(String onclick); + + /** a pointer button was double clicked + * @param ondblclick the script + * @return the current element builder + */ + EventsAttrs $ondblclick(String ondblclick); + + /** a pointer button was pressed down + * @param onmousedown the script + * @return the current element builder + */ + EventsAttrs $onmousedown(String onmousedown); + + /** a pointer button was released + * @param onmouseup the script + * @return the current element builder + */ + EventsAttrs $onmouseup(String onmouseup); + + /** a pointer was moved onto + * @param onmouseover the script + * @return the current element builder + */ + EventsAttrs $onmouseover(String onmouseover); + + /** a pointer was moved within + * @param onmousemove the script + * @return the current element builder + */ + EventsAttrs $onmousemove(String onmousemove); + + /** a pointer was moved away + * @param onmouseout the script + * @return the current element builder + */ + EventsAttrs $onmouseout(String onmouseout); + + /** a key was pressed and released + * @param onkeypress the script + * @return the current element builder + */ + EventsAttrs $onkeypress(String onkeypress); + + /** a key was pressed down + * @param onkeydown the script + * @return the current element builder + */ + EventsAttrs 
$onkeydown(String onkeydown); + + /** a key was released + * @param onkeyup the script + * @return the current element builder + */ + EventsAttrs $onkeyup(String onkeyup); + } + + /** %attrs */ + public interface Attrs extends CoreAttrs, I18nAttrs, EventsAttrs { + } + + /** Part of %pre.exclusion */ + public interface _FontSize extends _Child { + // BIG omitted cf. http://www.w3.org/TR/html5-diff/ + + /** + * Add a SMALL (small print) element + * @return a new SMALL element builder + */ + SMALL small(); + + /** + * Add a complete small (small print) element. + * Shortcut of: small().__(cdata).__(); + * @param cdata the content of the element + * @return the current element builder + */ + _FontSize small(String cdata); + + /** + * Add a complete small (small print) element. + * Shortcut of: small().$id(id).$class(class).__(cdata).__(); + * @param selector css selector in the form of (#id)?(.class)* + * @param cdata the content of the element + * @return the current element builder + */ + _FontSize small(String selector, String cdata); + } + + /** %fontstyle -(%pre.exclusion) */ + public interface _FontStyle extends _Child { + // TT omitted + + /** + * Add an I (italic, alt voice/mood) element. + * @return the new I element builder + */ + I i(); + + /** + * Add a complete I (italic, alt voice/mood) element. + * @param cdata the content of the element + * @return the current element builder + */ + _FontStyle i(String cdata); + + /** + * Add a complete I (italic, alt voice/mood) element. + * @param selector the css selector in the form of (#id)?(.class)* + * @param cdata the content of the element + * @return the current element builder + */ + _FontStyle i(String selector, String cdata); + + /** + * Add a new B (bold/important) element. + * @return a new B element builder + */ + B b(); + + /** + * Add a complete B (bold/important) element. + * @param cdata the content + * @return the current element builder + */ + _FontStyle b(String cdata); + + /** + * Add a complete B (bold/important) element. + * @param selector the css select (#id)?(.class)* + * @param cdata the content + * @return the current element builder + */ + _FontStyle b(String selector, String cdata); + } + + /** %fontstyle */ + public interface FontStyle extends _FontStyle, _FontSize { + } + + /** %phrase */ + public interface Phrase extends _Child { + + /** + * Add an EM (emphasized) element. + * @return a new EM element builder + */ + EM em(); + + /** + * Add an EM (emphasized) element. + * @param cdata the content + * @return the current element builder + */ + Phrase em(String cdata); + + /** + * Add an EM (emphasized) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + Phrase em(String selector, String cdata); + + /** + * Add a STRONG (important) element. + * @return a new STRONG element builder + */ + STRONG strong(); + + /** + * Add a complete STRONG (important) element. + * @param cdata the content + * @return the current element builder + */ + Phrase strong(String cdata); + + /** + * Add a complete STRONG (important) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + Phrase strong(String selector, String cdata); + + /** + * Add a DFN element. + * @return a new DFN element builder + */ + DFN dfn(); + + /** + * Add a complete DFN element. 
+ * @param cdata the content + * @return the current element builder + */ + Phrase dfn(String cdata); + + /** + * Add a complete DFN element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + Phrase dfn(String selector, String cdata); + + /** + * Add a CODE (code fragment) element. + * @return a new CODE element builder + */ + CODE code(); + + /** + * Add a complete CODE element. + * @param cdata the code + * @return the current element builder + */ + Phrase code(String cdata); + + /** + * Add a complete CODE element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the code + * @return the current element builder + */ + Phrase code(String selector, String cdata); + + /** + * Add a SAMP (sample) element. + * @return a new SAMP element builder + */ + SAMP samp(); + + /** + * Add a complete SAMP (sample) element. + * @param cdata the content + * @return the current element builder + */ + Phrase samp(String cdata); + + /** + * Add a complete SAMP (sample) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + Phrase samp(String selector, String cdata); + + /** + * Add a KBD (keyboard) element. + * @return a new KBD element builder + */ + KBD kbd(); + + /** + * Add a KBD (keyboard) element. + * @param cdata the content + * @return the current element builder + */ + Phrase kbd(String cdata); + + /** + * Add a KBD (keyboard) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + Phrase kbd(String selector, String cdata); + + /** + * Add a VAR (variable) element. + * @return a new VAR element builder + */ + VAR var(); + + /** + * Add a VAR (variable) element. + * @param cdata the content + * @return the current element builder + */ + Phrase var(String cdata); + + /** + * Add a VAR (variable) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + Phrase var(String selector, String cdata); + + /** + * Add a CITE element. + * @return a new CITE element builder + */ + CITE cite(); + + /** + * Add a CITE element. + * @param cdata the content + * @return the current element builder + */ + Phrase cite(String cdata); + + /** + * Add a CITE element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + Phrase cite(String selector, String cdata); + + /** + * Add an ABBR (abbreviation) element. + * @return a new ABBR element builder + */ + ABBR abbr(); + + /** + * Add a ABBR (abbreviation) element. + * @param cdata the content + * @return the current element builder + */ + Phrase abbr(String cdata); + + /** + * Add a ABBR (abbreviation) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + Phrase abbr(String selector, String cdata); + + // ACRONYM omitted, use ABBR + } + + /** Part of %pre.exclusion */ + public interface _ImgObject extends _Object, _Child { + + /** + * Add a IMG (image) element. + * @return a new IMG element builder + */ + IMG img(); + + /** + * Add a IMG (image) element. 
+ * @param src the source URL of the image + * @return the current element builder + */ + _ImgObject img(String src); + } + + /** Part of %pre.exclusion */ + public interface _SubSup extends _Child { + + /** + * Add a SUB (subscript) element. + * @return a new SUB element builder + */ + SUB sub(); + + /** + * Add a complete SUB (subscript) element. + * @param cdata the content + * @return the current element builder + */ + _SubSup sub(String cdata); + + /** + * Add a complete SUB (subscript) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + _SubSup sub(String selector, String cdata); + + /** + * Add a SUP (superscript) element. + * @return a new SUP element builder + */ + SUP sup(); + + /** + * Add a SUP (superscript) element. + * @param cdata the content + * @return the current element builder + */ + _SubSup sup(String cdata); + + /** + * Add a SUP (superscript) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + _SubSup sup(String selector, String cdata); + } + + /** + * + */ + public interface _Anchor { + + /** + * Add a A (anchor) element. + * @return a new A element builder + */ + A a(); + + /** + * Add a A (anchor) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new A element builder + */ + A a(String selector); + + /** Shortcut for a().$href(href).__(anchorText).__(); + * @param href the URI + * @param anchorText for the URI + * @return the current element builder + */ + _Anchor a(String href, String anchorText); + + /** Shortcut for a(selector).$href(href).__(anchorText).__(); + * @param selector in the form of (#id)?(.class)* + * @param href the URI + * @param anchorText for the URI + * @return the current element builder + */ + _Anchor a(String selector, String href, String anchorText); + } + + /** + * INS and DEL are unusual for HTML + * "in that they may serve as either block-level or inline elements + * (but not both)". + *
<br>cf. http://www.w3.org/TR/html4/struct/text.html#h-9.4
+ * <br>
cf. http://www.w3.org/TR/html5/edits.html#edits + */ + public interface _InsDel { + + /** + * Add an INS (insert) element. + * @return an INS element builder + */ + INS ins(); + + /** + * Add a complete INS element. + * @param cdata inserted data + * @return the current element builder + */ + _InsDel ins(String cdata); + + /** + * Add a DEL (delete) element. + * @return a DEL element builder + */ + DEL del(); + + /** + * Add a complete DEL element. + * @param cdata deleted data + * @return the current element builder + */ + _InsDel del(String cdata); + } + + /** %special -(A|%pre.exclusion) */ + public interface _Special extends _Script, _InsDel { + + /** + * Add a BR (line break) element. + * @return a new BR element builder + */ + BR br(); + + /** + * Add a BR (line break) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return the current element builder + */ + _Special br(String selector); + + /** + * Add a MAP element. + * @return a new MAP element builder + */ + MAP map(); + + /** + * Add a MAP element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new MAP element builder + */ + MAP map(String selector); + + /** + * Add a Q (inline quotation) element. + * @return a q (inline quotation) element builder + */ + Q q(); + + /** + * Add a complete Q element. + * @param cdata the content + * @return the current element builder + */ + _Special q(String cdata); + + /** + * Add a Q element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + _Special q(String selector, String cdata); + + /** + * Add a SPAN element. + * @return a new SPAN element builder + */ + SPAN span(); + + /** + * Add a SPAN element. + * @param cdata the content + * @return the current element builder + */ + _Special span(String cdata); + + /** + * Add a SPAN element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + _Special span(String selector, String cdata); + + /** + * Add a bdo (bidirectional override) element + * @return a bdo element builder + */ + BDO bdo(); + + /** + * Add a bdo (bidirectional override) element + * @param dir the direction of the text + * @param cdata the text + * @return the current element builder + */ + _Special bdo(Dir dir, String cdata); + } + + /** %special */ + public interface Special extends _Anchor, _ImgObject, _SubSup, _Special { + } + + /** + * + */ + public interface _Label extends _Child { + + /** + * Add a LABEL element. + * @return a new LABEL element builder + */ + LABEL label(); + + /** + * Add a LABEL element. + * Shortcut of label().$for(forId).__(cdata).__(); + * @param forId the for attribute + * @param cdata the content + * @return the current element builder + */ + _Label label(String forId, String cdata); + } + + /** + * + */ + public interface _FormCtrl { + + /** + * Add a INPUT element. + * @return a new INPUT element builder + */ + INPUT input(); + + /** + * Add a INPUT element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new INPUT element builder + */ + INPUT input(String selector); + + /** + * Add a SELECT element. + * @return a new SELECT element builder + */ + SELECT select(); + + /** + * Add a SELECT element. 
+ * @param selector the css selector in the form of (#id)*(.class)* + * @return a new SELECT element builder + */ + SELECT select(String selector); + + /** + * Add a TEXTAREA element. + * @return a new TEXTAREA element builder + */ + TEXTAREA textarea(); + + /** + * Add a TEXTAREA element. + * @param selector + * @return a new TEXTAREA element builder + */ + TEXTAREA textarea(String selector); + + /** + * Add a complete TEXTAREA element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + _FormCtrl textarea(String selector, String cdata); + + /** + * Add a BUTTON element. + * @return a new BUTTON element builder + */ + BUTTON button(); + + /** + * Add a BUTTON element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new BUTTON element builder + */ + BUTTON button(String selector); + + /** + * Add a complete BUTTON element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + _FormCtrl button(String selector, String cdata); + } + + /** %formctrl */ + public interface FormCtrl extends _Label, _FormCtrl { + } + + /** + * + */ + public interface _Content extends _Child { + /** + * Content of the element + * @param lines of content + * @return the current element builder + */ + _Content __(Object... lines); + } + + /** + * + */ + public interface _RawContent extends _Child { + /** + * Raw (no need to be HTML escaped) content + * @param lines of content + * @return the current element builder + */ + _RawContent _r(Object... lines); + } + + /** #PCDATA */ + public interface PCData extends _Content, _RawContent { + } + + /** %inline */ + public interface Inline extends PCData, FontStyle, Phrase, Special, FormCtrl { + } + + /** + * + */ + public interface I extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface B extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface SMALL extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface EM extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface STRONG extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface DFN extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface CODE extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface SAMP extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface KBD extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface VAR extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface CITE extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface ABBR extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface ACRONYM extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface SUB extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface SUP extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface SPAN extends Attrs, Inline, _Child { + } + + /** The dir attribute is required for the BDO element */ + public interface BDO extends CoreAttrs, I18nAttrs, Inline, _Child { + } + + /** + * + */ + @Element(endTag=false) + public interface BR extends CoreAttrs, _Child { + } + + /** + * + */ + public interface _Form { + + /** + * Add a FORM element. + * @return a new FORM element builder + */ + FORM form(); + + /** + * Add a FORM element. 
+ * @param selector the css selector in the form of (#id)*(.class)* + * @return a new FORM element builder + */ + FORM form(String selector); + } + + /** + * + */ + public interface _FieldSet { + + /** + * Add a FIELDSET element. + * @return a new FIELDSET element builder + */ + FIELDSET fieldset(); + + /** + * Add a FIELDSET element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new FIELDSET element builder + */ + FIELDSET fieldset(String selector); + } + + /** %block -(FORM|FIELDSET) */ + public interface _Block extends Heading, Listing, Preformatted { + + /** + * Add a P (paragraph) element. + * @return a new P element builder + */ + P p(); + + /** + * Add a P (paragraph) element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new P element builder + */ + P p(String selector); + + /** + * Add a DL (description list) element. + * @return a new DL element builder + */ + DL dl(); + + /** + * Add a DL element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new DL element builder + */ + DL dl(String selector); + + /** + * Add a DIV element. + * @return a new DIV element builder + */ + DIV div(); + + /** + * Add a DIV element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new DIV element builder + */ + DIV div(String selector); + + // NOSCRIPT omitted + // cf. http://www.w3.org/html/wg/tracker/issues/117 + + /** + * Add a BLOCKQUOTE element. + * @return a new BLOCKQUOTE element builder + */ + BLOCKQUOTE blockquote(); + + /** + * Alias of blockquote + * @return a new BLOCKQUOTE element builder + */ + BLOCKQUOTE bq(); + + /** + * Add a HR (horizontal rule) element. + * @return a new HR element builder + */ + HR hr(); + + /** + * Add a HR element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new HR element builder + */ + _Block hr(String selector); + + /** + * Add a TABLE element. + * @return a new TABLE element builder + */ + TABLE table(); + + /** + * Add a TABLE element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new TABLE element builder + */ + TABLE table(String selector); + + /** + * Add a ADDRESS element. + * @return a new ADDRESS element builder + */ + ADDRESS address(); + + /** + * Add a complete ADDRESS element. + * @param cdata the content + * @return the current element builder + */ + _Block address(String cdata); + + /** + * Embed a sub-view. + * @param cls the sub-view class + * @return the current element builder + */ + _Block __(Class cls); + } + + /** %block */ + public interface Block extends _Block, _Form, _FieldSet { + } + + /** %flow */ + public interface Flow extends Block, Inline { + } + + /** + * + */ + public interface _Body extends Block, _Script, _InsDel { + } + + /** + * + */ + public interface BODY extends Attrs, _Body, _Child { + + /** + * The document has been loaded. + * @param script to invoke + * @return the current element builder + */ + BODY $onload(String script); + + /** + * The document has been removed + * @param script to invoke + * @return the current element builder + */ + BODY $onunload(String script); + } + + /** + * + */ + public interface ADDRESS extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface DIV extends Attrs, Flow, _Child { + } + + /** + * + */ + public interface A extends Attrs, _Child, /* %inline -(A) */ + PCData, FontStyle, Phrase, _ImgObject, _Special, + _SubSup, FormCtrl { + // $charset omitted. 
+ + /** advisory content type + * @param cdata the content-type + * @return the current element builder + */ + A $type(String cdata); + + // $name omitted. use id instead. + /** URI for linked resource + * @param uri the URI + * @return the current element builder + */ + A $href(String uri); + + /** language code + * @param cdata the code + * @return the current element builder + */ + A $hreflang(String cdata); + + /** forward link types + * @param linkTypes the types + * @return the current element builder + */ + A $rel(EnumSet linkTypes); + + /** + * forward link types + * @param linkTypes space-separated list of link types + * @return the current element builder. + */ + A $rel(String linkTypes); + + // $rev omitted. Instead of rev="made", use rel="author" + + /** accessibility key character + * @param cdata the key + * @return the current element builder + */ + A $accesskey(String cdata); + + // $shape and coords omitted. use area instead of a for image maps. + /** position in tabbing order + * @param index the index + * @return the current element builder + */ + A $tabindex(int index); + + /** the element got the focus + * @param script to invoke + * @return the current element builder + */ + A $onfocus(String script); + + /** the element lost the focus + * @param script to invoke + * @return the current element builder + */ + A $onblur(String script); + } + + /** + * + */ + public interface MAP extends Attrs, Block, _Child { + + /** + * Add a AREA element. + * @return a new AREA element builder + */ + AREA area(); + + /** + * Add a AREA element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new AREA element builder + */ + AREA area(String selector); + + /** for reference by usemap + * @param name of the map + * @return the current element builder + */ + MAP $name(String name); + } + + /** + * + */ + @Element(endTag=false) + public interface AREA extends Attrs, _Child { + + /** controls interpretation of coords + * @param shape of the area + * @return the current element builder + */ + AREA $shape(Shape shape); + + /** comma-separated list of lengths + * @param cdata coords of the area + * @return the current element builder + */ + AREA $coords(String cdata); + + /** URI for linked resource + * @param uri the URI + * @return the current element builder + */ + AREA $href(String uri); + + // $nohref omitted./ + /** short description + * @param desc the description + * @return the current element builder + */ + AREA $alt(String desc); + + /** position in tabbing order + * @param index of the order + * @return the current element builder + */ + AREA $tabindex(int index); + + /** accessibility key character + * @param cdata the key + * @return the current element builder + */ + AREA $accesskey(String cdata); + + /** the element got the focus + * @param script to invoke + * @return the current element builder + */ + AREA $onfocus(String script); + + /** the element lost the focus + * @param script to invoke + * @return the current element builder + */ + AREA $onblur(String script); + } + + /** + * + */ + @Element(endTag=false) + public interface LINK extends Attrs, _Child { + // $charset omitted + /** URI for linked resource + * @param uri the URI + * @return the current element builder + */ + LINK $href(String uri); + + /** language code + * @param cdata the code + * @return the current element builder + */ + LINK $hreflang(String cdata); + + /** advisory content type + * @param cdata the type + * @return the current element builder + */ + LINK 
$type(String cdata); + + /** forward link types + * @param linkTypes the types + * @return the current element builder + */ + LINK $rel(EnumSet linkTypes); + + /** + * forward link types. + * @param linkTypes space-separated link types + * @return the current element builder + */ + LINK $rel(String linkTypes); + + // $rev omitted. Instead of rev="made", use rel="author" + + /** for rendering on these media + * @param mediaTypes the media types + * @return the current element builder + */ + LINK $media(EnumSet mediaTypes); + + /** + * for rendering on these media. + * @param mediaTypes comma-separated list of media + * @return the current element builder + */ + LINK $media(String mediaTypes); + } + + /** + * + */ + @Element(endTag=false) + public interface IMG extends Attrs, _Child { + + /** URI of image to embed + * @param uri the URI + * @return the current element builder + */ + IMG $src(String uri); + + /** short description + * @param desc the description + * @return the current element builder + */ + IMG $alt(String desc); + + // $longdesc omitted. use instead + // $name omitted. use id instead. + + /** override height + * @param pixels the height + * @return the current element builder + */ + IMG $height(int pixels); + + /** + * override height + * @param cdata the height (can use %, * etc.) + * @return the current element builder + */ + IMG $height(String cdata); + + /** override width + * @param pixels the width + * @return the current element builder + */ + IMG $width(int pixels); + + /** + * override width + * @param cdata the width (can use %, * etc.) + * @return the current element builder + */ + IMG $width(String cdata); + + /** use client-side image map + * @param uri the URI + * @return the current element builder + */ + IMG $usemap(String uri); + + /** use server-side image map + * @return the current element builder + */ + IMG $ismap(); + } + + /** + * + */ + public interface _Param extends _Child { + + /** + * Add a PARAM (parameter) element. + * @return a new PARAM element builder + */ + PARAM param(); + + /** + * Add a PARAM element. + * Shortcut of param().$name(name).$value(value).__(); + * @param name of the value + * @param value the value + * @return the current element builder + */ + _Param param(String name, String value); + } + + /** + * + */ + public interface OBJECT extends Attrs, _Param, Flow, _Child { + // $declare omitted. repeat element completely + + // $archive, classid, codebase, codetype ommited. use data and type + + /** reference to object's data + * @param uri the URI + * @return the current element builder + */ + OBJECT $data(String uri); + + /** content type for data + * @param contentType the type + * @return the current element builder + */ + OBJECT $type(String contentType); + + // $standby omitted. fix the resource instead. 
+ + /** override height + * @param pixels the height + * @return the current element builder + */ + OBJECT $height(int pixels); + + /** + * override height + * @param length the height (can use %, *) + * @return the current element builder + */ + OBJECT $height(String length); + + /** override width + * @param pixels the width + * @return the current element builder + */ + OBJECT $width(int pixels); + + /** + * override width + * @param length the height (can use %, *) + * @return the current element builder + */ + OBJECT $width(String length); + + /** use client-side image map + * @param uri the URI/name of the map + * @return the current element builder + */ + OBJECT $usemap(String uri); + + /** submit as part of form + * @param cdata the name of the object + * @return the current element builder + */ + OBJECT $name(String cdata); + + /** position in tabbing order + * @param index of the order + * @return the current element builder + */ + OBJECT $tabindex(int index); + } + + /** + * + */ + @Element(endTag=false) + public interface PARAM { + + /** document-wide unique id + * @param cdata the id + * @return the current element builder + */ + PARAM $id(String cdata); + + /** property name. Required. + * @param cdata the name + * @return the current element builder + */ + PARAM $name(String cdata); + + /** property value + * @param cdata the value + * @return the current element builder + */ + PARAM $value(String cdata); + + // $type and valuetype omitted + } + + /** + * + */ + @Element(endTag=false) + public interface HR extends Attrs, _Child { + } + + /** + * + */ + @Element(endTag=false) + public interface P extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface H1 extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface H2 extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface H3 extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface H4 extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface H5 extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface H6 extends Attrs, Inline, _Child { + } + + /** + * + */ + public interface PRE extends Attrs, _Child, /* (%inline;)* -(%pre.exclusion) */ + PCData, _FontStyle, Phrase, _Anchor, _Special, + FormCtrl { + } + + /** + * + */ + public interface Q extends Attrs, Inline, _Child { + + /** URI for source document or msg + * @param uri the URI + * @return the current element builder + */ + Q $cite(String uri); + } + + /** + * + */ + public interface BLOCKQUOTE extends Attrs, Block, _Script, _Child { + + /** URI for source document or msg + * @param uri the URI + * @return the current element builder + */ + BLOCKQUOTE $cite(String uri); + } + + /** + * @see _InsDel INS/DEL quirks. + */ + public interface INS extends Attrs, Flow, _Child { + /** info on reason for change + * @param uri + * @return the current element builder + */ + INS $cite(String uri); + + /** date and time of change + * @param datetime + * @return the current element builder + */ + INS $datetime(String datetime); + } + + /** + * @see _InsDel INS/DEL quirks. + */ + public interface DEL extends Attrs, Flow, _Child { + /** info on reason for change + * @param uri the info URI + * @return the current element builder + */ + DEL $cite(String uri); + + /** date and time of change + * @param datetime the time + * @return the current element builder + */ + DEL $datetime(String datetime); + } + + /** + * + */ + public interface _Dl extends _Child { + + /** + * Add a DT (term of the item) element. 
+ * @return a new DT element builder + */ + DT dt(); + + /** + * Add a complete DT element. + * @param cdata the content + * @return the current element builder + */ + _Dl dt(String cdata); + + /** + * Add a DD (definition/description) element. + * @return a new DD element builder + */ + DD dd(); + + /** + * Add a complete DD element. + * @param cdata the content + * @return the current element builder + */ + _Dl dd(String cdata); + } + + /** + * + */ + public interface DL extends Attrs, _Dl, _Child { + } + + /** + * + */ + @Element(endTag=false) + public interface DT extends Attrs, Inline, _Child { + } + + /** + * + */ + @Element(endTag=false) + public interface DD extends Attrs, Flow, _Child { + } + + /** + * + */ + public interface _Li extends _Child { + + /** + * Add a LI (list item) element. + * @return a new LI element builder + */ + LI li(); + + /** + * Add a LI element. + * @param cdata the content + * @return the current element builder + */ + _Li li(String cdata); + } + + /** + * + */ + public interface OL extends Attrs, _Li, _Child { + } + + /** + * + */ + public interface UL extends Attrs, _Li, _Child { + } + + /** + * + */ + @Element(endTag=false) + public interface LI extends Attrs, Flow, _Child { + } + + /** + * + */ + public interface FORM extends Attrs, _Child, /* (%block;|SCRIPT)+ -(FORM) */ + _Script, _Block, _FieldSet { + /** server-side form handler + * @param uri + * @return the current element builder + */ + FORM $action(String uri); + + /** HTTP method used to submit the form + * @param method + * @return the current element builder + */ + FORM $method(Method method); + + /** + * contentype for "POST" method. + * The default is "application/x-www-form-urlencoded". + * Use "multipart/form-data" for input type=file + * @param enctype + * @return the current element builder + */ + FORM $enctype(String enctype); + + /** list of MIME types for file upload + * @param cdata + * @return the current element builder + */ + FORM $accept(String cdata); + + /** name of form for scripting + * @param cdata + * @return the current element builder + */ + FORM $name(String cdata); + + /** the form was submitted + * @param script + * @return the current element builder + */ + FORM $onsubmit(String script); + + /** the form was reset + * @param script + * @return the current element builder + */ + FORM $onreset(String script); + + /** (space and/or comma separated) list of supported charsets + * @param cdata + * @return the current element builder + */ + FORM $accept_charset(String cdata); + } + + /** + * + */ + public interface LABEL extends Attrs, _Child, /* (%inline;)* -(LABEL) */ + PCData, FontStyle, Phrase, Special, _FormCtrl { + /** matches field ID value + * @param cdata + * @return the current element builder + */ + LABEL $for(String cdata); + + /** accessibility key character + * @param cdata + * @return the current element builder + */ + LABEL $accesskey(String cdata); + + /** the element got the focus + * @param script + * @return the current element builder + */ + LABEL $onfocus(String script); + + /** the element lost the focus + * @param script + * @return the current element builder + */ + LABEL $onblur(String script); + } + + /** + * + */ + @Element(endTag=false) + public interface INPUT extends Attrs, _Child { + /** what kind of widget is needed. default is "text". 
+ * @param inputType + * @return the current element builder + */ + INPUT $type(InputType inputType); + + /** submit as part of form + * @param cdata + * @return the current element builder + */ + INPUT $name(String cdata); + + /** Specify for radio buttons and checkboxes + * @param cdata + * @return the current element builder + */ + INPUT $value(String cdata); + + /** for radio buttons and check boxes + * @return the current element builder + */ + INPUT $checked(); + + /** unavailable in this context + * @return the current element builder + */ + INPUT $disabled(); + + /** for text and passwd + * @return the current element builder + */ + INPUT $readonly(); + + /** specific to each type of field + * @param cdata + * @return the current element builder + */ + INPUT $size(String cdata); + + /** max chars for text fields + * @param length + * @return the current element builder + */ + INPUT $maxlength(int length); + + /** for fields with images + * @param uri + * @return the current element builder + */ + INPUT $src(String uri); + + /** short description + * @param cdata + * @return the current element builder + */ + INPUT $alt(String cdata); + + // $usemap omitted. use img instead of input for image maps. + /** use server-side image map + * @return the current element builder + */ + INPUT $ismap(); + + /** position in tabbing order + * @param index + * @return the current element builder + */ + INPUT $tabindex(int index); + + /** accessibility key character + * @param cdata + * @return the current element builder + */ + INPUT $accesskey(String cdata); + + /** the element got the focus + * @param script + * @return the current element builder + */ + INPUT $onfocus(String script); + + /** the element lost the focus + * @param script + * @return the current element builder + */ + INPUT $onblur(String script); + + /** some text was selected + * @param script + * @return the current element builder + */ + INPUT $onselect(String script); + + /** the element value was changed + * @param script + * @return the current element builder + */ + INPUT $onchange(String script); + + /** list of MIME types for file upload (csv) + * @param contentTypes + * @return the current element builder + */ + INPUT $accept(String contentTypes); + } + + /** + * + */ + public interface _Option extends _Child { + /** + * Add a OPTION element. + * @return a new OPTION element builder + */ + OPTION option(); + + /** + * Add a complete OPTION element. + * @param cdata the content + * @return the current element builder + */ + _Option option(String cdata); + } + + /** + * + */ + public interface SELECT extends Attrs, _Option, _Child { + /** + * Add a OPTGROUP element. 
+ * @return a new OPTGROUP element builder + */ + OPTGROUP optgroup(); + + /** field name + * @param cdata + * @return the current element builder + */ + SELECT $name(String cdata); + + /** rows visible + * @param rows + * @return the current element builder + */ + SELECT $size(int rows); + + /** default is single selection + * @return the current element builder + */ + SELECT $multiple(); + + /** unavailable in this context + * @return the current element builder + */ + SELECT $disabled(); + + /** position in tabbing order + * @param index + * @return the current element builder + */ + SELECT $tabindex(int index); + + /** the element got the focus + * @param script + * @return the current element builder + */ + SELECT $onfocus(String script); + + /** the element lost the focus + * @param script + * @return the current element builder + */ + SELECT $onblur(String script); + + /** the element value was changed + * @param script + * @return the current element builder + */ + SELECT $onchange(String script); + } + + /** + * + */ + public interface OPTGROUP extends Attrs, _Option, _Child { + /** unavailable in this context + * @return the current element builder + */ + OPTGROUP $disabled(); + + /** for use in hierarchical menus + * @param cdata + * @return the current element builder + */ + OPTGROUP $label(String cdata); + } + + /** + * + */ + @Element(endTag=false) + public interface OPTION extends Attrs, PCData, _Child { + /** currently selected option + * @return the current element builder + */ + OPTION $selected(); + + /** unavailable in this context + * @return the current element builder + */ + OPTION $disabled(); + + /** for use in hierarchical menus + * @param cdata + * @return the current element builder + */ + OPTION $label(String cdata); + + /** defaults to element content + * @param cdata + * @return the current element builder + */ + OPTION $value(String cdata); + } + + /** + * + */ + public interface TEXTAREA extends Attrs, PCData, _Child { + /** variable name for the text + * @param cdata + * @return the current element builder + */ + TEXTAREA $name(String cdata); + + /** visible rows + * @param rows + * @return the current element builder + */ + TEXTAREA $rows(int rows); + + /** visible columns + * @param cols + * @return the current element builder + */ + TEXTAREA $cols(int cols); + + /** unavailable in this context + * @return the current element builder + */ + TEXTAREA $disabled(); + + /** text is readonly + * @return the current element builder + */ + TEXTAREA $readonly(); + + /** position in tabbing order + * @param index + * @return the current element builder + */ + TEXTAREA $tabindex(int index); + + /** accessibility key character + * @param cdata + * @return the current element builder + */ + TEXTAREA $accesskey(String cdata); + + /** the element got the focus + * @param script + * @return the current element builder + */ + TEXTAREA $onfocus(String script); + + /** the element lost the focus + * @param script + * @return the current element builder + */ + TEXTAREA $onblur(String script); + + /** some text was selected + * @param script + * @return the current element builder + */ + TEXTAREA $onselect(String script); + + /** the element value was changed + * @param script + * @return the current element builder + */ + TEXTAREA $onchange(String script); + } + + /** + * + */ + public interface _Legend extends _Child { + /** + * Add a LEGEND element. + * @return a new LEGEND element builder + */ + LEGEND legend(); + + /** + * Add a LEGEND element. 
+ * @param cdata + * @return the current element builder + */ + _Legend legend(String cdata); + } + + /** + * + */ + public interface FIELDSET extends Attrs, _Legend, PCData, Flow, _Child { + } + + /** + * + */ + public interface LEGEND extends Attrs, Inline, _Child { + /** accessibility key character + * @param cdata + * @return the current element builder + */ + LEGEND $accesskey(String cdata); + } + + /** + * + */ + public interface BUTTON extends /* (%flow;)* -(A|%formctrl|FORM|FIELDSET) */ + _Block, PCData, FontStyle, Phrase, _Special, _ImgObject, _SubSup, Attrs { + /** name of the value + * @param cdata + * @return the current element builder + */ + BUTTON $name(String cdata); + + /** sent to server when submitted + * @param cdata + * @return the current element builder + */ + BUTTON $value(String cdata); + + /** for use as form button + * @param type + * @return the current element builder + */ + BUTTON $type(ButtonType type); + + /** unavailable in this context + * @return the current element builder + */ + BUTTON $disabled(); + + /** position in tabbing order + * @param index + * @return the current element builder + */ + BUTTON $tabindex(int index); + + /** accessibility key character + * @param cdata + * @return the current element builder + */ + BUTTON $accesskey(String cdata); + + /** the element got the focus + * @param script + * @return the current element builder + */ + BUTTON $onfocus(String script); + + /** the element lost the focus + * @param script + * @return the current element builder + */ + BUTTON $onblur(String script); + } + + /** + * + */ + public interface _TableRow { + /** + * Add a TR (table row) element. + * @return a new TR element builder + */ + TR tr(); + + /** + * Add a TR element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new TR element builder + */ + TR tr(String selector); + } + + /** + * + */ + public interface _TableCol extends _Child { + /** + * Add a COL element. + * @return a new COL element builder + */ + COL col(); + + /** + * Add a COL element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return the current element builder + */ + _TableCol col(String selector); + } + + /** + * + */ + public interface _Table extends _TableRow, _TableCol { + /** + * Add a CAPTION element. + * @return a new CAPTION element builder + */ + CAPTION caption(); + + /** + * Add a CAPTION element. + * @param cdata + * @return the current element builder + */ + _Table caption(String cdata); + + /** + * Add a COLGROPU element. + * @return a new COLGROUP element builder + */ + COLGROUP colgroup(); + + /** + * Add a THEAD element. + * @return a new THEAD element builder + */ + THEAD thead(); + + /** + * Add a THEAD element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new THEAD element builder + */ + THEAD thead(String selector); + + /** + * Add a TFOOT element. + * @return a new TFOOT element builder + */ + TFOOT tfoot(); + + /** + * Add a TFOOT element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new TFOOT element builder + */ + TFOOT tfoot(String selector); + + /** + * Add a tbody (table body) element. + * Must be after thead/tfoot and no tr at the same level. + * @return a new tbody element builder + */ + TBODY tbody(); + + /** + * Add a TBODY element. 
+ * @param selector the css selector in the form of (#id)*(.class)* + * @return a new TBODY element builder + */ + TBODY tbody(String selector); + + // $summary, width, border, frame, rules, cellpadding, cellspacing omitted + // use css instead + } + /** + * TBODY should be used after THEAD/TFOOT, iff there're no TABLE.TR elements. + */ + public interface TABLE extends Attrs, _Table, _Child { + } + + /** + * + */ + public interface CAPTION extends Attrs, Inline, _Child { + } + + /** + * + */ + @Element(endTag=false) + public interface THEAD extends Attrs, _TableRow, _Child { + } + + /** + * + */ + @Element(endTag=false) + public interface TFOOT extends Attrs, _TableRow, _Child { + } + + /** + * + */ + public interface TBODY extends Attrs, _TableRow, _Child { + } + + /** + * + */ + @Element(endTag=false) + public interface COLGROUP extends Attrs, _TableCol, _Child { + /** default number of columns in group. default: 1 + * @param cols + * @return the current element builder + */ + COLGROUP $span(int cols); + + // $width omitted. use css instead. + } + + /** + * + */ + @Element(endTag=false) + public interface COL extends Attrs, _Child { + /** COL attributes affect N columns. default: 1 + * @param cols + * @return the current element builder + */ + COL $span(int cols); + // $width omitted. use css instead. + } + + /** + * + */ + public interface _Tr extends _Child { + /** + * Add a TH element. + * @return a new TH element builder + */ + TH th(); + + /** + * Add a complete TH element. + * @param cdata the content + * @return the current element builder + */ + _Tr th(String cdata); + + /** + * Add a TH element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + _Tr th(String selector, String cdata); + + /** + * Add a TD element. + * @return a new TD element builder + */ + TD td(); + + /** + * Add a TD element. + * @param cdata the content + * @return the current element builder + */ + _Tr td(String cdata); + + /** + * Add a TD element. + * @param selector the css selector in the form of (#id)*(.class)* + * @param cdata the content + * @return the current element builder + */ + _Tr td(String selector, String cdata); + } + + /** + * + */ + @Element(endTag=false) + public interface TR extends Attrs, _Tr, _Child { + } + + /** + * + */ + public interface _Cell extends Attrs, Flow, _Child { + // $abbr omited. begin cell text with terse text instead. + // use $title for elaberation, when appropriate. + // $axis omitted. use scope. + /** space-separated list of id's for header cells + * @param cdata + * @return the current element builder + */ + _Cell $headers(String cdata); + + /** scope covered by header cells + * @param scope + * @return the current element builder + */ + _Cell $scope(Scope scope); + + /** number of rows spanned by cell. default: 1 + * @param rows + * @return the current element builder + */ + _Cell $rowspan(int rows); + + /** number of cols spanned by cell. default: 1 + * @param cols + * @return the current element builder + */ + _Cell $colspan(int cols); + } + + /** + * + */ + @Element(endTag=false) + public interface TH extends _Cell { + } + + /** + * + */ + @Element(endTag=false) + public interface TD extends _Cell { + } + + /** + * + */ + public interface _Head extends HeadMisc { + /** + * Add a TITLE element. + * @return a new TITLE element builder + */ + TITLE title(); + + /** + * Add a TITLE element. 
+ * @param cdata the content + * @return the current element builder + */ + _Head title(String cdata); + + /** + * Add a BASE element. + * @return a new BASE element builder + */ + BASE base(); + + /** + * Add a complete BASE element. + * @param uri + * @return the current element builder + */ + _Head base(String uri); + } + + /** + * + */ + public interface HEAD extends I18nAttrs, _Head, _Child { + // $profile omitted + } + + /** + * + */ + public interface TITLE extends I18nAttrs, PCData, _Child { + } + + /** + * + */ + @Element(endTag=false) + public interface BASE extends _Child { + /** URI that acts as base URI + * @param uri + * @return the current element builder + */ + BASE $href(String uri); + } + + /** + * + */ + @Element(endTag=false) + public interface META extends I18nAttrs, _Child { + /** HTTP response header name + * @param header + * @return the current element builder + */ + META $http_equiv(String header); + + /** metainformation name + * @param name + * @return the current element builder + */ + META $name(String name); + + /** associated information + * @param cdata + * @return the current element builder + */ + META $content(String cdata); + + // $scheme omitted + } + + /** + * + */ + public interface STYLE extends I18nAttrs, _Content, _Child { + /** content type of style language + * @param cdata + * @return the current element builder + */ + STYLE $type(String cdata); + + /** designed for use with these media + * @param media + * @return the current element builder + */ + STYLE $media(EnumSet media); + + /** advisory title + * @param cdata + * @return the current element builder + */ + STYLE $title(String cdata); + } + + /** + * + */ + public interface SCRIPT extends _Content, _Child { + /** char encoding of linked resource + * @param cdata + * @return the current element builder + */ + SCRIPT $charset(String cdata); + + /** content type of script language + * @param cdata + * @return the current element builder + */ + SCRIPT $type(String cdata); + + /** URI for an external script + * @param cdata + * @return the current element builder + */ + SCRIPT $src(String cdata); + + /** UA may defer execution of script + * @param cdata + * @return the current element builder + */ + SCRIPT $defer(String cdata); + } + + /** + * + */ + public interface _Html extends _Head, _Body, __ { + /** + * Add a HEAD element. + * @return a new HEAD element builder + */ + HEAD head(); + + /** + * Add a BODY element. + * @return a new BODY element builder + */ + BODY body(); + + /** + * Add a BODY element. + * @param selector the css selector in the form of (#id)*(.class)* + * @return a new BODY element builder + */ + BODY body(String selector); + } + + // There is only one HEAD and BODY, in that order. + /** + * The root element + */ + public interface HTML extends I18nAttrs, _Html { + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/package-info.java new file mode 100644 index 00000000000..64a8447e024 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/package-info.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package is the replacement for org.apache.hadoop.yarn.webapp.hamlet. + * The old package is using _ as a one-character identifier, + * which is banned from JDK9. + */ +@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) +package org.apache.hadoop.yarn.webapp.hamlet2; +import org.apache.hadoop.classification.InterfaceAudience; + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java index 1da6e232ea3..0c7e09e4149 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java @@ -44,10 +44,9 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat; import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Times; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.PRE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.PRE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -84,12 +83,12 @@ public class AggregatedLogsBlock extends HtmlBlock { if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { html.h1() - ._("Aggregation is not enabled. Try the nodemanager at " + nodeId) - ._(); + .__("Aggregation is not enabled. Try the nodemanager at " + nodeId) + .__(); if(nmApplicationLogUrl != null) { html.h1() - ._("Or see application log at " + nmApplicationLogUrl) - ._(); + .__("Or see application log at " + nmApplicationLogUrl) + .__(); } return; } @@ -110,18 +109,18 @@ public class AggregatedLogsBlock extends HtmlBlock { .listStatus(remoteAppDir); } catch (FileNotFoundException fnf) { html.h1() - ._("Logs not available for " + logEntity + .__("Logs not available for " + logEntity + ". 
Aggregation may not be complete, " - + "Check back later or try the nodemanager at " + nodeId)._(); + + "Check back later or try the nodemanager at " + nodeId).__(); if(nmApplicationLogUrl != null) { html.h1() - ._("Or see application log at " + nmApplicationLogUrl) - ._(); + .__("Or see application log at " + nmApplicationLogUrl) + .__(); } return; } catch (Exception ex) { html.h1() - ._("Error getting logs at " + nodeId)._(); + .__("Error getting logs at " + nodeId).__(); return; } @@ -168,9 +167,9 @@ public class AggregatedLogsBlock extends HtmlBlock { if (callerUGI != null && !aclsManager.checkAccess(callerUGI, ApplicationAccessType.VIEW_APP, owner, applicationId)) { html.h1() - ._("User [" + remoteUser + .__("User [" + remoteUser + "] is not authorized to view the logs for " + logEntity - + " in log file [" + thisNodeFile.getPath().getName() + "]")._(); + + " in log file [" + thisNodeFile.getPath().getName() + "]").__(); LOG.error("User [" + remoteUser + "] is not authorized to view the logs for " + logEntity); continue; @@ -188,8 +187,9 @@ public class AggregatedLogsBlock extends HtmlBlock { LOG.error("Error getting logs for " + logEntity, ex); continue; } finally { - if (reader != null) + if (reader != null) { reader.close(); + } } } if (!foundLog) { @@ -201,7 +201,7 @@ public class AggregatedLogsBlock extends HtmlBlock { } } } catch (IOException e) { - html.h1()._("Error getting logs for " + logEntity)._(); + html.h1().__("Error getting logs for " + logEntity).__(); LOG.error("Error getting logs for " + logEntity, e); } } @@ -219,12 +219,12 @@ public class AggregatedLogsBlock extends HtmlBlock { || desiredLogType.equals(logType)) { long logLength = logReader.getCurrentLogLength(); if (foundLog) { - html.pre()._("\n\n")._(); + html.pre().__("\n\n").__(); } - html.p()._("Log Type: " + logType)._(); - html.p()._("Log Upload Time: " + Times.format(logUpLoadTime))._(); - html.p()._("Log Length: " + Long.toString(logLength))._(); + html.p().__("Log Type: " + logType).__(); + html.p().__("Log Upload Time: " + Times.format(logUpLoadTime)).__(); + html.p().__("Log Length: " + Long.toString(logLength)).__(); long start = logLimits.start < 0 ? logLength + logLimits.start : logLimits.start; @@ -238,12 +238,12 @@ public class AggregatedLogsBlock extends HtmlBlock { long toRead = end - start; if (toRead < logLength) { - html.p()._("Showing " + toRead + " bytes of " + logLength + html.p().__("Showing " + toRead + " bytes of " + logLength + " total. Click ") .a(url("logs", $(NM_NODENAME), $(CONTAINER_ID), $(ENTITY_STRING), $(APP_OWNER), logType, "?start=0"), "here"). - _(" for the full log.")._(); + __(" for the full log.").__(); } long totalSkipped = 0; @@ -267,12 +267,12 @@ public class AggregatedLogsBlock extends HtmlBlock { while (toRead > 0 && (len = logReader.read(cbuf, 0, currentToRead)) > 0) { - pre._(new String(cbuf, 0, len)); + pre.__(new String(cbuf, 0, len)); toRead = toRead - len; currentToRead = toRead > bufferSize ? 
bufferSize : (int) toRead; } - pre._(); + pre.__(); foundLog = true; } @@ -285,7 +285,7 @@ public class AggregatedLogsBlock extends HtmlBlock { private ContainerId verifyAndGetContainerId(Block html) { String containerIdStr = $(CONTAINER_ID); if (containerIdStr == null || containerIdStr.isEmpty()) { - html.h1()._("Cannot get container logs without a ContainerId")._(); + html.h1().__("Cannot get container logs without a ContainerId").__(); return null; } ContainerId containerId = null; @@ -293,8 +293,8 @@ public class AggregatedLogsBlock extends HtmlBlock { containerId = ContainerId.fromString(containerIdStr); } catch (IllegalArgumentException e) { html.h1() - ._("Cannot get container logs for invalid containerId: " - + containerIdStr)._(); + .__("Cannot get container logs for invalid containerId: " + + containerIdStr).__(); return null; } return containerId; @@ -303,15 +303,15 @@ public class AggregatedLogsBlock extends HtmlBlock { private NodeId verifyAndGetNodeId(Block html) { String nodeIdStr = $(NM_NODENAME); if (nodeIdStr == null || nodeIdStr.isEmpty()) { - html.h1()._("Cannot get container logs without a NodeId")._(); + html.h1().__("Cannot get container logs without a NodeId").__(); return null; } NodeId nodeId = null; try { nodeId = NodeId.fromString(nodeIdStr); } catch (IllegalArgumentException e) { - html.h1()._("Cannot get container logs. Invalid nodeId: " + nodeIdStr) - ._(); + html.h1().__("Cannot get container logs. Invalid nodeId: " + nodeIdStr) + .__(); return null; } return nodeId; @@ -320,7 +320,7 @@ public class AggregatedLogsBlock extends HtmlBlock { private String verifyAndGetAppOwner(Block html) { String appOwner = $(APP_OWNER); if (appOwner == null || appOwner.isEmpty()) { - html.h1()._("Cannot get container logs without an app owner")._(); + html.h1().__("Cannot get container logs without an app owner").__(); } return appOwner; } @@ -341,7 +341,7 @@ public class AggregatedLogsBlock extends HtmlBlock { start = Long.parseLong(startStr); } catch (NumberFormatException e) { isValid = false; - html.h1()._("Invalid log start value: " + startStr)._(); + html.h1().__("Invalid log start value: " + startStr).__(); } } @@ -351,7 +351,7 @@ public class AggregatedLogsBlock extends HtmlBlock { end = Long.parseLong(endStr); } catch (NumberFormatException e) { isValid = false; - html.h1()._("Invalid log end value: " + endStr)._(); + html.h1().__("Invalid log end value: " + endStr).__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java index fe83eaad2d6..a6e3a056903 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsNavBlock.java @@ -28,7 +28,7 @@ public class AggregatedLogsNavBlock extends HtmlBlock { protected void render(Block html) { html .div("#nav") - .h3()._("Logs")._() // - ._(); + .h3().__("Logs").__() + .__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java index 773738fed7b..f097b0dec75 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsPage.java @@ -37,7 +37,7 @@ public class AggregatedLogsPage extends TwoColumnLayout { * @see org.apache.hadoop.yarn.server.nodemanager.webapp.NMView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { String logEntity = $(ENTITY_STRING); if (logEntity == null || logEntity.isEmpty()) { logEntity = $(CONTAINER_ID); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java index eabd9b3f0e3..fb0a9bf533f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java @@ -129,6 +129,20 @@ public class WebAppUtils { return getRMWebAppURLWithoutScheme(conf, false); } + public static String getRouterWebAppURLWithScheme(Configuration conf) { + return getHttpSchemePrefix(conf) + getRouterWebAppURLWithoutScheme(conf); + } + + public static String getRouterWebAppURLWithoutScheme(Configuration conf) { + if (YarnConfiguration.useHttps(conf)) { + return conf.get(YarnConfiguration.ROUTER_WEBAPP_HTTPS_ADDRESS, + YarnConfiguration.DEFAULT_ROUTER_WEBAPP_HTTPS_ADDRESS); + } else { + return conf.get(YarnConfiguration.ROUTER_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_ROUTER_WEBAPP_ADDRESS); + } + } + public static List getProxyHostsAndPortsForAmFilter( Configuration conf) { List addrs = new ArrayList(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java index 68e09ad1ad5..fabb5c12242 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/ErrorPage.java @@ -30,24 +30,24 @@ import org.apache.hadoop.classification.InterfaceAudience; public class ErrorPage extends HtmlPage { @Override - protected void render(Page.HTML<_> html) { + protected void render(Page.HTML<__> html) { set(JQueryUI.ACCORDION_ID, "msg"); String title = "Sorry, got error "+ status(); html. title(title). - link(root_url("static","yarn.css")). - _(JQueryUI.class). // an embedded sub-view + link(root_url("static", "yarn.css")). + __(JQueryUI.class). // an embedded sub-view style("#msg { margin: 1em auto; width: 88%; }", "#msg h1 { padding: 0.2em 1.5em; font: bold 1.3em serif; }"). div("#msg"). h1(title). div(). - _("Please consult"). + __("Please consult"). a("http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html", - "RFC 2616")._(" for meanings of the error code.")._(). + "RFC 2616").__(" for meanings of the error code.").__(). h1("Error Details"). pre(). 
- _(errorDetails())._()._()._(); + __(errorDetails()).__().__().__(); } protected String errorDetails() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java index ba85ac69d3f..e4d1f2f71c8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/FooterBlock.java @@ -25,6 +25,6 @@ public class FooterBlock extends HtmlBlock { @Override protected void render(Block html) { html. - div("#footer.ui-widget")._(); + div("#footer.ui-widget").__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java index 03f0fb1415f..3a0f35a2f34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HeaderBlock.java @@ -31,9 +31,9 @@ public class HeaderBlock extends HtmlBlock { html. div("#header.ui-widget"). div("#user"). - _(loggedIn)._(). + __(loggedIn).__(). div("#logo"). - img("/static/hadoop-st.png")._(). - h1($(TITLE))._(); + img("/static/hadoop-st.png").__(). + h1($(TITLE)).__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java index a785c0c73d0..acf040ee014 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java @@ -25,7 +25,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.webapp.MimeType; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.WebAppException; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) public abstract class HtmlBlock extends TextView implements SubView { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java index 1d176d41ad8..210cf0482a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlPage.java @@ -25,17 +25,17 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.yarn.webapp.MimeType; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.WebAppException; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; /** * The parent class of all HTML pages. 
Override - * {@link #render(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)} + * {@link #render(org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.HTML)} * to actually render the page. */ @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) public abstract class HtmlPage extends TextView { - public static class _ implements Hamlet._ { + public static class __ implements Hamlet.__ { } public class Page extends Hamlet { @@ -50,8 +50,8 @@ public abstract class HtmlPage extends TextView { setWasInline(context().wasInline()); } - public HTML html() { - return new HTML("html", null, EnumSet.of(EOpt.ENDTAG)); + public HTML html() { + return new HTML("html", null, EnumSet.of(EOpt.ENDTAG)); } } @@ -91,6 +91,6 @@ public abstract class HtmlPage extends TextView { * Render the the HTML page. * @param html the page to render data to. */ - protected abstract void render(Page.HTML<_> html); + protected abstract void render(Page.HTML<__> html); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java index 9fe67f1a52b..0ad8b3c0411 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java @@ -26,11 +26,11 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.yarn.webapp.ResponseInfo; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TD; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR; import com.google.inject.Inject; @@ -47,7 +47,7 @@ public class InfoBlock extends HtmlBlock { div(_INFO_WRAP). table(_INFO). tr(). - th().$class(C_TH).$colspan(2)._(info.about())._()._(); + th().$class(C_TH).$colspan(2).__(info.about()).__().__(); int i = 0; for (ResponseInfo.Item item : info) { TR>> tr = table. @@ -62,23 +62,23 @@ public class InfoBlock extends HtmlBlock { DIV>>>> singleLineDiv; for ( String line :lines) { singleLineDiv = td.div(); - singleLineDiv._(line); - singleLineDiv._(); + singleLineDiv.__(line); + singleLineDiv.__(); } } else { - td._(value); + td.__(value); } - td._(); + td.__(); } else { - tr.td()._r(value)._(); + tr.td()._r(value).__(); } } else { tr. td(). 
- a(url(item.url), value)._(); + a(url(item.url), value).__(); } - tr._(); + tr.__(); } - table._()._(); + table.__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java index 06372e38574..46c76d92d22 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java @@ -26,7 +26,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.split; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.HTML; +import org.apache.hadoop.yarn.webapp.hamlet2.HamletSpec.HTML; import com.google.common.collect.Lists; @@ -82,19 +82,19 @@ public class JQueryUI extends HtmlBlock { initProgressBars(list); if (!list.isEmpty()) { - html.script().$type("text/javascript")._("$(function() {") - ._(list.toArray())._("});")._(); + html.script().$type("text/javascript").__("$(function() {") + .__(list.toArray()).__("});").__(); } } public static void jsnotice(HTML html) { html. div("#jsnotice.ui-state-error"). - _("This page will not function without javascript enabled." - + " Please enable javascript on your browser.")._(); + __("This page will not function without javascript enabled." + + " Please enable javascript on your browser.").__(); html. script().$type("text/javascript"). - _("$('#jsnotice').hide();")._(); + __("$('#jsnotice').hide();").__(); } protected void initAccordions(List list) { @@ -130,7 +130,7 @@ public class JQueryUI extends HtmlBlock { // for inserting stateSaveInit int pos = init.indexOf('{') + 1; init = new StringBuffer(init).insert(pos, stateSaveInit).toString(); - list.add(join(id,"DataTable = $('#", id, "').dataTable(", init, + list.add(join(id, "DataTable = $('#", id, "').dataTable(", init, ").fnSetFilteringDelay(188);")); String postInit = $(postInitID(DATATABLES, id)); if(!postInit.isEmpty()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java index 4781a200ac5..a4b6f63d385 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/LipsumBlock.java @@ -27,7 +27,7 @@ public class LipsumBlock extends HtmlBlock { public void render(Block html) { html. p(). 
- _("Lorem ipsum dolor sit amet, consectetur adipiscing elit.", + __("Lorem ipsum dolor sit amet, consectetur adipiscing elit.", "Vivamus eu dui in ipsum tincidunt egestas ac sed nibh.", "Praesent quis nisl lorem, nec interdum urna.", "Duis sagittis dignissim purus sed sollicitudin.", @@ -45,6 +45,6 @@ public class LipsumBlock extends HtmlBlock { "Proin eu ante nisl, vel porttitor eros.", "Aliquam gravida luctus augue, at scelerisque enim consectetur vel.", "Donec interdum tempor nisl, quis laoreet enim venenatis eu.", - "Quisque elit elit, vulputate eget porta vel, laoreet ac lacus.")._(); + "Quisque elit elit, vulputate eget porta vel, laoreet ac lacus.").__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java index cdc13eb59cb..a684eee6a4a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/NavBlock.java @@ -30,12 +30,12 @@ public class NavBlock extends HtmlBlock { ul(). li("Item 1"). li("Item 2"). - li("...")._(). + li("...").__(). h3("Tools"). ul(). - li().a("/conf", "Configuration")._(). - li().a("/stacks", "Thread dump")._(). - li().a("/logs", "Logs")._(). - li().a("/jmx?qry=Hadoop:*", "Metrics")._()._()._(); + li().a("/conf", "Configuration").__(). + li().a("/stacks", "Thread dump").__(). + li().a("/logs", "Logs").__(). + li().a("/jmx?qry=Hadoop:*", "Metrics").__().__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java index 23aa51bb1ea..3e831996958 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnCssLayout.java @@ -31,11 +31,11 @@ import org.apache.hadoop.yarn.webapp.SubView; @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) public class TwoColumnCssLayout extends HtmlPage { - @Override protected void render(Page.HTML<_> html) { + @Override protected void render(Page.HTML<__> html) { preHead(html); html. title($("title")). - link(root_url("static","yarn.css")). + link(root_url("static", "yarn.css")). style(".main { min-height: 100%; height: auto !important; height: 100%;", " margin: 0 auto -4em; border: 0; }", ".footer, .push { height: 4em; clear: both; border: 0 }", @@ -50,28 +50,28 @@ public class TwoColumnCssLayout extends HtmlPage { " right: 100%; overflow: hidden; }", ".leftnav .nav { float: left; width: 11em; position: relative;", " right: 12em; overflow: hidden; }"). - _(JQueryUI.class); + __(JQueryUI.class); postHead(html); JQueryUI.jsnotice(html); html. div(".main.ui-widget-content"). - _(header()). + __(header()). div(".cmask.leftnav"). div(".c1right"). div(".c1wrap"). div(".content"). - _(content())._()._(). + __(content()).__().__(). div(".nav"). - _(nav()). - div(".push")._()._()._()._()._(). + __(nav()). + div(".push").__().__().__().__().__(). div(".footer.ui-widget-content"). 
- _(footer())._()._(); + __(footer()).__().__(); } - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { } - protected void postHead(Page.HTML<_> html) { + protected void postHead(Page.HTML<__> html) { } protected Class header() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java index 4d7752dddd8..fe71395bf93 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/TwoColumnLayout.java @@ -39,18 +39,18 @@ public class TwoColumnLayout extends HtmlPage { * (non-Javadoc) * @see org.apache.hadoop.yarn.webapp.view.HtmlPage#render(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ - @Override protected void render(Page.HTML<_> html) { + @Override protected void render(Page.HTML<__> html) { preHead(html); html. title($(TITLE)). - link(root_url("static","yarn.css")). + link(root_url("static", "yarn.css")). style("#layout { height: 100%; }", "#layout thead td { height: 3em; }", "#layout #navcell { width: 11em; padding: 0 1em; }", "#layout td.content { padding-top: 0 }", "#layout tbody { vertical-align: top; }", "#layout tfoot td { height: 4em; }"). - _(JQueryUI.class); + __(JQueryUI.class); postHead(html); JQueryUI.jsnotice(html); html. @@ -58,17 +58,17 @@ public class TwoColumnLayout extends HtmlPage { thead(). tr(). td().$colspan(2). - _(header())._()._()._(). + __(header()).__().__().__(). tfoot(). tr(). td().$colspan(2). - _(footer())._()._()._(). + __(footer()).__().__().__(). tbody(). tr(). td().$id("navcell"). - _(nav())._(). + __(nav()).__(). td().$class("content"). - _(content())._()._()._()._()._(); + __(content()).__().__().__().__().__(); } /** @@ -76,14 +76,14 @@ public class TwoColumnLayout extends HtmlPage { * involves setting page variables for Javascript and CSS rendering. * @param html the html to use to render. */ - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { } /** * Do what needs to be done after the header is rendered. * @param html the html to use to render. */ - protected void postHead(Page.HTML<_> html) { + protected void postHead(Page.HTML<__> html) { } /** @@ -120,7 +120,7 @@ public class TwoColumnLayout extends HtmlPage { * @param tableId the ID of the table to set styles on. * @param innerStyles any other styles to add to the table. */ - protected void setTableStyles(Page.HTML<_> html, String tableId, + protected void setTableStyles(Page.HTML<__> html, String tableId, String... 
innerStyles) { List styles = Lists.newArrayList(); styles.add(join('#', tableId, "_paginate span {font-weight:normal}")); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 7ddcfcd969e..f93de4460ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -72,6 +72,14 @@ + + + If set to true, then ALL container updates will be automatically sent to + the NM in the next heartbeat + yarn.resourcemanager.auto-update.containers + false + + The number of threads used to handle applications manager requests. yarn.resourcemanager.client.thread-count @@ -134,7 +142,7 @@ - This configures the HTTP endpoint for Yarn Daemons.The following + This configures the HTTP endpoint for YARN Daemons.The following values are supported: - HTTP_ONLY : Service is provided only on http - HTTPS_ONLY : Service is provided only on https @@ -451,31 +459,6 @@ ${yarn.resourcemanager.max-completed-applications} - - Host:Port of the ZooKeeper server to be used by the RM. This - must be supplied when using the ZooKeeper based implementation of the - RM state store and/or embedded automatic failover in an HA setting. - - yarn.resourcemanager.zk-address - - - - - Number of times RM tries to connect to ZooKeeper. - yarn.resourcemanager.zk-num-retries - 1000 - - - - Retry interval in milliseconds when connecting to ZooKeeper. - When HA is enabled, the value here is NOT used. It is generated - automatically from yarn.resourcemanager.zk-timeout-ms and - yarn.resourcemanager.zk-num-retries. - - yarn.resourcemanager.zk-retry-interval-ms - 1000 - - Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using @@ -485,22 +468,6 @@ /rmstore - - ZooKeeper session timeout in milliseconds. Session expiration - is managed by the ZooKeeper cluster itself, not by the client. This value is - used by the cluster to determine when the client's session expires. - Expirations happens when the cluster does not hear from the client within - the specified session timeout period (i.e. no heartbeat). - yarn.resourcemanager.zk-timeout-ms - 10000 - - - - ACL's to be used for ZooKeeper znodes. - yarn.resourcemanager.zk-acl - world:anyone:rwcda - - ACLs to be used for the root znode when using ZKRMStateStore in an HA @@ -526,18 +493,6 @@ yarn.resourcemanager.zk-state-store.root-node.acl - - - Specify the auths to be used for the ACL's specified in both the - yarn.resourcemanager.zk-acl and - yarn.resourcemanager.zk-state-store.root-node.acl properties. This - takes a comma-separated list of authentication mechanisms, each of the - form 'scheme:auth' (the same syntax used for the 'addAuth' command in - the ZK CLI). - - yarn.resourcemanager.zk-auth - - URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using @@ -1063,14 +1018,14 @@ DeletionService will delete the application's localized file directory and log directory. - To diagnose Yarn application problems, set this property's value large + To diagnose YARN application problems, set this property's value large enough (for example, to 600 = 10 minutes) to permit examination of these directories. After changing the property's value, you must restart the nodemanager in order for it to have an effect. 
- The roots of Yarn applications' work directories is configurable with + The roots of YARN applications' work directories is configurable with the yarn.nodemanager.local-dirs property (see below), and the roots - of the Yarn applications' log directories is configurable with the + of the YARN applications' log directories is configurable with the yarn.nodemanager.log-dirs property (see also below). yarn.nodemanager.delete.debug-delay-sec @@ -1510,28 +1465,45 @@ The cgroups hierarchy under which to place YARN proccesses (cannot contain commas). If yarn.nodemanager.linux-container-executor.cgroups.mount is false - (that is, if cgroups have been pre-configured) and the Yarn user has write + (that is, if cgroups have been pre-configured) and the YARN user has write access to the parent directory, then the directory will be created. - If the directory already exists, the administrator has to give Yarn + If the directory already exists, the administrator has to give YARN write permissions to it recursively. - Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler. + This property only applies when the LCE resources handler is set to + CgroupsLCEResourcesHandler. yarn.nodemanager.linux-container-executor.cgroups.hierarchy /hadoop-yarn Whether the LCE should attempt to mount cgroups if not found. - Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler. + This property only applies when the LCE resources handler is set to + CgroupsLCEResourcesHandler. + yarn.nodemanager.linux-container-executor.cgroups.mount false - Where the LCE should attempt to mount cgroups if not found. Common locations - include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux - distribution in use. This path must exist before the NodeManager is launched. - Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and - yarn.nodemanager.linux-container-executor.cgroups.mount is true. + This property sets the path from which YARN will read the + CGroups configuration. YARN has built-in functionality to discover the + system CGroup mount paths, so use this property only if YARN's automatic + mount path discovery does not work. + + The path specified by this property must exist before the NodeManager is + launched. + If yarn.nodemanager.linux-container-executor.cgroups.mount is set to true, + YARN will first try to mount the CGroups at the specified path before + reading them. + If yarn.nodemanager.linux-container-executor.cgroups.mount is set to + false, YARN will read the CGroups at the specified path. + If this property is empty, YARN tries to detect the CGroups location. + + Please refer to NodeManagerCgroups.html in the documentation for further + details. + This property only applies when the LCE resources handler is set to + CgroupsLCEResourcesHandler. + yarn.nodemanager.linux-container-executor.cgroups.mount-path @@ -2686,8 +2658,47 @@ The arguments to pass to the Node label script. yarn.nodemanager.node-labels.provider.script.opts - + + + + Flag to indicate whether the RM is participating in Federation or not. 
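For illustration only — a minimal sketch, not part of the patch, of the mount-path resolution rules spelled out in the yarn.nodemanager.linux-container-executor.cgroups.mount-path description above. The class and method names are hypothetical; the property names are quoted directly from the descriptions, and this is not the NodeManager's actual resource-handler code.

import org.apache.hadoop.conf.Configuration;

// Hypothetical helper mirroring the documented cgroups mount-path behaviour.
public class CGroupsMountPathSketch {

  static String resolveCGroupsPath(Configuration conf) {
    String mountPath = conf.get(
        "yarn.nodemanager.linux-container-executor.cgroups.mount-path");
    boolean mount = conf.getBoolean(
        "yarn.nodemanager.linux-container-executor.cgroups.mount", false);
    if (mountPath == null || mountPath.isEmpty()) {
      // Empty path: per the description, YARN falls back to auto-detecting
      // the system cgroups mount location.
      return "<auto-detected>";
    }
    if (mount) {
      // mount == true: YARN first tries to mount cgroups at this path,
      // then reads them from there.
      return mountPath;
    }
    // mount == false: cgroups are assumed to be pre-configured at this path
    // and are only read, never mounted.
    return mountPath;
  }

  public static void main(String[] args) {
    System.out.println(resolveCGroupsPath(new Configuration()));
  }
}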
+ + yarn.federation.enabled + false + + + + Machine list file to be loaded by the FederationSubCluster Resolver + + yarn.federation.machine-list + + + + Class name for SubClusterResolver + + yarn.federation.subcluster-resolver.class + org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl + + + + + Store class name for federation state store + + yarn.federation.state-store.class + org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore + + + + + The time in seconds after which the federation state store local cache + will be refreshed periodically + + yarn.federation.cache-ttl.secs + 300 + + + The interval that the yarn client library uses to poll the completion status of the asynchronous API of application client protocol. @@ -3126,6 +3137,45 @@ false + + + The comma separated list of class names that implement the + RequestInterceptor interface. This is used by the RouterClientRMService + to create the request processing pipeline for users. + + yarn.router.clientrm.interceptor-class.pipeline + org.apache.hadoop.yarn.server.router.clientrm.DefaultClientRequestInterceptor + + + + + Size of LRU cache for Router ClientRM Service and RMAdmin Service. + + yarn.router.pipeline.cache-max-size + 25 + + + + + The comma separated list of class names that implement the + RequestInterceptor interface. This is used by the RouterRMAdminService + to create the request processing pipeline for users. + + yarn.router.rmadmin.interceptor-class.pipeline + org.apache.hadoop.yarn.server.router.rmadmin.DefaultRMAdminRequestInterceptor + + + + + The actual address the server will bind to. If this optional address is + set, the RPC and webapp servers will bind to this address and the port specified in + yarn.router.address and yarn.router.webapp.address, respectively. This is + most useful for making Router listen to all interfaces by setting to 0.0.0.0. + + yarn.router.bind-host + + + Comma-separated list of PlacementRules to determine how applications @@ -3136,4 +3186,44 @@ user-group + + + The comma separated list of class names that implement the + RequestInterceptor interface. This is used by the RouterWebServices + to create the request processing pipeline for users. + + yarn.router.webapp.interceptor-class.pipeline + org.apache.hadoop.yarn.server.router.webapp.DefaultRequestInterceptorREST + + + + + The http address of the Router web application. + If only a host is provided as the value, + the webapp will be served on a random port. + + yarn.router.webapp.address + 0.0.0.0:8089 + + + + + The https address of the Router web application. + If only a host is provided as the value, + the webapp will be served on a random port. 
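For illustration only — a minimal sketch, not part of the patch, showing how the getRouterWebAppURLWithScheme helper added to WebAppUtils earlier in this patch resolves the Router web application address configured above. The host name is a placeholder, and the scheme follows the cluster's HTTP policy.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

public class RouterWebAppUrlExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Override the default 0.0.0.0:8089 Router web app address.
    conf.set(YarnConfiguration.ROUTER_WEBAPP_ADDRESS,
        "router.example.com:8089");
    // Prepends "http://" (or "https://" when the HTTPS policy is in effect,
    // in which case the https address is read instead).
    String url = WebAppUtils.getRouterWebAppURLWithScheme(conf);
    System.out.println(url);
  }
}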
+ + yarn.router.webapp.https.address + 0.0.0.0:8091 + + + + + It is TimelineClient 1.5 configuration whether to store active + application’s timeline data with in user directory i.e + ${yarn.timeline-service.entity-group-fs-store.active-dir}/${user.name} + + yarn.timeline-service.entity-group-fs-store.with-user-dir + false + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java index 90c7573a0f3..dfe75349748 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java @@ -33,6 +33,8 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; @@ -193,6 +195,7 @@ public class TestContainerLaunchRPC { } @Override + @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( IncreaseContainersResourceRequest request) throws YarnException, IOException { return null; @@ -236,5 +239,11 @@ public class TestContainerLaunchRPC { throws YarnException, IOException { return null; } + + @Override + public ContainerUpdateResponse updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { + return null; + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerResourceIncreaseRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerResourceIncreaseRPC.java index f97f7c74df7..6e9728475e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerResourceIncreaseRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerResourceIncreaseRPC.java @@ -27,6 +27,8 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; @@ -111,11 +113,11 @@ public class TestContainerResourceIncreaseRPC { // Construct container resource increase request, List increaseTokens = new ArrayList<>(); increaseTokens.add(containerToken); - IncreaseContainersResourceRequest increaseRequest = - IncreaseContainersResourceRequest - .newInstance(increaseTokens); + ContainerUpdateRequest 
request = ContainerUpdateRequest + .newInstance(increaseTokens); + try { - proxy.increaseContainersResource(increaseRequest); + proxy.updateContainer(request); } catch (Exception e) { LOG.info(StringUtils.stringifyException(e)); Assert.assertEquals("Error, exception is not: " @@ -170,8 +172,16 @@ public class TestContainerResourceIncreaseRPC { } @Override + @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( - IncreaseContainersResourceRequest request) throws YarnException, IOException { + IncreaseContainersResourceRequest request) + throws YarnException, IOException { + return null; + } + + @Override + public ContainerUpdateResponse updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { try { // make the thread sleep to look like its not going to respond Thread.sleep(10000); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index b62b4ee61ae..bb688c93a9f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -27,6 +27,8 @@ import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenReque import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest; @@ -379,6 +381,8 @@ public class TestPBImplRecords extends BasePBImplRecordsTest { generateByNewInstance(StartContainerRequest.class); generateByNewInstance(NodeLabel.class); generateByNewInstance(UpdatedContainer.class); + generateByNewInstance(ContainerUpdateRequest.class); + generateByNewInstance(ContainerUpdateResponse.class); // genByNewInstance does not apply to QueueInfo, cause // it is recursive(has sub queues) typeValueCache.put(QueueInfo.class, QueueInfo.newInstance("root", 1.0f, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientForATS1_5.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientForATS1_5.java index d3826e1a6fe..85730339806 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientForATS1_5.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientForATS1_5.java @@ -59,25 +59,30 @@ public class TestTimelineClientForATS1_5 { private static FileContext localFS; private static File localActiveDir; private TimelineWriter spyTimelineWriter; + private UserGroupInformation authUgi; @Before public void setup() throws Exception { localFS = FileContext.getLocalFSFileContext(); localActiveDir = new File("target", 
this.getClass().getSimpleName() + "-activeDir") - .getAbsoluteFile(); + .getAbsoluteFile(); localFS.delete(new Path(localActiveDir.getAbsolutePath()), true); localActiveDir.mkdir(); LOG.info("Created activeDir in " + localActiveDir.getAbsolutePath()); + authUgi = UserGroupInformation.getCurrentUser(); + } + + private YarnConfiguration getConfigurations() { YarnConfiguration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5f); conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR, - localActiveDir.getAbsolutePath()); + localActiveDir.getAbsolutePath()); conf.set( - YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SUMMARY_ENTITY_TYPES, - "summary_type"); - client = createTimelineClient(conf); + YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SUMMARY_ENTITY_TYPES, + "summary_type"); + return conf; } @After @@ -90,6 +95,21 @@ public class TestTimelineClientForATS1_5 { @Test public void testPostEntities() throws Exception { + client = createTimelineClient(getConfigurations()); + verifyForPostEntities(false); + } + + @Test + public void testPostEntitiesToKeepUnderUserDir() throws Exception { + YarnConfiguration conf = getConfigurations(); + conf.setBoolean( + YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR, + true); + client = createTimelineClient(conf); + verifyForPostEntities(true); + } + + private void verifyForPostEntities(boolean storeInsideUserDir) { ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1); TimelineEntityGroupId groupId = @@ -118,7 +138,8 @@ public class TestTimelineClientForATS1_5 { entityTDB[0] = entities[0]; verify(spyTimelineWriter, times(1)).putEntities(entityTDB); Assert.assertTrue(localFS.util().exists( - new Path(getAppAttemptDir(attemptId1), "summarylog-" + new Path(getAppAttemptDir(attemptId1, storeInsideUserDir), + "summarylog-" + attemptId1.toString()))); reset(spyTimelineWriter); @@ -132,13 +153,16 @@ public class TestTimelineClientForATS1_5 { verify(spyTimelineWriter, times(0)).putEntities( any(TimelineEntity[].class)); Assert.assertTrue(localFS.util().exists( - new Path(getAppAttemptDir(attemptId2), "summarylog-" + new Path(getAppAttemptDir(attemptId2, storeInsideUserDir), + "summarylog-" + attemptId2.toString()))); Assert.assertTrue(localFS.util().exists( - new Path(getAppAttemptDir(attemptId2), "entitylog-" + new Path(getAppAttemptDir(attemptId2, storeInsideUserDir), + "entitylog-" + groupId.toString()))); Assert.assertTrue(localFS.util().exists( - new Path(getAppAttemptDir(attemptId2), "entitylog-" + new Path(getAppAttemptDir(attemptId2, storeInsideUserDir), + "entitylog-" + groupId2.toString()))); reset(spyTimelineWriter); } catch (Exception e) { @@ -148,6 +172,21 @@ public class TestTimelineClientForATS1_5 { @Test public void testPutDomain() { + client = createTimelineClient(getConfigurations()); + verifyForPutDomain(false); + } + + @Test + public void testPutDomainToKeepUnderUserDir() { + YarnConfiguration conf = getConfigurations(); + conf.setBoolean( + YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR, + true); + client = createTimelineClient(conf); + verifyForPutDomain(true); + } + + private void verifyForPutDomain(boolean storeInsideUserDir) { ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1); ApplicationAttemptId attemptId1 = @@ -161,23 +200,33 @@ public class TestTimelineClientForATS1_5 { 
client.putDomain(attemptId1, domain); verify(spyTimelineWriter, times(0)).putDomain(domain); - Assert.assertTrue(localFS.util().exists( - new Path(getAppAttemptDir(attemptId1), "domainlog-" - + attemptId1.toString()))); + Assert.assertTrue(localFS.util() + .exists(new Path(getAppAttemptDir(attemptId1, storeInsideUserDir), + "domainlog-" + attemptId1.toString()))); reset(spyTimelineWriter); } catch (Exception e) { Assert.fail("Exception is not expected." + e); } } - private Path getAppAttemptDir(ApplicationAttemptId appAttemptId) { - Path appDir = - new Path(localActiveDir.getAbsolutePath(), appAttemptId - .getApplicationId().toString()); + private Path getAppAttemptDir(ApplicationAttemptId appAttemptId, + boolean storeInsideUserDir) { + Path userDir = getUserDir(appAttemptId, storeInsideUserDir); + Path appDir = new Path(userDir, appAttemptId.getApplicationId().toString()); Path attemptDir = new Path(appDir, appAttemptId.toString()); return attemptDir; } + private Path getUserDir(ApplicationAttemptId appAttemptId, + boolean storeInsideUserDir) { + if (!storeInsideUserDir) { + return new Path(localActiveDir.getAbsolutePath()); + } + Path userDir = + new Path(localActiveDir.getAbsolutePath(), authUgi.getShortUserName()); + return userDir; + } + private static TimelineEntity generateEntity(String type) { TimelineEntity entity = new TimelineEntity(); entity.setEntityId("entity id"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java index 738942300a0..a053fdb9376 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java @@ -24,7 +24,6 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.junit.Test; import java.net.InetSocketAddress; -import java.net.SocketAddress; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLRUCacheHashMap.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLRUCacheHashMap.java new file mode 100644 index 00000000000..9d3ec32975a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLRUCacheHashMap.java @@ -0,0 +1,74 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ +package org.apache.hadoop.yarn.util; + +import java.io.IOException; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test class to validate the correctness of the {@code LRUCacheHashMap}. + * + */ +public class TestLRUCacheHashMap { + + /** + * Test if the different entries are generated, and LRU cache is working as + * expected. + */ + @Test + public void testLRUCache() + throws YarnException, IOException, InterruptedException { + + int mapSize = 5; + + LRUCacheHashMap map = + new LRUCacheHashMap(mapSize, true); + + map.put("1", 1); + map.put("2", 2); + map.put("3", 3); + map.put("4", 4); + map.put("5", 5); + + Assert.assertEquals(mapSize, map.size()); + + // Check if all the elements in the map are from 1 to 5 + for (int i = 1; i < mapSize; i++) { + Assert.assertTrue(map.containsKey(Integer.toString(i))); + } + + map.put("6", 6); + map.put("3", 3); + map.put("7", 7); + map.put("8", 8); + + Assert.assertEquals(mapSize, map.size()); + + // Check if all the elements in the map are from 5 to 8 and the 3 + for (int i = 5; i < mapSize; i++) { + Assert.assertTrue(map.containsKey(Integer.toString(i))); + } + + Assert.assertTrue(map.containsKey("3")); + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java index aad513a9ea5..43a5182eefb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java @@ -236,7 +236,7 @@ public class TestProcfsBasedProcessTree { "vmem for the gone-process is " + p.getVirtualMemorySize() + " . 
It should be UNAVAILABLE(-1).", p.getVirtualMemorySize() == UNAVAILABLE); - Assert.assertTrue(p.toString().equals("[ ]")); + Assert.assertEquals("[ ]", p.toString()); } protected ProcfsBasedProcessTree createProcessTree(String pid) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestYarnVersionInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestYarnVersionInfo.java index 47ee8220071..7e41501aaf2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestYarnVersionInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestYarnVersionInfo.java @@ -20,9 +20,9 @@ package org.apache.hadoop.yarn.util; import java.io.IOException; -import org.apache.hadoop.yarn.util.YarnVersionInfo; import org.junit.Test; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; /** * A JUnit test to test {@link YarnVersionInfo} @@ -38,9 +38,12 @@ public class TestYarnVersionInfo { // can't easily know what the correct values are going to be so just // make sure they aren't Unknown - assertTrue("getVersion returned Unknown", !YarnVersionInfo.getVersion().equals("Unknown")); - assertTrue("getUser returned Unknown", !YarnVersionInfo.getUser().equals("Unknown")); - assertTrue("getSrcChecksum returned Unknown", !YarnVersionInfo.getSrcChecksum().equals("Unknown")); + assertNotEquals("getVersion returned Unknown", + "Unknown", YarnVersionInfo.getVersion()); + assertNotEquals("getUser returned Unknown", + "Unknown", YarnVersionInfo.getUser()); + assertNotEquals("getSrcChecksum returned Unknown", + "Unknown", YarnVersionInfo.getSrcChecksum()); // these could be Unknown if the VersionInfo generated from code not in svn or git // so just check that they return something diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java index 66d9ef22c38..075bed216dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestSubViews.java @@ -32,14 +32,14 @@ public class TestSubViews { static public class MainView extends HtmlPage { @Override - public void render(Page.HTML<_> html) { + public void render(Page.HTML<__> html) { html. body(). div(). - _(Sub1.class)._(). + __(Sub1.class).__(). div(). i("inline text"). - _(Sub2.class)._()._()._(); + __(Sub2.class).__().__().__(); } } @@ -48,7 +48,7 @@ public class TestSubViews { public void render(Block html) { html. div("#sub1"). - _("sub1 text")._(); + __("sub1 text").__(); } } @@ -57,7 +57,7 @@ public class TestSubViews { public void render(Block html) { html. pre(). 
- _("sub2 text")._(); + __("sub2 text").__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java index db50dd3daa7..dea146da282 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java @@ -100,7 +100,7 @@ public class TestWebApp { static class TablesView extends HtmlPage { @Override - public void render(Page.HTML<_> html) { + public void render(Page.HTML<__> html) { set(DATATABLES_ID, "t1 t2 t3 t4"); set(initID(DATATABLES, "t1"), tableInit().append("}").toString()); set(initID(DATATABLES, "t2"), join("{bJQueryUI:true, sDom:'t',", @@ -110,7 +110,7 @@ public class TestWebApp { html. title("Test DataTables"). link("/static/yarn.css"). - _(JQueryUI.class). + __(JQueryUI.class). style(".wrapper { padding: 1em }", ".wrapper h2 { margin: 0.5em 0 }", ".dataTables_wrapper { min-height: 1em }"). @@ -118,33 +118,33 @@ public class TestWebApp { h2("Default table init"). table("#t1"). thead(). - tr().th("Column1").th("Column2")._()._(). + tr().th("Column1").th("Column2").__().__(). tbody(). - tr().td("c1r1").td("c2r1")._(). - tr().td("c1r2").td("c2r2")._()._()._(). + tr().td("c1r1").td("c2r1").__(). + tr().td("c1r2").td("c2r2").__().__().__(). h2("Nested tables"). div(_INFO_WRAP). table("#t2"). thead(). - tr().th(_TH, "Column1").th(_TH, "Column2")._()._(). + tr().th(_TH, "Column1").th(_TH, "Column2").__().__(). tbody(). tr().td("r1"). // th wouldn't work as of dt 1.7.5 td().$class(C_TABLE). table("#t3"). thead(). - tr().th("SubColumn1").th("SubColumn2")._()._(). + tr().th("SubColumn1").th("SubColumn2").__().__(). tbody(). - tr().td("subc1r1").td("subc2r1")._(). - tr().td("subc1r2").td("subc2r2")._()._()._()._()._(). + tr().td("subc1r1").td("subc2r1").__(). + tr().td("subc1r2").td("subc2r2").__().__().__().__().__(). tr().td("r2"). // ditto td().$class(C_TABLE). table("#t4"). thead(). - tr().th("SubColumn1").th("SubColumn2")._()._(). + tr().th("SubColumn1").th("SubColumn2").__().__(). tbody(). - tr().td("subc1r1").td("subc2r1")._(). - tr().td("subc1r2").td("subc2r2")._(). - _()._()._()._()._()._()._()._()._(); + tr().td("subc1r1").td("subc2r1").__(). + tr().td("subc1r2").td("subc2r2").__(). 
+ __().__().__().__().__().__().__().__().__(); } } @@ -358,7 +358,7 @@ public class TestWebApp { assertEquals("foo", getContent(baseUrl +"test/foo").trim()); app1 = WebApps.$for("test", this).at(port).start(); assertEquals(port, app1.getListenerAddress().getPort()); - app2 = WebApps.$for("test", this).at("0.0.0.0",port, true).start(); + app2 = WebApps.$for("test", this).at("0.0.0.0", port, true).start(); assertTrue(app2.getListenerAddress().getPort() > port); Configuration conf = new Configuration(); port = ServerSocketUtil.waitForPort(47000, 60); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java index 89042c64c81..e510dd57ba8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlBlock.java @@ -24,8 +24,6 @@ import java.io.PrintWriter; import org.apache.hadoop.yarn.webapp.WebAppException; import org.apache.hadoop.yarn.webapp.test.WebAppTests; -import org.apache.hadoop.yarn.webapp.view.HtmlBlock; -import org.apache.hadoop.yarn.webapp.view.HtmlPage; import org.junit.Test; import static org.mockito.Mockito.*; @@ -35,7 +33,7 @@ public class TestHtmlBlock { @Override public void render(Block html) { html. - p("#testid")._("test note")._(); + p("#testid").__("test note").__(); } } @@ -43,16 +41,16 @@ public class TestHtmlBlock { @Override public void render(Block html) { html. - p()._("should throw"); + p().__("should throw"); } } public static class ShortPage extends HtmlPage { @Override - public void render(Page.HTML<_> html) { + public void render(Page.HTML<__> html) { html. title("short test"). - _(ShortBlock.class); + __(ShortBlock.class); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java index a5a8e1f1ce1..beed31fb478 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestHtmlPage.java @@ -25,7 +25,6 @@ import java.io.PrintWriter; import org.apache.hadoop.yarn.webapp.MimeType; import org.apache.hadoop.yarn.webapp.WebAppException; import org.apache.hadoop.yarn.webapp.test.WebAppTests; -import org.apache.hadoop.yarn.webapp.view.HtmlPage; import org.junit.Test; import static org.mockito.Mockito.*; @@ -34,19 +33,19 @@ public class TestHtmlPage { public static class TestView extends HtmlPage { @Override - public void render(Page.HTML<_> html) { + public void render(Page.HTML<__> html) { html. title("test"). - p("#testid")._("test note")._()._(); + p("#testid").__("test note").__().__(); } } public static class ShortView extends HtmlPage { @Override - public void render(Page.HTML<_> html) { + public void render(Page.HTML<__> html) { html. title("short test"). 
- p()._("should throw"); + p().__("should throw"); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java index da5efbb82fb..751aa2cabe4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestInfoBlock.java @@ -45,7 +45,7 @@ public class TestInfoBlock { static { resInfo = new ResponseInfo(); - resInfo._("User_Name", JAVASCRIPT); + resInfo.__("User_Name", JAVASCRIPT); } @Override @@ -68,8 +68,8 @@ public class TestInfoBlock { static { resInfo = new ResponseInfo(); - resInfo._("Multiple_line_value", "This is one line."); - resInfo._("Multiple_line_value", "This is first line.\nThis is second line."); + resInfo.__("Multiple_line_value", "This is one line."); + resInfo.__("Multiple_line_value", "This is first line.\nThis is second line."); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java index a718636cbf5..20df4093ad3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/view/TestTwoColumnCssPage.java @@ -22,8 +22,6 @@ import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.webapp.Controller; import org.apache.hadoop.yarn.webapp.WebApps; import org.apache.hadoop.yarn.webapp.test.WebAppTests; -import org.apache.hadoop.yarn.webapp.view.HtmlPage; -import org.apache.hadoop.yarn.webapp.view.TwoColumnCssLayout; import org.junit.Test; public class TestTwoColumnCssPage { @@ -52,10 +50,10 @@ public class TestTwoColumnCssPage { public static class TestView extends HtmlPage { @Override - public void render(Page.HTML<_> html) { + public void render(Page.HTML<__> html) { html. title($("title")). 
- h1($("title"))._(); + h1($("title")).__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml index d732af42b26..cace493f634 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml @@ -131,10 +131,6 @@ com.google.guava guava - - commons-logging - commons-logging - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java index 73d5d392776..7d57048de2f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java @@ -22,8 +22,6 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -61,11 +59,13 @@ import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider; import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ApplicationHistoryClientService extends AbstractService implements ApplicationHistoryProtocol { - private static final Log LOG = LogFactory - .getLog(ApplicationHistoryClientService.class); + private static final Logger LOG = + LoggerFactory.getLogger(ApplicationHistoryClientService.class); private ApplicationHistoryManager history; private Server server; private InetSocketAddress bindAddress; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java index 130bb32b0e4..b8931d81abc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java @@ -23,8 +23,6 @@ import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; -import org.apache.commons.logging.Log; -import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; @@ -42,11 +40,13 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.records.Container import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ApplicationHistoryManagerImpl extends AbstractService implements ApplicationHistoryManager { - private static final Log LOG = LogFactory - .getLog(ApplicationHistoryManagerImpl.class); + private static final Logger LOG = + LoggerFactory.getLogger(ApplicationHistoryManagerImpl.class); private static final String UNAVAILABLE = "N/A"; private ApplicationHistoryStore historyStore; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java index 5404338bde1..9240ed872e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java @@ -28,8 +28,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; @@ -69,12 +67,14 @@ import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class ApplicationHistoryManagerOnTimelineStore extends AbstractService implements ApplicationHistoryManager { - private static final Log LOG = LogFactory - .getLog(ApplicationHistoryManagerOnTimelineStore.class); + private static final Logger LOG = LoggerFactory + .getLogger(ApplicationHistoryManagerOnTimelineStore.class); @VisibleForTesting static final String UNAVAILABLE = "N/A"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java index 6e6e98bde32..85e5f2db0af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java @@ -22,8 +22,6 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer2; @@ -60,6 +58,8 @@ import org.eclipse.jetty.servlet.FilterHolder; import org.eclipse.jetty.webapp.WebAppContext; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * History server that keeps track of all types of history in the cluster. @@ -68,8 +68,8 @@ import com.google.common.annotations.VisibleForTesting; public class ApplicationHistoryServer extends CompositeService { public static final int SHUTDOWN_HOOK_PRIORITY = 30; - private static final Log LOG = LogFactory - .getLog(ApplicationHistoryServer.class); + private static final Logger LOG = LoggerFactory + .getLogger(ApplicationHistoryServer.class); private ApplicationHistoryClientService ahsClientService; private ApplicationACLsManager aclsManager; @@ -178,7 +178,7 @@ public class ApplicationHistoryServer extends CompositeService { appHistoryServer.init(conf); appHistoryServer.start(); } catch (Throwable t) { - LOG.fatal("Error starting ApplicationHistoryServer", t); + LOG.error("Error starting ApplicationHistoryServer", t); ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer"); } return appHistoryServer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java index be7bc6df0eb..fa2da44f5f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java @@ -30,8 +30,6 @@ import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -74,6 +72,8 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.records.impl.pb.C import org.apache.hadoop.yarn.util.ConverterUtils; import com.google.protobuf.InvalidProtocolBufferException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * File system implementation of {@link ApplicationHistoryStore}. 
In this @@ -89,8 +89,8 @@ import com.google.protobuf.InvalidProtocolBufferException; public class FileSystemApplicationHistoryStore extends AbstractService implements ApplicationHistoryStore { - private static final Log LOG = LogFactory - .getLog(FileSystemApplicationHistoryStore.class); + private static final Logger LOG = LoggerFactory + .getLogger(FileSystemApplicationHistoryStore.class); private static final String ROOT_DIR_NAME = "ApplicationHistoryDataRoot"; private static final int MIN_BLOCK_SIZE = 256 * 1024; @@ -141,7 +141,7 @@ public class FileSystemApplicationHistoryStore extends AbstractService } outstandingWriters.clear(); } finally { - IOUtils.cleanup(LOG, fs); + IOUtils.cleanupWithLogger(LOG, fs); } super.serviceStop(); } @@ -711,12 +711,12 @@ public class FileSystemApplicationHistoryStore extends AbstractService } public void reset() throws IOException { - IOUtils.cleanup(LOG, scanner); + IOUtils.cleanupWithLogger(LOG, scanner); scanner = reader.createScanner(); } public void close() { - IOUtils.cleanup(LOG, scanner, reader, fsdis); + IOUtils.cleanupWithLogger(LOG, scanner, reader, fsdis); } } @@ -740,13 +740,13 @@ public class FileSystemApplicationHistoryStore extends AbstractService YarnConfiguration.DEFAULT_FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE), null, getConfig()); } catch (IOException e) { - IOUtils.cleanup(LOG, fsdos); + IOUtils.cleanupWithLogger(LOG, fsdos); throw e; } } public synchronized void close() { - IOUtils.cleanup(LOG, writer, fsdos); + IOUtils.cleanupWithLogger(LOG, writer, fsdos); } public synchronized void writeHistoryData(HistoryDataKey key, byte[] value) @@ -756,13 +756,13 @@ public class FileSystemApplicationHistoryStore extends AbstractService dos = writer.prepareAppendKey(-1); key.write(dos); } finally { - IOUtils.cleanup(LOG, dos); + IOUtils.cleanupWithLogger(LOG, dos); } try { dos = writer.prepareAppendValue(value.length); dos.write(value); } finally { - IOUtils.cleanup(LOG, dos); + IOUtils.cleanupWithLogger(LOG, dos); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java index 3798ee5b3b2..1601f8c54ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSErrorsAndWarningsPage.java @@ -34,7 +34,7 @@ public class AHSErrorsAndWarningsPage extends AHSView { } @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String title = "Errors and Warnings in the Application History Server"; setTitle(title); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java index 8821bc02dcb..d8455033f09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSLogsPage.java @@ -32,7 +32,7 @@ public class AHSLogsPage extends AHSView { * preHead(org.apache.hadoop .yarn.webapp.hamlet.Hamlet.HTML) */ @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { String logEntity = $(ENTITY_STRING); if (logEntity == null || logEntity.isEmpty()) { logEntity = $(CONTAINER_ID); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java index 65b5ac168a6..d965eebb456 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSView.java @@ -37,7 +37,7 @@ public class AHSView extends TwoColumnLayout { static final int MAX_FAST_ROWS = 1000; // inline js array @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); set(DATATABLES_ID, "apps"); set(initID(DATATABLES, "apps"), WebPageUtils.appsTableInit(false)); @@ -49,7 +49,7 @@ public class AHSView extends TwoColumnLayout { setTitle(sjoin(reqState, "Applications")); } - protected void commonPreHead(Page.HTML<_> html) { + protected void commonPreHead(Page.HTML<__> html) { set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java index 619519934fa..13410a88054 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java @@ -42,8 +42,6 @@ import javax.ws.rs.core.Response; import javax.ws.rs.core.StreamingOutput; import javax.ws.rs.core.Response.ResponseBuilder; import javax.ws.rs.core.Response.Status; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -80,12 +78,15 @@ import com.google.inject.Inject; import com.google.inject.Singleton; import com.sun.jersey.api.client.ClientHandlerException; import com.sun.jersey.api.client.UniformInterfaceException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Singleton @Path("/ws/v1/applicationhistory") public class AHSWebServices extends WebServices { - private static final Log LOG = LogFactory.getLog(AHSWebServices.class); + private static final Logger LOG = LoggerFactory + .getLogger(AHSWebServices.class); private static final String NM_DOWNLOAD_URI_STR = "/ws/v1/node/containers"; private static final Joiner JOINER = Joiner.on(""); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java index b2419e9ac6b..996568bf778 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutBlock.java @@ -19,9 +19,7 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp; import com.google.inject.Inject; -import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout; -import org.apache.hadoop.yarn.util.YarnVersionInfo; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; import org.apache.hadoop.yarn.webapp.View; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; @@ -38,10 +36,10 @@ public class AboutBlock extends HtmlBlock { TimelineAbout tsInfo = TimelineUtils.createTimelineAbout( "Timeline Server - Generic History Service UI"); info("Timeline Server Overview"). - _("Timeline Server Version:", tsInfo.getTimelineServiceBuildVersion() + + __("Timeline Server Version:", tsInfo.getTimelineServiceBuildVersion() + " on " + tsInfo.getTimelineServiceVersionBuiltOn()). 
- _("Hadoop Version:", tsInfo.getHadoopBuildVersion() + + __("Hadoop Version:", tsInfo.getHadoopBuildVersion() + " on " + tsInfo.getHadoopVersionBuiltOn()); - html._(InfoBlock.class); + html.__(InfoBlock.class); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java index b50073af646..1df5832132c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AboutPage.java @@ -20,12 +20,9 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.YarnWebParams; - -import static org.apache.hadoop.yarn.util.StringHelper.join; public class AboutPage extends AHSView { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); set(TITLE, "Timeline Server - Generic History Service"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java index a08297db9a6..ec00db675d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppAttemptPage.java @@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams; public class AppAttemptPage extends AHSView { @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String appAttemptId = $(YarnWebParams.APPLICATION_ATTEMPT_ID); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java index c0e1394ae62..32fcc958bea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AppPage.java @@ -30,7 +30,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams; public class 
AppPage extends AHSView { @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String appId = $(YarnWebParams.APPLICATION_ID); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java index 1be8a26136d..8327ee68a15 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ContainerPage.java @@ -26,7 +26,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams; public class ContainerPage extends AHSView { @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String containerId = $(YarnWebParams.CONTAINER_ID); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java index 25ee4f0a221..915af4a9969 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java @@ -18,21 +18,19 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; +import static org.apache.hadoop.util.GenericsUtil.isLog4jLogger; + public class NavBlock extends HtmlBlock { @Override public void render(Block html) { boolean addErrorsAndWarningsLink = false; - Log log = LogFactory.getLog(NavBlock.class); - if (log instanceof Log4JLogger) { + if (isLog4jLogger(NavBlock.class)) { Log4jWarningErrorMetricsAppender appender = Log4jWarningErrorMetricsAppender.findAppender(); if (appender != null) { @@ -44,34 +42,34 @@ public class NavBlock extends HtmlBlock { h3("Application History"). ul(). li().a(url("about"), "About"). - _(). + __(). li().a(url("apps"), "Applications"). ul(). li().a(url("apps", YarnApplicationState.FINISHED.toString()), YarnApplicationState.FINISHED.toString()). - _(). + __(). li().a(url("apps", YarnApplicationState.FAILED.toString()), YarnApplicationState.FAILED.toString()). - _(). + __(). 
li().a(url("apps", YarnApplicationState.KILLED.toString()), YarnApplicationState.KILLED.toString()). - _(). - _(). - _(). - _(); + __(). + __(). + __(). + __(); Hamlet.UL> tools = nav.h3("Tools").ul(); - tools.li().a("/conf", "Configuration")._() - .li().a("/logs", "Local logs")._() - .li().a("/stacks", "Server stacks")._() - .li().a("/jmx?qry=Hadoop:*", "Server metrics")._(); + tools.li().a("/conf", "Configuration").__() + .li().a("/logs", "Local logs").__() + .li().a("/stacks", "Server stacks").__() + .li().a("/jmx?qry=Hadoop:*", "Server metrics").__(); if (addErrorsAndWarningsLink) { - tools.li().a(url("errors-and-warnings"), "Errors/Warnings")._(); + tools.li().a(url("errors-and-warnings"), "Errors/Warnings").__(); } - tools._()._(); + tools.__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java index 79e2bf29990..82db770191b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/KeyValueBasedTimelineStore.java @@ -18,8 +18,6 @@ package org.apache.hadoop.yarn.server.timeline; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.service.AbstractService; @@ -33,6 +31,8 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEnt import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse; import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError; import org.apache.hadoop.yarn.server.timeline.TimelineDataManager.CheckAcl; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; @@ -71,8 +71,8 @@ abstract class KeyValueBasedTimelineStore private boolean serviceStopped = false; - private static final Log LOG - = LogFactory.getLog(KeyValueBasedTimelineStore.class); + private static final Logger LOG + = LoggerFactory.getLogger(KeyValueBasedTimelineStore.class); public KeyValueBasedTimelineStore() { super(KeyValueBasedTimelineStore.class.getName()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java index ffe0413051a..e3db1dcc28a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java @@ -22,8 +22,6 @@ import 
com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.commons.collections.map.LRUMap; import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability; @@ -48,6 +46,7 @@ import org.apache.hadoop.yarn.server.timeline.util.LeveldbUtils.KeyParser; import org.apache.hadoop.yarn.server.utils.LeveldbIterator; import org.fusesource.leveldbjni.JniDBFactory; import org.iq80.leveldb.*; +import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; @@ -118,8 +117,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes; @InterfaceStability.Unstable public class LeveldbTimelineStore extends AbstractService implements TimelineStore { - private static final Log LOG = LogFactory - .getLog(LeveldbTimelineStore.class); + private static final org.slf4j.Logger LOG = LoggerFactory + .getLogger(LeveldbTimelineStore.class); @Private @VisibleForTesting @@ -240,7 +239,7 @@ public class LeveldbTimelineStore extends AbstractService localFS.setPermission(dbPath, LEVELDB_DIR_UMASK); } } finally { - IOUtils.cleanup(LOG, localFS); + IOUtils.cleanupWithLogger(LOG, localFS); } LOG.info("Using leveldb path " + dbPath); try { @@ -284,7 +283,7 @@ public class LeveldbTimelineStore extends AbstractService " closing db now", e); } } - IOUtils.cleanup(LOG, db); + IOUtils.cleanupWithLogger(LOG, db); super.serviceStop(); } @@ -320,7 +319,7 @@ public class LeveldbTimelineStore extends AbstractService discardOldEntities(timestamp); Thread.sleep(ttlInterval); } catch (IOException e) { - LOG.error(e); + LOG.error(e.toString()); } catch (InterruptedException e) { LOG.info("Deletion thread received interrupt, exiting"); break; @@ -394,7 +393,7 @@ public class LeveldbTimelineStore extends AbstractService } catch(DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, iterator); + IOUtils.cleanupWithLogger(LOG, iterator); } } @@ -570,7 +569,7 @@ public class LeveldbTimelineStore extends AbstractService } catch(DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, iterator); + IOUtils.cleanupWithLogger(LOG, iterator); } return events; } @@ -753,7 +752,7 @@ public class LeveldbTimelineStore extends AbstractService } catch(DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, iterator); + IOUtils.cleanupWithLogger(LOG, iterator); } } @@ -925,7 +924,7 @@ public class LeveldbTimelineStore extends AbstractService } finally { lock.unlock(); writeLocks.returnLock(lock); - IOUtils.cleanup(LOG, writeBatch); + IOUtils.cleanupWithLogger(LOG, writeBatch); } for (EntityIdentifier relatedEntity : relatedEntitiesWithoutStartTimes) { @@ -1376,7 +1375,7 @@ public class LeveldbTimelineStore extends AbstractService } catch(DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, iterator); + IOUtils.cleanupWithLogger(LOG, iterator); } } @@ -1506,7 +1505,7 @@ public class LeveldbTimelineStore extends AbstractService } catch(DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, writeBatch); + IOUtils.cleanupWithLogger(LOG, writeBatch); } } @@ -1548,7 +1547,7 @@ public class LeveldbTimelineStore extends AbstractService LOG.error("Got IOException while deleting entities for type " + entityType + ", continuing to next type", 
e); } finally { - IOUtils.cleanup(LOG, iterator, pfIterator); + IOUtils.cleanupWithLogger(LOG, iterator, pfIterator); deleteLock.writeLock().unlock(); if (typeCount > 0) { LOG.info("Deleted " + typeCount + " entities of type " + @@ -1629,7 +1628,7 @@ public class LeveldbTimelineStore extends AbstractService String incompatibleMessage = "Incompatible version for timeline store: expecting version " + getCurrentVersion() + ", but loading version " + loadedVersion; - LOG.fatal(incompatibleMessage); + LOG.error(incompatibleMessage); throw new IOException(incompatibleMessage); } } @@ -1718,7 +1717,7 @@ public class LeveldbTimelineStore extends AbstractService } catch(DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, writeBatch); + IOUtils.cleanupWithLogger(LOG, writeBatch); } } @@ -1755,7 +1754,7 @@ public class LeveldbTimelineStore extends AbstractService } catch(DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, iterator); + IOUtils.cleanupWithLogger(LOG, iterator); } } @@ -1805,7 +1804,7 @@ public class LeveldbTimelineStore extends AbstractService } catch(DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, iterator); + IOUtils.cleanupWithLogger(LOG, iterator); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java index 6d10671c6a0..5c511a3aaac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDB.java @@ -33,8 +33,6 @@ import java.util.Map.Entry; import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang.time.FastDateFormat; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -45,6 +43,8 @@ import org.fusesource.leveldbjni.JniDBFactory; import org.iq80.leveldb.DB; import org.iq80.leveldb.Options; import org.iq80.leveldb.WriteBatch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Contains the logic to lookup a leveldb by timestamp so that multiple smaller @@ -54,7 +54,8 @@ import org.iq80.leveldb.WriteBatch; class RollingLevelDB { /** Logger for this class. */ - private static final Log LOG = LogFactory.getLog(RollingLevelDB.class); + private static final Logger LOG = LoggerFactory. + getLogger(RollingLevelDB.class); /** Factory to open and create new leveldb instances. */ private static JniDBFactory factory = new JniDBFactory(); /** Thread safe date formatter. */ @@ -151,7 +152,7 @@ class RollingLevelDB { } public void close() { - IOUtils.cleanup(LOG, writeBatch); + IOUtils.cleanupWithLogger(LOG, writeBatch); } } @@ -346,7 +347,7 @@ class RollingLevelDB { .iterator(); while (iterator.hasNext()) { Entry entry = iterator.next(); - IOUtils.cleanup(LOG, entry.getValue()); + IOUtils.cleanupWithLogger(LOG, entry.getValue()); String dbName = fdf.format(entry.getKey()); Path path = new Path(rollingDBPath, getName() + "." 
+ dbName); try { @@ -361,9 +362,9 @@ class RollingLevelDB { public void stop() throws Exception { for (DB db : rollingdbs.values()) { - IOUtils.cleanup(LOG, db); + IOUtils.cleanupWithLogger(LOG, db); } - IOUtils.cleanup(LOG, lfs); + IOUtils.cleanupWithLogger(LOG, lfs); } private long computeNextCheckMillis(long now) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java index 00f66308605..1ac170c2505 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java @@ -38,8 +38,6 @@ import java.util.TreeMap; import org.apache.commons.collections.map.LRUMap; import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability; @@ -76,6 +74,8 @@ import org.iq80.leveldb.ReadOptions; import org.iq80.leveldb.WriteBatch; import org.nustaq.serialization.FSTConfiguration; import org.nustaq.serialization.FSTClazzNameRegistry; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static java.nio.charset.StandardCharsets.UTF_8; @@ -168,8 +168,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes; @InterfaceStability.Unstable public class RollingLevelDBTimelineStore extends AbstractService implements TimelineStore { - private static final Log LOG = LogFactory - .getLog(RollingLevelDBTimelineStore.class); + private static final Logger LOG = LoggerFactory + .getLogger(RollingLevelDBTimelineStore.class); private static FSTConfiguration fstConf = FSTConfiguration.createDefaultConfiguration(); // Fall back to 2.24 parsing if 2.50 parsing fails @@ -368,9 +368,9 @@ public class RollingLevelDBTimelineStore extends AbstractService implements + " closing db now", e); } } - IOUtils.cleanup(LOG, domaindb); - IOUtils.cleanup(LOG, starttimedb); - IOUtils.cleanup(LOG, ownerdb); + IOUtils.cleanupWithLogger(LOG, domaindb); + IOUtils.cleanupWithLogger(LOG, starttimedb); + IOUtils.cleanupWithLogger(LOG, ownerdb); entitydb.stop(); indexdb.stop(); super.serviceStop(); @@ -399,7 +399,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements discardOldEntities(timestamp); Thread.sleep(ttlInterval); } catch (IOException e) { - LOG.error(e); + LOG.error(e.toString()); } catch (InterruptedException e) { LOG.info("Deletion thread received interrupt, exiting"); break; @@ -1525,7 +1525,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements + ". 
Total start times deleted so far this cycle: " + startTimesCount); } - IOUtils.cleanup(LOG, writeBatch); + IOUtils.cleanupWithLogger(LOG, writeBatch); writeBatch = starttimedb.createWriteBatch(); batchSize = 0; } @@ -1545,7 +1545,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements LOG.info("Deleted " + startTimesCount + "/" + totalCount + " start time entities earlier than " + minStartTime); } finally { - IOUtils.cleanup(LOG, writeBatch); + IOUtils.cleanupWithLogger(LOG, writeBatch); } return startTimesCount; } @@ -1622,7 +1622,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements String incompatibleMessage = "Incompatible version for timeline store: " + "expecting version " + getCurrentVersion() + ", but loading version " + loadedVersion; - LOG.fatal(incompatibleMessage); + LOG.error(incompatibleMessage); throw new IOException(incompatibleMessage); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java index 57a9346602c..56b71faf2ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java @@ -26,8 +26,6 @@ import java.util.Iterator; import java.util.List; import java.util.SortedSet; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; @@ -45,6 +43,8 @@ import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager; import org.apache.hadoop.yarn.webapp.BadRequestException; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The class wrap over the timeline store and the ACLs manager. 
It does some non @@ -54,7 +54,8 @@ import com.google.common.annotations.VisibleForTesting; */ public class TimelineDataManager extends AbstractService { - private static final Log LOG = LogFactory.getLog(TimelineDataManager.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineDataManager.class); @VisibleForTesting public static final String DEFAULT_DOMAIN_ID = "DEFAULT"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java index b62a54111af..bcd57ef1d02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/recovery/LeveldbTimelineStateStore.java @@ -28,8 +28,6 @@ import java.io.File; import java.io.IOException; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -50,6 +48,8 @@ import org.iq80.leveldb.DB; import org.iq80.leveldb.DBException; import org.iq80.leveldb.Options; import org.iq80.leveldb.WriteBatch; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.fusesource.leveldbjni.JniDBFactory.bytes; @@ -60,8 +60,8 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes; public class LeveldbTimelineStateStore extends TimelineStateStore { - public static final Log LOG = - LogFactory.getLog(LeveldbTimelineStateStore.class); + public static final Logger LOG = + LoggerFactory.getLogger(LeveldbTimelineStateStore.class); private static final String DB_NAME = "timeline-state-store.ldb"; private static final FsPermission LEVELDB_DIR_UMASK = FsPermission @@ -103,7 +103,7 @@ public class LeveldbTimelineStateStore extends localFS.setPermission(dbPath, LEVELDB_DIR_UMASK); } } finally { - IOUtils.cleanup(LOG, localFS); + IOUtils.cleanupWithLogger(LOG, localFS); } JniDBFactory factory = new JniDBFactory(); try { @@ -131,7 +131,7 @@ public class LeveldbTimelineStateStore extends @Override protected void closeStorage() throws IOException { - IOUtils.cleanup(LOG, db); + IOUtils.cleanupWithLogger(LOG, db); } @Override @@ -168,8 +168,8 @@ public class LeveldbTimelineStateStore extends } catch (DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, ds); - IOUtils.cleanup(LOG, batch); + IOUtils.cleanupWithLogger(LOG, ds); + IOUtils.cleanupWithLogger(LOG, batch); } } @@ -239,7 +239,7 @@ public class LeveldbTimelineStateStore extends key.write(dataStream); dataStream.close(); } finally { - IOUtils.cleanup(LOG, dataStream); + IOUtils.cleanupWithLogger(LOG, dataStream); } return memStream.toByteArray(); } @@ -253,7 +253,7 @@ public class LeveldbTimelineStateStore extends try { key.readFields(in); } finally { - IOUtils.cleanup(LOG, in); + IOUtils.cleanupWithLogger(LOG, in); } state.tokenMasterKeyState.add(key); } @@ -267,7 +267,7 @@ public class LeveldbTimelineStateStore extends try { 
data.readFields(in); } finally { - IOUtils.cleanup(LOG, in); + IOUtils.cleanupWithLogger(LOG, in); } state.tokenState.put(data.getTokenIdentifier(), data.getRenewDate()); } @@ -290,7 +290,7 @@ public class LeveldbTimelineStateStore extends ++numKeys; } } finally { - IOUtils.cleanup(LOG, iterator); + IOUtils.cleanupWithLogger(LOG, iterator); } return numKeys; } @@ -314,7 +314,7 @@ public class LeveldbTimelineStateStore extends } catch (DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(LOG, iterator); + IOUtils.cleanupWithLogger(LOG, iterator); } return numTokens; } @@ -332,7 +332,7 @@ public class LeveldbTimelineStateStore extends try { state.latestSequenceNumber = in.readInt(); } finally { - IOUtils.cleanup(LOG, in); + IOUtils.cleanupWithLogger(LOG, in); } } } @@ -412,7 +412,7 @@ public class LeveldbTimelineStateStore extends String incompatibleMessage = "Incompatible version for timeline state store: expecting version " + getCurrentVersion() + ", but loading version " + loadedVersion; - LOG.fatal(incompatibleMessage); + LOG.error(incompatibleMessage); throw new IOException(incompatibleMessage); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java index 25252fc841e..6c32eecc5a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java @@ -24,8 +24,6 @@ import java.util.HashMap; import java.util.Map; import org.apache.commons.collections.map.LRUMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; @@ -41,6 +39,8 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore; import org.apache.hadoop.yarn.util.StringHelper; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * TimelineACLsManager check the entity level timeline data access. @@ -48,7 +48,8 @@ import com.google.common.annotations.VisibleForTesting; @Private public class TimelineACLsManager { - private static final Log LOG = LogFactory.getLog(TimelineACLsManager.class); + private static final Logger LOG = LoggerFactory. 
+ getLogger(TimelineACLsManager.class); private static final int DOMAIN_ACCESS_ENTRY_CACHE_SIZE = 100; private AdminACLsManager adminAclsManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java index 60a0348b045..0c6892a19d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java @@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.timeline.security; import java.io.IOException; import java.util.Map.Entry; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -35,6 +33,8 @@ import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore; import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore; import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore.TimelineServiceState; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The service wrapper of {@link TimelineDelegationTokenSecretManager} @@ -118,8 +118,8 @@ public class TimelineDelegationTokenSecretManagerService extends public static class TimelineDelegationTokenSecretManager extends AbstractDelegationTokenSecretManager { - public static final Log LOG = - LogFactory.getLog(TimelineDelegationTokenSecretManager.class); + public static final Logger LOG = + LoggerFactory.getLogger(TimelineDelegationTokenSecretManager.class); private TimelineStateStore stateStore; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java index ad4e2bbc6fe..be8e3c599e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java @@ -43,8 +43,6 @@ import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.http.JettyUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; @@ -68,13 +66,16 @@ import 
org.apache.hadoop.yarn.webapp.NotFoundException; import com.google.inject.Inject; import com.google.inject.Singleton; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Singleton @Path("/ws/v1/timeline") //TODO: support XML serialization/deserialization public class TimelineWebServices { - private static final Log LOG = LogFactory.getLog(TimelineWebServices.class); + private static final Logger LOG = LoggerFactory + .getLogger(TimelineWebServices.class); private TimelineDataManager timelineDataManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java index 15a00d28727..df4adbe0c69 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java @@ -32,8 +32,6 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -51,12 +49,14 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestFileSystemApplicationHistoryStore extends ApplicationHistoryStoreTestUtils { - private static Log LOG = LogFactory - .getLog(TestFileSystemApplicationHistoryStore.class.getName()); + private static final Logger LOG = LoggerFactory + .getLogger(TestFileSystemApplicationHistoryStore.class.getName()); private FileSystem fs; private Path fsWorkingPath; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java index 0c292d8e9f5..f68a1c41c61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLeveldbTimelineStore.java @@ -160,7 +160,7 @@ public class TestLeveldbTimelineStore extends TimelineStoreTestUtils { } catch(DBException e) { throw new IOException(e); } finally { - IOUtils.cleanup(null, iterator, pfIterator); + IOUtils.cleanupWithLogger(null, iterator, pfIterator); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml index 
1ee71104833..e8d38801c42 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml @@ -58,6 +58,13 @@ org.apache.hadoop hadoop-yarn-common + + + org.apache.hadoop + hadoop-yarn-common + test-jar + test + com.google.guava @@ -95,6 +102,39 @@ org.fusesource.leveldbjni leveldbjni-all + + org.apache.geronimo.specs + geronimo-jcache_1.0_spec + + + org.ehcache + ehcache + + + com.zaxxer + HikariCP-java7 + + + org.hsqldb + hsqldb + test + + + com.microsoft.sqlserver + mssql-jdbc + runtime + + + com.microsoft.azure + azure-keyvault + + + + + org.apache.curator + curator-test + test + @@ -140,6 +180,7 @@ yarn_server_common_protos.proto yarn_server_common_service_protos.proto yarn_server_common_service_protos.proto + yarn_server_federation_protos.proto ResourceTracker.proto SCMUploader.proto collectornodemanager_protocol.proto @@ -163,6 +204,27 @@ + + org.apache.rat + apache-rat-plugin + + + src/test/resources/nodes + src/test/resources/nodes-malformed + + + + + maven-jar-plugin + + + + test-jar + + test-compile + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java index 3012be382cc..edec89f3025 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java @@ -71,7 +71,7 @@ public class ServerRMProxy extends RMProxy { @InterfaceAudience.Private @Override - protected InetSocketAddress getRMAddress(YarnConfiguration conf, + public InetSocketAddress getRMAddress(YarnConfiguration conf, Class protocol) { if (protocol == ResourceTracker.class) { return conf.getSocketAddr( @@ -93,7 +93,7 @@ public class ServerRMProxy extends RMProxy { @InterfaceAudience.Private @Override - protected void checkAllowedProtocols(Class protocol) { + public void checkAllowedProtocols(Class protocol) { Preconditions.checkArgument( protocol.isAssignableFrom(ResourceTracker.class), "ResourceManager does not support this protocol"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java index 7568bbb0584..3b0ec10595b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatResponse.java @@ -104,10 +104,10 @@ public abstract class NodeHeartbeatResponse { public abstract void setResource(Resource resource); - public abstract List getContainersToDecrease(); + public abstract List getContainersToUpdate(); - public abstract void addAllContainersToDecrease( - Collection containersToDecrease); + public abstract void addAllContainersToUpdate( + 
Collection containersToUpdate); public abstract ContainerQueuingLimit getContainerQueuingLimit(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java index 51c1a786d15..46c2b0b8789 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java @@ -75,7 +75,7 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse { private MasterKey containerTokenMasterKey = null; private MasterKey nmTokenMasterKey = null; private ContainerQueuingLimit containerQueuingLimit = null; - private List containersToDecrease = null; + private List containersToUpdate = null; private List containersToSignal = null; public NodeHeartbeatResponsePBImpl() { @@ -119,8 +119,8 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse { if (this.systemCredentials != null) { addSystemCredentialsToProto(); } - if (this.containersToDecrease != null) { - addContainersToDecreaseToProto(); + if (this.containersToUpdate != null) { + addContainersToUpdateToProto(); } if (this.containersToSignal != null) { addContainersToSignalToProto(); @@ -499,39 +499,39 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse { builder.addAllApplicationsToCleanup(iterable); } - private void initContainersToDecrease() { - if (this.containersToDecrease != null) { + private void initContainersToUpdate() { + if (this.containersToUpdate != null) { return; } NodeHeartbeatResponseProtoOrBuilder p = viaProto ? 
proto : builder; - List list = p.getContainersToDecreaseList(); - this.containersToDecrease = new ArrayList<>(); + List list = p.getContainersToUpdateList(); + this.containersToUpdate = new ArrayList<>(); for (ContainerProto c : list) { - this.containersToDecrease.add(convertFromProtoFormat(c)); + this.containersToUpdate.add(convertFromProtoFormat(c)); } } @Override - public List getContainersToDecrease() { - initContainersToDecrease(); - return this.containersToDecrease; + public List getContainersToUpdate() { + initContainersToUpdate(); + return this.containersToUpdate; } @Override - public void addAllContainersToDecrease( - final Collection containersToDecrease) { - if (containersToDecrease == null) { + public void addAllContainersToUpdate( + final Collection containersToBeUpdated) { + if (containersToBeUpdated == null) { return; } - initContainersToDecrease(); - this.containersToDecrease.addAll(containersToDecrease); + initContainersToUpdate(); + this.containersToUpdate.addAll(containersToBeUpdated); } - private void addContainersToDecreaseToProto() { + private void addContainersToUpdateToProto() { maybeInitBuilder(); - builder.clearContainersToDecrease(); - if (this.containersToDecrease == null) { + builder.clearContainersToUpdate(); + if (this.containersToUpdate == null) { return; } Iterable iterable = new @@ -539,7 +539,7 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse { @Override public Iterator iterator() { return new Iterator() { - private Iterator iter = containersToDecrease.iterator(); + private Iterator iter = containersToUpdate.iterator(); @Override public boolean hasNext() { return iter.hasNext(); @@ -555,7 +555,7 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse { }; } }; - builder.addAllContainersToDecrease(iterable); + builder.addAllContainersToUpdate(iterable); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java new file mode 100644 index 00000000000..3931f2bb639 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.failover; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.yarn.client.ClientRMProxy; +import org.apache.hadoop.yarn.client.RMFailoverProxyProvider; +import org.apache.hadoop.yarn.conf.HAUtil; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.utils.AMRMClientUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Utility class that creates proxy for specified protocols when federation is + * enabled. The class creates a federation aware failover provider, i.e. the + * failover provider uses the {@code FederationStateStore} to determine the + * current active ResourceManager + */ +@Private +@Unstable +public final class FederationProxyProviderUtil { + + public static final Logger LOG = + LoggerFactory.getLogger(FederationProxyProviderUtil.class); + + // Disable constructor + private FederationProxyProviderUtil() { + } + + /** + * Create a proxy for the specified protocol in the context of Federation. For + * non-HA, this is a direct connection to the ResourceManager address. When HA + * is enabled, the proxy handles the failover between the ResourceManagers as + * well. + * + * @param configuration Configuration to generate {@link ClientRMProxy} + * @param protocol Protocol for the proxy + * @param subClusterId the unique identifier or the sub-cluster + * @param user the user on whose behalf the proxy is being created + * @param Type information of the proxy + * @return Proxy to the RM + * @throws IOException on failure + */ + @Public + @Unstable + public static T createRMProxy(Configuration configuration, + Class protocol, SubClusterId subClusterId, UserGroupInformation user) + throws IOException { + return createRMProxy(configuration, protocol, subClusterId, user, null); + } + + /** + * Create a proxy for the specified protocol in the context of Federation. For + * non-HA, this is a direct connection to the ResourceManager address. When HA + * is enabled, the proxy handles the failover between the ResourceManagers as + * well. + * + * @param configuration Configuration to generate {@link ClientRMProxy} + * @param protocol Protocol for the proxy + * @param subClusterId the unique identifier or the sub-cluster + * @param user the user on whose behalf the proxy is being created + * @param token the auth token to use for connection + * @param Type information of the proxy + * @return Proxy to the RM + * @throws IOException on failure + */ + @Public + @Unstable + public static T createRMProxy(Configuration configuration, + final Class protocol, SubClusterId subClusterId, + UserGroupInformation user, Token token) + throws IOException { + final YarnConfiguration config = new YarnConfiguration(configuration); + updateConfForFederation(config, subClusterId.getId()); + return AMRMClientUtils.createRMProxy(config, protocol, user, token); + } + + /** + * Updating the conf with Federation as long as certain subclusterId. 
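As a rough usage sketch (not part of this patch), a caller such as an AMRMProxy-side component might obtain a sub-cluster RM proxy as below; the sub-cluster id, protocol, and wrapper class are illustrative, and only the createRMProxy signature above is taken from the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
    import org.apache.hadoop.yarn.server.federation.failover.FederationProxyProviderUtil;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;

    // Illustrative caller; not part of this patch.
    public class FederationProxySketch {
      public ApplicationMasterProtocol connectToSubCluster(Configuration conf)
          throws IOException {
        SubClusterId subClusterId = SubClusterId.newInstance("subcluster-1");
        UserGroupInformation user = UserGroupInformation.getCurrentUser();
        // The returned proxy fails over between RMs of the given sub-cluster
        // using addresses resolved from the FederationStateStore.
        return FederationProxyProviderUtil.createRMProxy(
            conf, ApplicationMasterProtocol.class, subClusterId, user);
      }
    }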
+ * + * @param conf configuration + * @param subClusterId subclusterId for the conf + */ + public static void updateConfForFederation(Configuration conf, + String subClusterId) { + conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId); + /* + * In a Federation setting, we will connect to not just the local cluster RM + * but also multiple external RMs. The membership information of all the RMs + * that are currently participating in Federation is available in the + * central FederationStateStore. So we will: 1. obtain the RM service + * addresses from FederationStateStore using the + * FederationRMFailoverProxyProvider. 2. disable traditional HA as that + * depends on local configuration lookup for RMs using indexes. 3. we will + * enable federation failover IF traditional HA is enabled so that the + * appropriate failover RetryPolicy is initialized. + */ + conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true); + conf.setClass(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER, + FederationRMFailoverProxyProvider.class, RMFailoverProxyProvider.class); + if (HAUtil.isHAEnabled(conf)) { + conf.setBoolean(YarnConfiguration.FEDERATION_FAILOVER_ENABLED, true); + conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, false); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java new file mode 100644 index 00000000000..c631208b783 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java @@ -0,0 +1,221 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.failover; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; +import org.apache.hadoop.yarn.client.RMFailoverProxyProvider; +import org.apache.hadoop.yarn.client.RMProxy; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; + +/** + * A FailoverProxyProvider implementation that uses the + * {@code FederationStateStore} to determine the ResourceManager to connect to. + * This supports both HA and regular mode which is controlled by configuration. + */ +@Private +@Unstable +public class FederationRMFailoverProxyProvider + implements RMFailoverProxyProvider { + private static final Logger LOG = + LoggerFactory.getLogger(FederationRMFailoverProxyProvider.class); + + private RMProxy rmProxy; + private Class protocol; + private T current; + private YarnConfiguration conf; + private FederationStateStoreFacade facade; + private SubClusterId subClusterId; + private UserGroupInformation originalUser; + private boolean federationFailoverEnabled = false; + + @Override + public void init(Configuration configuration, RMProxy proxy, + Class proto) { + this.rmProxy = proxy; + this.protocol = proto; + this.rmProxy.checkAllowedProtocols(this.protocol); + String clusterId = configuration.get(YarnConfiguration.RM_CLUSTER_ID); + Preconditions.checkNotNull(clusterId, "Missing RM ClusterId"); + this.subClusterId = SubClusterId.newInstance(clusterId); + this.facade = facade.getInstance(); + if (configuration instanceof YarnConfiguration) { + this.conf = (YarnConfiguration) configuration; + } + federationFailoverEnabled = + conf.getBoolean(YarnConfiguration.FEDERATION_FAILOVER_ENABLED, + YarnConfiguration.DEFAULT_FEDERATION_FAILOVER_ENABLED); + + conf.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, + conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, + YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES)); + + conf.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, + conf.getInt( + YarnConfiguration.CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS, + YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS)); + + try { + this.originalUser = UserGroupInformation.getCurrentUser(); + LOG.info("Initialized Federation proxy for user: {}", + this.originalUser.getUserName()); + } catch (IOException e) { + LOG.warn("Could not get information of requester, ignoring for now."); + this.originalUser = null; + 
} + + } + + @VisibleForTesting + protected T createRMProxy(InetSocketAddress rmAddress) throws IOException { + return rmProxy.getProxy(conf, protocol, rmAddress); + } + + private T getProxyInternal(boolean isFailover) { + SubClusterInfo subClusterInfo; + // Use the existing proxy as a backup in case getting the new proxy fails. + // Note that if the first time it fails, the backup is also null. In that + // case we will hit NullPointerException and throw it back to AM. + T proxy = this.current; + try { + LOG.info("Failing over to the ResourceManager for SubClusterId: {}", + subClusterId); + subClusterInfo = facade.getSubCluster(subClusterId, isFailover); + // updating the conf with the refreshed RM addresses as proxy + // creations are based out of conf + updateRMAddress(subClusterInfo); + if (this.originalUser == null) { + InetSocketAddress rmAddress = rmProxy.getRMAddress(conf, protocol); + LOG.info( + "Connecting to {} subClusterId {} with protocol {}" + + " without a proxy user", + rmAddress, subClusterId, protocol.getSimpleName()); + proxy = createRMProxy(rmAddress); + } else { + // If the original ugi exists, always use that to create proxy because + // it contains up-to-date AMRMToken + proxy = this.originalUser.doAs(new PrivilegedExceptionAction() { + @Override + public T run() throws IOException { + InetSocketAddress rmAddress = rmProxy.getRMAddress(conf, protocol); + LOG.info( + "Connecting to {} subClusterId {} with protocol {} as user {}", + rmAddress, subClusterId, protocol.getSimpleName(), + originalUser); + return createRMProxy(rmAddress); + } + }); + } + } catch (Exception e) { + LOG.error("Exception while trying to create proxy to the ResourceManager" + + " for SubClusterId: {}", subClusterId, e); + if (proxy == null) { + throw new YarnRuntimeException( + String.format("Create initial proxy to the ResourceManager for" + + " SubClusterId %s failed", subClusterId), + e); + } + } + return proxy; + } + + private void updateRMAddress(SubClusterInfo subClusterInfo) { + if (subClusterInfo != null) { + if (protocol == ApplicationClientProtocol.class) { + conf.set(YarnConfiguration.RM_ADDRESS, + subClusterInfo.getClientRMServiceAddress()); + } else if (protocol == ApplicationMasterProtocol.class) { + conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, + subClusterInfo.getAMRMServiceAddress()); + } else if (protocol == ResourceManagerAdministrationProtocol.class) { + conf.set(YarnConfiguration.RM_ADMIN_ADDRESS, + subClusterInfo.getRMAdminServiceAddress()); + } + } + } + + @Override + public synchronized ProxyInfo getProxy() { + if (current == null) { + current = getProxyInternal(false); + } + return new ProxyInfo(current, subClusterId.getId()); + } + + @Override + public synchronized void performFailover(T currentProxy) { + // It will not return null proxy here + current = getProxyInternal(federationFailoverEnabled); + if (current != currentProxy) { + closeInternal(currentProxy); + } + } + + @Override + public Class getInterface() { + return protocol; + } + + private void closeInternal(T currentProxy) { + if (currentProxy != null) { + if (currentProxy instanceof Closeable) { + try { + ((Closeable) currentProxy).close(); + } catch (IOException e) { + LOG.warn("Exception while trying to close proxy", e); + } + } else { + RPC.stopProxy(currentProxy); + } + } + } + + /** + * Close all the proxy objects which have been opened over the lifetime of + * this proxy provider. 
+ */ + @Override + public synchronized void close() throws IOException { + closeInternal(current); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/package-info.java new file mode 100644 index 00000000000..b1baa0c251a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.failover; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java new file mode 100644 index 00000000000..4cb9bbe5e49 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies; + +import java.util.Map; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.NoActiveSubclustersException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; + +/** + * Base abstract class for a weighted {@link ConfigurableFederationPolicy}. + */ +public abstract class AbstractConfigurableFederationPolicy + implements ConfigurableFederationPolicy { + + private WeightedPolicyInfo policyInfo = null; + private FederationPolicyInitializationContext policyContext; + private boolean isDirty; + + public AbstractConfigurableFederationPolicy() { + } + + @Override + public void reinitialize( + FederationPolicyInitializationContext initializationContext) + throws FederationPolicyInitializationException { + isDirty = true; + FederationPolicyInitializationContextValidator + .validate(initializationContext, this.getClass().getCanonicalName()); + + // perform consistency checks + WeightedPolicyInfo newPolicyInfo = WeightedPolicyInfo.fromByteBuffer( + initializationContext.getSubClusterPolicyConfiguration().getParams()); + + // if nothing has changed skip the rest of initialization + // and signal to childs that the reinit is free via isDirty var. 
+ if (policyInfo != null && policyInfo.equals(newPolicyInfo)) { + isDirty = false; + return; + } + + validate(newPolicyInfo); + setPolicyInfo(newPolicyInfo); + this.policyContext = initializationContext; + } + + /** + * Overridable validation step for the policy configuration. + * + * @param newPolicyInfo the configuration to test. + * + * @throws FederationPolicyInitializationException if the configuration is not + * valid. + */ + public void validate(WeightedPolicyInfo newPolicyInfo) + throws FederationPolicyInitializationException { + if (newPolicyInfo == null) { + throw new FederationPolicyInitializationException( + "The policy to " + "validate should not be null."); + } + } + + /** + * Returns true whether the last reinitialization requires actual changes, or + * was "free" as the weights have not changed. This is used by subclasses + * overriding reinitialize and calling super.reinitialize() to know wheter to + * quit early. + * + * @return whether more work is needed to initialize. + */ + public boolean getIsDirty() { + return isDirty; + } + + /** + * Getter method for the configuration weights. + * + * @return the {@link WeightedPolicyInfo} representing the policy + * configuration. + */ + public WeightedPolicyInfo getPolicyInfo() { + return policyInfo; + } + + /** + * Setter method for the configuration weights. + * + * @param policyInfo the {@link WeightedPolicyInfo} representing the policy + * configuration. + */ + public void setPolicyInfo(WeightedPolicyInfo policyInfo) { + this.policyInfo = policyInfo; + } + + /** + * Getter method for the {@link FederationPolicyInitializationContext}. + * + * @return the context for this policy. + */ + public FederationPolicyInitializationContext getPolicyContext() { + return policyContext; + } + + /** + * Setter method for the {@link FederationPolicyInitializationContext}. + * + * @param policyContext the context to assign to this policy. + */ + public void setPolicyContext( + FederationPolicyInitializationContext policyContext) { + this.policyContext = policyContext; + } + + /** + * This methods gets active subclusters map from the {@code + * FederationStateStoreFacade} and validate it not being null/empty. + * + * @return the map of ids to info for all active subclusters. + * + * @throws YarnException if we can't get the list. + */ + protected Map getActiveSubclusters() + throws YarnException { + + Map activeSubclusters = + getPolicyContext().getFederationStateStoreFacade().getSubClusters(true); + + if (activeSubclusters == null || activeSubclusters.size() < 1) { + throw new NoActiveSubclustersException( + "Zero active subclusters, cannot pick where to send job."); + } + return activeSubclusters; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java new file mode 100644 index 00000000000..524577205a0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies; + +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; + +/** + * This interface provides a general method to reinitialize a policy. The + * semantics are try-n-swap, so in case of an exception is thrown the + * implmentation must ensure the previous state and configuration is preserved. + */ +public interface ConfigurableFederationPolicy { + + /** + * This method is invoked to initialize of update the configuration of + * policies. The implementor should provide try-n-swap semantics, and retain + * state if possible. + * + * @param policyContext the new context to provide to implementor. + * + * @throws FederationPolicyInitializationException in case the initialization + * fails. + */ + void reinitialize(FederationPolicyInitializationContext policyContext) + throws FederationPolicyInitializationException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyInitializationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyInitializationContext.java new file mode 100644 index 00000000000..4d29a41324b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyInitializationContext.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies; + +import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; + +/** + * Context to (re)initialize a {@code FederationAMRMProxyPolicy} and {@code + * FederationRouterPolicy}. 
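A rough sketch of how such a context might be assembled and validated (the facade, resolver, and policy configuration are assumed to be supplied by the calling component; the helper class below is illustrative and not part of this patch):

    import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
    import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator;
    import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
    import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
    import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;

    // Illustrative helper; not part of this patch.
    public class PolicyContextSketch {
      public FederationPolicyInitializationContext buildContext(
          SubClusterPolicyConfiguration policyConfig, SubClusterResolver resolver,
          FederationStateStoreFacade facade, SubClusterId homeSubCluster,
          String policyClassName) throws FederationPolicyInitializationException {
        FederationPolicyInitializationContext context =
            new FederationPolicyInitializationContext(
                policyConfig, resolver, facade, homeSubCluster);
        // The validator rejects null members and a configuration whose type
        // does not match the policy class being reinitialized.
        FederationPolicyInitializationContextValidator.validate(
            context, policyClassName);
        return context;
      }
    }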
+ */ +public class FederationPolicyInitializationContext { + + private SubClusterPolicyConfiguration federationPolicyConfiguration; + private SubClusterResolver federationSubclusterResolver; + private FederationStateStoreFacade federationStateStoreFacade; + private SubClusterId homeSubcluster; + + public FederationPolicyInitializationContext() { + federationPolicyConfiguration = null; + federationSubclusterResolver = null; + federationStateStoreFacade = null; + } + + public FederationPolicyInitializationContext( + SubClusterPolicyConfiguration policy, SubClusterResolver resolver, + FederationStateStoreFacade storeFacade, SubClusterId home) { + this.federationPolicyConfiguration = policy; + this.federationSubclusterResolver = resolver; + this.federationStateStoreFacade = storeFacade; + this.homeSubcluster = home; + } + + /** + * Getter for the {@link SubClusterPolicyConfiguration}. + * + * @return the {@link SubClusterPolicyConfiguration} to be used for + * initialization. + */ + public SubClusterPolicyConfiguration getSubClusterPolicyConfiguration() { + return federationPolicyConfiguration; + } + + /** + * Setter for the {@link SubClusterPolicyConfiguration}. + * + * @param fedPolicyConfiguration the {@link SubClusterPolicyConfiguration} to + * be used for initialization. + */ + public void setSubClusterPolicyConfiguration( + SubClusterPolicyConfiguration fedPolicyConfiguration) { + this.federationPolicyConfiguration = fedPolicyConfiguration; + } + + /** + * Getter for the {@link SubClusterResolver}. + * + * @return the {@link SubClusterResolver} to be used for initialization. + */ + public SubClusterResolver getFederationSubclusterResolver() { + return federationSubclusterResolver; + } + + /** + * Setter for the {@link SubClusterResolver}. + * + * @param federationSubclusterResolver the {@link SubClusterResolver} to be + * used for initialization. + */ + public void setFederationSubclusterResolver( + SubClusterResolver federationSubclusterResolver) { + this.federationSubclusterResolver = federationSubclusterResolver; + } + + /** + * Getter for the {@link FederationStateStoreFacade}. + * + * @return the facade. + */ + public FederationStateStoreFacade getFederationStateStoreFacade() { + return federationStateStoreFacade; + } + + /** + * Setter for the {@link FederationStateStoreFacade}. + * + * @param federationStateStoreFacade the facade. + */ + public void setFederationStateStoreFacade( + FederationStateStoreFacade federationStateStoreFacade) { + this.federationStateStoreFacade = federationStateStoreFacade; + } + + /** + * Returns the current home sub-cluster. Useful for default policy behaviors. + * + * @return the home sub-cluster. + */ + public SubClusterId getHomeSubcluster() { + return homeSubcluster; + } + + /** + * Sets in the context the home sub-cluster. Useful for default policy + * behaviors. + * + * @param homeSubcluster value to set. 
+ */ + public void setHomeSubcluster(SubClusterId homeSubcluster) { + this.homeSubcluster = homeSubcluster; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyInitializationContextValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyInitializationContextValidator.java new file mode 100644 index 00000000000..da63bc1de46 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyInitializationContextValidator.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies; + +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; + +/** + * Helper class used to factor out common validation steps for policies. + */ +public final class FederationPolicyInitializationContextValidator { + + private FederationPolicyInitializationContextValidator() { + // disable constructor per checkstyle + } + + public static void validate( + FederationPolicyInitializationContext policyContext, String myType) + throws FederationPolicyInitializationException { + + if (myType == null) { + throw new FederationPolicyInitializationException( + "The myType parameter" + " should not be null."); + } + + if (policyContext == null) { + throw new FederationPolicyInitializationException( + "The FederationPolicyInitializationContext provided is null. Cannot" + + " reinitalize " + "successfully."); + } + + if (policyContext.getFederationStateStoreFacade() == null) { + throw new FederationPolicyInitializationException( + "The FederationStateStoreFacade provided is null. Cannot" + + " reinitalize successfully."); + } + + if (policyContext.getFederationSubclusterResolver() == null) { + throw new FederationPolicyInitializationException( + "The FederationSubclusterResolver provided is null. Cannot" + + " reinitalize successfully."); + } + + if (policyContext.getSubClusterPolicyConfiguration() == null) { + throw new FederationPolicyInitializationException( + "The SubClusterPolicyConfiguration provided is null. Cannot " + + "reinitalize successfully."); + } + + String intendedType = + policyContext.getSubClusterPolicyConfiguration().getType(); + + if (!myType.equals(intendedType)) { + throw new FederationPolicyInitializationException( + "The FederationPolicyConfiguration carries a type (" + intendedType + + ") different then mine (" + myType + + "). 
Cannot reinitialize successfully."); + } + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java new file mode 100644 index 00000000000..97e484846bd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java @@ -0,0 +1,203 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Utility class for Federation policy. + */ +@Private +public final class FederationPolicyUtils { + private static final Logger LOG = + LoggerFactory.getLogger(FederationPolicyUtils.class); + + public static final String NO_ACTIVE_SUBCLUSTER_AVAILABLE = + "No active SubCluster available to submit the request."; + + /** Disable constructor. */ + private FederationPolicyUtils() { + } + + /** + * A utilize method to instantiate a policy manager class given the type + * (class name) from {@link SubClusterPolicyConfiguration}. 
+ * + * @param newType class name of the policy manager to create + * @return Policy manager + * @throws FederationPolicyInitializationException if fails + */ + public static FederationPolicyManager instantiatePolicyManager(String newType) + throws FederationPolicyInitializationException { + FederationPolicyManager federationPolicyManager = null; + try { + // create policy instance and set queue + Class c = Class.forName(newType); + federationPolicyManager = (FederationPolicyManager) c.newInstance(); + } catch (ClassNotFoundException e) { + throw new FederationPolicyInitializationException(e); + } catch (InstantiationException e) { + throw new FederationPolicyInitializationException(e); + } catch (IllegalAccessException e) { + throw new FederationPolicyInitializationException(e); + } + return federationPolicyManager; + } + + /** + * Get Federation policy configuration from state store, using default queue + * and configuration as fallback. + * + * @param queue the queue of the application + * @param conf the Yarn configuration + * @param federationFacade state store facade + * @return SubClusterPolicyConfiguration recreated + */ + public static SubClusterPolicyConfiguration loadPolicyConfiguration( + String queue, Configuration conf, + FederationStateStoreFacade federationFacade) { + + // The facade might cache this request, based on its parameterization + SubClusterPolicyConfiguration configuration = null; + if (queue != null) { + try { + configuration = federationFacade.getPolicyConfiguration(queue); + } catch (YarnException e) { + LOG.warn("Failed to get policy from FederationFacade with queue " + + queue + ": " + e.getMessage()); + } + } + + // If there is no policy configured for this queue, fallback to the baseline + // policy that is configured either in the store or via XML config + if (configuration == null) { + LOG.info("No policy configured for queue {} in StateStore," + + " fallback to default queue", queue); + queue = YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY; + try { + configuration = federationFacade.getPolicyConfiguration(queue); + } catch (YarnException e) { + LOG.warn("No fallback behavior defined in store, defaulting to XML " + + "configuration fallback behavior."); + } + } + + // or from XML conf otherwise. + if (configuration == null) { + LOG.info("No policy configured for default queue {} in StateStore," + + " fallback to local config", queue); + + String defaultFederationPolicyManager = + conf.get(YarnConfiguration.FEDERATION_POLICY_MANAGER, + YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER); + String defaultPolicyParamString = + conf.get(YarnConfiguration.FEDERATION_POLICY_MANAGER_PARAMS, + YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS); + ByteBuffer defaultPolicyParam = ByteBuffer + .wrap(defaultPolicyParamString.getBytes(StandardCharsets.UTF_8)); + + configuration = SubClusterPolicyConfiguration.newInstance(queue, + defaultFederationPolicyManager, defaultPolicyParam); + } + return configuration; + } + + /** + * Get AMRMProxy policy from state store, using default queue and + * configuration as fallback. 
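A hedged sketch of how a caller might combine these loaders (the queue name and home sub-cluster id below are illustrative; only the loadAMRMPolicy signature, given in full just below, comes from this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils;
    import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
    import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
    import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;

    // Illustrative caller; not part of this patch.
    public class AmrmPolicyLoaderSketch {
      private FederationAMRMProxyPolicy policy;

      public void refreshPolicy(Configuration conf,
          FederationStateStoreFacade facade)
          throws FederationPolicyInitializationException {
        SubClusterId home = SubClusterId.newInstance("home-subcluster");
        // Reuses the previous policy instance when the stored configuration
        // has not changed; falls back to the default queue and then to the
        // XML configuration, as described above.
        policy = FederationPolicyUtils.loadAMRMPolicy(
            "root.default", policy, conf, facade, home);
      }
    }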
+ * + * @param queue the queue of the application + * @param oldPolicy the previous policy instance (can be null) + * @param conf the Yarn configuration + * @param federationFacade state store facade + * @param homeSubClusterId home sub-cluster id + * @return FederationAMRMProxyPolicy recreated + * @throws FederationPolicyInitializationException if fails + */ + public static FederationAMRMProxyPolicy loadAMRMPolicy(String queue, + FederationAMRMProxyPolicy oldPolicy, Configuration conf, + FederationStateStoreFacade federationFacade, + SubClusterId homeSubClusterId) + throws FederationPolicyInitializationException { + + // Local policy and its configuration + SubClusterPolicyConfiguration configuration = + loadPolicyConfiguration(queue, conf, federationFacade); + + // Instantiate the policyManager and get policy + FederationPolicyInitializationContext context = + new FederationPolicyInitializationContext(configuration, + federationFacade.getSubClusterResolver(), federationFacade, + homeSubClusterId); + + LOG.info("Creating policy manager of type: " + configuration.getType()); + FederationPolicyManager federationPolicyManager = + instantiatePolicyManager(configuration.getType()); + // set queue, reinit policy if required (implementation lazily check + // content of conf), and cache it + federationPolicyManager.setQueue(configuration.getQueue()); + return federationPolicyManager.getAMRMPolicy(context, oldPolicy); + } + + /** + * Validate if there is any active subcluster that is not blacklisted, it will + * throw an exception if there are no usable subclusters. + * + * @param activeSubClusters the list of subClusters as identified by + * {@link SubClusterId} currently active. + * @param blackListSubClusters the list of subClusters as identified by + * {@link SubClusterId} to blackList from the selection of the home + * subCluster. + * @throws FederationPolicyException if there are no usable subclusters. + */ + public static void validateSubClusterAvailability( + List activeSubClusters, + List blackListSubClusters) + throws FederationPolicyException { + if (activeSubClusters != null && !activeSubClusters.isEmpty()) { + if (blackListSubClusters == null) { + return; + } + for (SubClusterId scId : activeSubClusters) { + if (!blackListSubClusters.contains(scId)) { + // There is at least one active subcluster + return; + } + } + } + throw new FederationPolicyException( + FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE); + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java new file mode 100644 index 00000000000..bbf08e09614 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java @@ -0,0 +1,265 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager; +import org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy; +import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * This class provides a facade to the policy subsystem, and handles the + * lifecycle of policies (e.g., refresh from remote, default behaviors etc.). + */ +public class RouterPolicyFacade { + + private static final Logger LOG = + LoggerFactory.getLogger(RouterPolicyFacade.class); + + private final SubClusterResolver subClusterResolver; + private final FederationStateStoreFacade federationFacade; + private Map globalConfMap; + + @VisibleForTesting + Map globalPolicyMap; + + public RouterPolicyFacade(Configuration conf, + FederationStateStoreFacade facade, SubClusterResolver resolver, + SubClusterId homeSubcluster) + throws FederationPolicyInitializationException { + + this.federationFacade = facade; + this.subClusterResolver = resolver; + this.globalConfMap = new ConcurrentHashMap<>(); + this.globalPolicyMap = new ConcurrentHashMap<>(); + + // load default behavior from store if possible + String defaultKey = YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY; + SubClusterPolicyConfiguration configuration = null; + try { + configuration = federationFacade.getPolicyConfiguration(defaultKey); + } catch (YarnException e) { + LOG.warn("No fallback behavior defined in store, defaulting to XML " + + "configuration fallback behavior."); + } + + // or from XML conf otherwise. 
+ if (configuration == null) { + String defaultFederationPolicyManager = + conf.get(YarnConfiguration.FEDERATION_POLICY_MANAGER, + YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER); + String defaultPolicyParamString = + conf.get(YarnConfiguration.FEDERATION_POLICY_MANAGER_PARAMS, + YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS); + ByteBuffer defaultPolicyParam = ByteBuffer + .wrap(defaultPolicyParamString.getBytes(StandardCharsets.UTF_8)); + + configuration = SubClusterPolicyConfiguration.newInstance(defaultKey, + defaultFederationPolicyManager, defaultPolicyParam); + } + + // construct the required policy manager + FederationPolicyInitializationContext fallbackContext = + new FederationPolicyInitializationContext(configuration, + subClusterResolver, federationFacade, homeSubcluster); + FederationPolicyManager fallbackPolicyManager = + FederationPolicyUtils.instantiatePolicyManager(configuration.getType()); + fallbackPolicyManager.setQueue(defaultKey); + + // add to the cache the fallback behavior + globalConfMap.put(defaultKey, + fallbackContext.getSubClusterPolicyConfiguration()); + globalPolicyMap.put(defaultKey, + fallbackPolicyManager.getRouterPolicy(fallbackContext, null)); + + } + + /** + * This method provides a wrapper of all policy functionalities for routing . + * Internally it manages configuration changes, and policy init/reinit. + * + * @param appSubmissionContext the {@link ApplicationSubmissionContext} that + * has to be routed to an appropriate subCluster for execution. + * + * @param blackListSubClusters the list of subClusters as identified by + * {@link SubClusterId} to blackList from the selection of the home + * subCluster. + * + * @return the {@link SubClusterId} that will be the "home" for this + * application. + * + * @throws YarnException if there are issues initializing policies, or no + * valid sub-cluster id could be found for this app. + */ + public SubClusterId getHomeSubcluster( + ApplicationSubmissionContext appSubmissionContext, + List blackListSubClusters) throws YarnException { + + // the maps are concurrent, but we need to protect from reset() + // reinitialization mid-execution by creating a new reference local to this + // method. + Map cachedConfs = globalConfMap; + Map policyMap = globalPolicyMap; + + if (appSubmissionContext == null) { + throw new FederationPolicyException( + "The ApplicationSubmissionContext " + "cannot be null."); + } + + String queue = appSubmissionContext.getQueue(); + + // respecting YARN behavior we assume default queue if the queue is not + // specified. This also ensures that "null" can be used as a key to get the + // default behavior. 
+ if (queue == null) { + queue = YarnConfiguration.DEFAULT_QUEUE_NAME; + } + + // the facade might cache this request, based on its parameterization + SubClusterPolicyConfiguration configuration = null; + + try { + configuration = federationFacade.getPolicyConfiguration(queue); + } catch (YarnException e) { + String errMsg = "There is no policy configured for the queue: " + queue + + ", falling back to defaults."; + LOG.warn(errMsg, e); + } + + // If there is no policy configured for this queue, fallback to the baseline + // policy that is configured either in the store or via XML config (and + // cached) + if (configuration == null) { + LOG.warn("There is no policies configured for queue: " + queue + " we" + + " fallback to default policy for: " + + YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY); + + queue = YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY; + try { + configuration = federationFacade.getPolicyConfiguration(queue); + } catch (YarnException e) { + String errMsg = "Cannot retrieve policy configured for the queue: " + + queue + ", falling back to defaults."; + LOG.warn(errMsg, e); + + } + } + + // the fallback is not configure via store, but via XML, using + // previously loaded configuration. + if (configuration == null) { + configuration = + cachedConfs.get(YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY); + } + + // if the configuration has changed since last loaded, reinit the policy + // based on current configuration + if (!cachedConfs.containsKey(queue) + || !cachedConfs.get(queue).equals(configuration)) { + singlePolicyReinit(policyMap, cachedConfs, queue, configuration); + } + + FederationRouterPolicy policy = policyMap.get(queue); + if (policy == null) { + // this should never happen, as the to maps are updated together + throw new FederationPolicyException("No FederationRouterPolicy found " + + "for queue: " + appSubmissionContext.getQueue() + " (for " + + "application: " + appSubmissionContext.getApplicationId() + ") " + + "and no default specified."); + } + + return policy.getHomeSubcluster(appSubmissionContext, blackListSubClusters); + } + + /** + * This method reinitializes a policy and loads it in the policyMap. + * + * @param queue the queue to initialize a policy for. + * @param conf the configuration to use for initalization. + * + * @throws FederationPolicyInitializationException if initialization fails. + */ + private void singlePolicyReinit(Map policyMap, + Map cachedConfs, String queue, + SubClusterPolicyConfiguration conf) + throws FederationPolicyInitializationException { + + FederationPolicyInitializationContext context = + new FederationPolicyInitializationContext(conf, subClusterResolver, + federationFacade, null); + String newType = context.getSubClusterPolicyConfiguration().getType(); + FederationRouterPolicy routerPolicy = policyMap.get(queue); + + FederationPolicyManager federationPolicyManager = + FederationPolicyUtils.instantiatePolicyManager(newType); + // set queue, reinit policy if required (implementation lazily check + // content of conf), and cache it + federationPolicyManager.setQueue(queue); + routerPolicy = + federationPolicyManager.getRouterPolicy(context, routerPolicy); + + // we need the two put to be atomic (across multiple threads invoking + // this and reset operations) + synchronized (this) { + policyMap.put(queue, routerPolicy); + cachedConfs.put(queue, conf); + } + } + + /** + * This method flushes all cached configurations and policies. 
This should be + * invoked if the facade remains activity after very large churn of queues in + * the system. + */ + public synchronized void reset() { + + // remember the fallBack + SubClusterPolicyConfiguration conf = + globalConfMap.get(YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY); + FederationRouterPolicy policy = + globalPolicyMap.get(YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY); + + globalConfMap = new ConcurrentHashMap<>(); + globalPolicyMap = new ConcurrentHashMap<>(); + + // add to the cache a fallback with keyword null + globalConfMap.put(YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY, conf); + globalPolicyMap.put(YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY, + policy); + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java new file mode 100644 index 00000000000..e853744e106 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/AbstractAMRMProxyPolicy.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.amrmproxy; + +import java.util.Map; + +import org.apache.hadoop.yarn.server.federation.policies.AbstractConfigurableFederationPolicy; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; + +/** + * Base abstract class for {@link FederationAMRMProxyPolicy} implementations, + * that provides common validation for reinitialization. 
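+ *
+ * As an illustrative sketch (values are made up), a {@link WeightedPolicyInfo}
+ * passes the validation below only if its AMRM weight map is non-empty:
+ *
+ * <pre>{@code
+ * WeightedPolicyInfo info = new WeightedPolicyInfo();
+ * Map<SubClusterIdInfo, Float> weights = new HashMap<>();
+ * weights.put(new SubClusterIdInfo("sc1"), 1.0f);
+ * info.setAMRMPolicyWeights(weights);   // non-empty, so validate() succeeds
+ * }</pre>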
+ */ +public abstract class AbstractAMRMProxyPolicy extends + AbstractConfigurableFederationPolicy implements FederationAMRMProxyPolicy { + + @Override + public void validate(WeightedPolicyInfo newPolicyInfo) + throws FederationPolicyInitializationException { + super.validate(newPolicyInfo); + Map newWeights = + newPolicyInfo.getAMRMPolicyWeights(); + if (newWeights == null || newWeights.size() < 1) { + throw new FederationPolicyInitializationException( + "Weight vector cannot be null/empty."); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java new file mode 100644 index 00000000000..679f4d5fa41 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.amrmproxy; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.UnknownSubclusterException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; + +/** + * An implementation of the {@link FederationAMRMProxyPolicy} that simply + * broadcasts each {@link ResourceRequest} to all the available sub-clusters. + */ +public class BroadcastAMRMProxyPolicy extends AbstractAMRMProxyPolicy { + + private Set knownClusterIds = new HashSet<>(); + + @Override + public void reinitialize( + FederationPolicyInitializationContext policyContext) + throws FederationPolicyInitializationException { + // overrides initialize to avoid weight checks that do no apply for + // this policy. 
+ FederationPolicyInitializationContextValidator + .validate(policyContext, this.getClass().getCanonicalName()); + setPolicyContext(policyContext); + } + + @Override + public Map> splitResourceRequests( + List resourceRequests) throws YarnException { + + Map activeSubclusters = + getActiveSubclusters(); + + Map> answer = new HashMap<>(); + + // simply broadcast the resource request to all sub-clusters + for (SubClusterId subClusterId : activeSubclusters.keySet()) { + answer.put(subClusterId, resourceRequests); + knownClusterIds.add(subClusterId); + } + + return answer; + } + + @Override + public void notifyOfResponse(SubClusterId subClusterId, + AllocateResponse response) throws YarnException { + if (!knownClusterIds.contains(subClusterId)) { + throw new UnknownSubclusterException( + "The response is received from a subcluster that is unknown to this " + + "policy."); + } + // stateless policy does not care about responses + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/FederationAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/FederationAMRMProxyPolicy.java new file mode 100644 index 00000000000..0541df4346c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/FederationAMRMProxyPolicy.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.amrmproxy; + +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.ConfigurableFederationPolicy; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +/** + * Implementors of this interface provide logic to split the list of + * {@link ResourceRequest}s received by the AM among various RMs. + */ +public interface FederationAMRMProxyPolicy + extends ConfigurableFederationPolicy { + + /** + * Splits the {@link ResourceRequest}s from the client across one or more + * sub-clusters based on the policy semantics (e.g., broadcast, load-based). 
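+ *
+ * A caller sketch (assuming an already initialized policy instance): iterate
+ * over the returned map and forward each partial ask to the corresponding RM.
+ *
+ * <pre>{@code
+ * Map<SubClusterId, List<ResourceRequest>> asksPerSubCluster =
+ *     policy.splitResourceRequests(askList);
+ * for (Map.Entry<SubClusterId, List<ResourceRequest>> e
+ *     : asksPerSubCluster.entrySet()) {
+ *   // forward e.getValue() to the ResourceManager of sub-cluster e.getKey()
+ * }
+ * }</pre>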
+ * + * @param resourceRequests the list of {@link ResourceRequest}s from the AM to + * be split + * + * @return map of sub-cluster as identified by {@link SubClusterId} to the + * list of {@link ResourceRequest}s that should be forwarded to it + * + * @throws YarnException in case the request is malformed or no viable + * sub-clusters can be found. + */ + Map> splitResourceRequests( + List resourceRequests) throws YarnException; + + /** + * This method should be invoked to notify the policy about responses being + * received. This is useful for stateful policies that make decisions based on + * previous responses being received. + * + * @param subClusterId the id of the subcluster sending the notification + * @param response the response received from one of the RMs + * + * @throws YarnException in case the response is not valid + */ + void notifyOfResponse(SubClusterId subClusterId, AllocateResponse response) + throws YarnException; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java new file mode 100644 index 00000000000..454962f63f6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java @@ -0,0 +1,608 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.amrmproxy; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.NoActiveSubclustersException; +import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; + +/** + * An implementation of the {@link FederationAMRMProxyPolicy} interface that + * carefully multicasts the requests with the following behavior: + * + *

+ * Host localized {@link ResourceRequest}s are always forwarded to the RM that
+ * owns the corresponding node, based on the feedback of a
+ * {@link SubClusterResolver}. If the {@link SubClusterResolver} cannot resolve
+ * this node we default to forwarding the {@link ResourceRequest} to the home
+ * sub-cluster.
+ *
+ * Rack localized {@link ResourceRequest}s are forwarded to the RMs that own
+ * the corresponding rack. Note that in some deployments each rack could be
+ * striped across multiple RMs. This policy respects that. If the
+ * {@link SubClusterResolver} cannot resolve this rack we default to forwarding
+ * the {@link ResourceRequest} to the home sub-cluster.
+ *
+ * ANY requests corresponding to node/rack local requests are forwarded only to
+ * the set of RMs that own the corresponding localized requests. The number of
+ * containers listed in each ANY is proportional to the number of localized
+ * container requests (associated to this ANY via the same allocateRequestId).
+ *
+ * ANY requests that are not associated to node/rack local requests are split
+ * among RMs based on the "weights" in the {@link WeightedPolicyInfo}
+ * configuration *and* headroom information. The {@code headroomAlpha}
+ * parameter of the policy configuration indicates how much headroom
+ * contributes to the splitting choice. A value of 1.0f means the weights are
+ * interpreted only as 0/1 booleans, and all splitting is based on the
+ * advertised headroom (falling back to 1/N for RMs we have no headroom
+ * information from). A {@code headroomAlpha} value of 0.0f means headroom is
+ * ignored and all splitting decisions are proportional to the "weights" in
+ * the configuration of the policy.
+ *
+ * ANY requests of zero size are forwarded to all known subclusters (i.e.,
+ * subclusters where we scheduled containers before), as they may represent a
+ * user attempt to cancel a previous request (and we are mostly stateless now,
+ * so we should forward to all known RMs).
+ *
+ * Invariants:
+ *
+ * The policy always excludes non-active RMs.
+ *
+ * The policy always excludes RMs that do not appear in the policy
+ * configuration weights, or have a weight of 0 (even if localized resources
+ * explicitly refer to them).
+ *
+ * (Bar rounding to the closest ceiling of fractional containers) The sum of
+ * requests made to multiple RMs at the ANY level "adds up" to the user
+ * request. The maximum possible excess in a given request is a number of
+ * containers less than or equal to the number of sub-clusters in the
+ * federation.

+ */ +public class LocalityMulticastAMRMProxyPolicy extends AbstractAMRMProxyPolicy { + + public static final Logger LOG = + LoggerFactory.getLogger(LocalityMulticastAMRMProxyPolicy.class); + + private Map weights; + private SubClusterResolver resolver; + + private Map headroom; + private float hrAlpha; + private FederationStateStoreFacade federationFacade; + private AllocationBookkeeper bookkeeper; + private SubClusterId homeSubcluster; + + @Override + public void reinitialize( + FederationPolicyInitializationContext policyContext) + throws FederationPolicyInitializationException { + + // save reference to old weights + WeightedPolicyInfo tempPolicy = getPolicyInfo(); + + super.reinitialize(policyContext); + if (!getIsDirty()) { + return; + } + + Map newWeightsConverted = new HashMap<>(); + boolean allInactive = true; + WeightedPolicyInfo policy = getPolicyInfo(); + + if (policy.getAMRMPolicyWeights() != null + && policy.getAMRMPolicyWeights().size() > 0) { + for (Map.Entry e : policy.getAMRMPolicyWeights() + .entrySet()) { + if (e.getValue() > 0) { + allInactive = false; + } + newWeightsConverted.put(e.getKey().toId(), e.getValue()); + } + } + if (allInactive) { + // reset the policyInfo and throw + setPolicyInfo(tempPolicy); + throw new FederationPolicyInitializationException( + "The weights used to configure " + + "this policy are all set to zero! (no ResourceRequest could be " + + "forwarded with this setting.)"); + } + + if (policyContext.getHomeSubcluster() == null) { + setPolicyInfo(tempPolicy); + throw new FederationPolicyInitializationException("The homeSubcluster " + + "filed in the context must be initialized to use this policy"); + } + + weights = newWeightsConverted; + resolver = policyContext.getFederationSubclusterResolver(); + + if (headroom == null) { + headroom = new ConcurrentHashMap<>(); + } + hrAlpha = policy.getHeadroomAlpha(); + + this.federationFacade = + policyContext.getFederationStateStoreFacade(); + this.homeSubcluster = policyContext.getHomeSubcluster(); + + } + + @Override + public void notifyOfResponse(SubClusterId subClusterId, + AllocateResponse response) throws YarnException { + // stateless policy does not care about responses except tracking headroom + headroom.put(subClusterId, response.getAvailableResources()); + } + + @Override + public Map> splitResourceRequests( + List resourceRequests) throws YarnException { + + // object used to accumulate statistics about the answer, initialize with + // active subclusters. Create a new instance per call because this method + // can be called concurrently. 
+ bookkeeper = new AllocationBookkeeper(); + bookkeeper.reinitialize(federationFacade.getSubClusters(true)); + + List nonLocalizedRequests = + new ArrayList(); + + SubClusterId targetId = null; + Set targetIds = null; + + // if the RR is resolved to a local subcluster add it directly (node and + // resolvable racks) + for (ResourceRequest rr : resourceRequests) { + targetId = null; + targetIds = null; + + // Handle: ANY (accumulated for later) + if (ResourceRequest.isAnyLocation(rr.getResourceName())) { + nonLocalizedRequests.add(rr); + continue; + } + + // Handle "node" requests + try { + targetId = resolver.getSubClusterForNode(rr.getResourceName()); + } catch (YarnException e) { + // this might happen as we can't differentiate node from rack names + // we log altogether later + } + if (bookkeeper.isActiveAndEnabled(targetId)) { + bookkeeper.addLocalizedNodeRR(targetId, rr); + continue; + } + + // Handle "rack" requests + try { + targetIds = resolver.getSubClustersForRack(rr.getResourceName()); + } catch (YarnException e) { + // this might happen as we can't differentiate node from rack names + // we log altogether later + } + if (targetIds != null && targetIds.size() > 0) { + boolean hasActive = false; + for (SubClusterId tid : targetIds) { + if (bookkeeper.isActiveAndEnabled(tid)) { + bookkeeper.addRackRR(tid, rr); + hasActive = true; + } + } + if (hasActive) { + continue; + } + } + + // Handle node/rack requests that the SubClusterResolver cannot map to + // any cluster. Defaulting to home subcluster. + if (LOG.isDebugEnabled()) { + LOG.debug("ERROR resolving sub-cluster for resourceName: " + + rr.getResourceName() + " we are falling back to homeSubCluster:" + + homeSubcluster); + } + + // If home-subcluster is not active, ignore node/rack request + if (bookkeeper.isActiveAndEnabled(homeSubcluster)) { + if (targetIds != null && targetIds.size() > 0) { + bookkeeper.addRackRR(homeSubcluster, rr); + } else { + bookkeeper.addLocalizedNodeRR(homeSubcluster, rr); + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("The homeSubCluster (" + homeSubcluster + ") we are " + + "defaulting to is not active, the ResourceRequest " + + "will be ignored."); + } + } + } + + // handle all non-localized requests (ANY) + splitAnyRequests(nonLocalizedRequests, bookkeeper); + + return bookkeeper.getAnswer(); + } + + /** + * It splits a list of non-localized resource requests among sub-clusters. + */ + private void splitAnyRequests(List originalResourceRequests, + AllocationBookkeeper allocationBookkeeper) throws YarnException { + + for (ResourceRequest resourceRequest : originalResourceRequests) { + + // FIRST: pick the target set of subclusters (based on whether this RR + // is associated with other localized requests via an allocationId) + Long allocationId = resourceRequest.getAllocationRequestId(); + Set targetSubclusters; + if (allocationBookkeeper.getSubClustersForId(allocationId) != null) { + targetSubclusters = + allocationBookkeeper.getSubClustersForId(allocationId); + } else { + targetSubclusters = allocationBookkeeper.getActiveAndEnabledSC(); + } + + // SECOND: pick how much to ask to each RM for each request + splitIndividualAny(resourceRequest, targetSubclusters, + allocationBookkeeper); + } + } + + /** + * Return a projection of this ANY {@link ResourceRequest} that belongs to + * this sub-cluster. This is done based on the "count" of the containers that + * require locality in each sublcuster (if any) or based on the "weights" and + * headroom. 
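+ *
+ * As an illustrative example (numbers made up): with a {@code headroomAlpha}
+ * of 0.5, a headroom weighting of 0.8 and a policy weighting of 0.2, an ANY
+ * ask of 10 containers that is not tied to localized requests is sized for
+ * this sub-cluster as ceil(10 * (0.5 * 0.8 + 0.5 * 0.2)) = 5 containers.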
+ */ + private void splitIndividualAny(ResourceRequest originalResourceRequest, + Set targetSubclusters, + AllocationBookkeeper allocationBookkeeper) { + + long allocationId = originalResourceRequest.getAllocationRequestId(); + + for (SubClusterId targetId : targetSubclusters) { + float numContainer = originalResourceRequest.getNumContainers(); + + // If the ANY request has 0 containers to begin with we must forward it to + // any RM we have previously contacted (this might be the user way + // to cancel a previous request). + if (numContainer == 0 && headroom.containsKey(targetId)) { + allocationBookkeeper.addAnyRR(targetId, originalResourceRequest); + } + + // If ANY is associated with localized asks, split based on their ratio + if (allocationBookkeeper.getSubClustersForId(allocationId) != null) { + float localityBasedWeight = getLocalityBasedWeighting(allocationId, + targetId, allocationBookkeeper); + numContainer = numContainer * localityBasedWeight; + } else { + // split ANY based on load and policy configuration + float headroomWeighting = + getHeadroomWeighting(targetId, allocationBookkeeper); + float policyWeighting = + getPolicyConfigWeighting(targetId, allocationBookkeeper); + // hrAlpha controls how much headroom influencing decision + numContainer = numContainer + * (hrAlpha * headroomWeighting + (1 - hrAlpha) * policyWeighting); + } + + // if the calculated request is non-empty add it to the answer + if (numContainer > 0) { + ResourceRequest out = + ResourceRequest.newInstance(originalResourceRequest.getPriority(), + originalResourceRequest.getResourceName(), + originalResourceRequest.getCapability(), + originalResourceRequest.getNumContainers(), + originalResourceRequest.getRelaxLocality(), + originalResourceRequest.getNodeLabelExpression(), + originalResourceRequest.getExecutionTypeRequest()); + out.setAllocationRequestId(allocationId); + out.setNumContainers((int) Math.ceil(numContainer)); + if (ResourceRequest.isAnyLocation(out.getResourceName())) { + allocationBookkeeper.addAnyRR(targetId, out); + } else { + allocationBookkeeper.addRackRR(targetId, out); + } + } + } + } + + /** + * Compute the weight to assign to a subcluster based on how many local + * requests a subcluster is target of. + */ + private float getLocalityBasedWeighting(long reqId, SubClusterId targetId, + AllocationBookkeeper allocationBookkeeper) { + float totWeight = allocationBookkeeper.getTotNumLocalizedContainers(reqId); + float localWeight = + allocationBookkeeper.getNumLocalizedContainers(reqId, targetId); + return totWeight > 0 ? localWeight / totWeight : 0; + } + + /** + * Compute the "weighting" to give to a sublcuster based on the configured + * policy weights (for the active subclusters). + */ + private float getPolicyConfigWeighting(SubClusterId targetId, + AllocationBookkeeper allocationBookkeeper) { + float totWeight = allocationBookkeeper.totPolicyWeight; + Float localWeight = allocationBookkeeper.policyWeights.get(targetId); + return (localWeight != null && totWeight > 0) ? localWeight / totWeight : 0; + } + + /** + * Compute the weighting based on available headroom. This is proportional to + * the available headroom memory announced by RM, or to 1/N for RMs we have + * not seen yet. If all RMs report zero headroom, we fallback to 1/N again. 
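+ *
+ * For instance (made-up numbers): if this sub-cluster advertises 40GB out of
+ * 100GB of total reported headroom, and only 2 of the 4 active and enabled
+ * sub-clusters have reported their headroom so far, the weighting is
+ * (40 / 100) * (2 / 4) = 0.2.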
+ */ + private float getHeadroomWeighting(SubClusterId targetId, + AllocationBookkeeper allocationBookkeeper) { + + // baseline weight for all RMs + float headroomWeighting = + 1 / (float) allocationBookkeeper.getActiveAndEnabledSC().size(); + + // if we have headroom infomration for this sub-cluster (and we are safe + // from /0 issues) + if (headroom.containsKey(targetId) + && allocationBookkeeper.totHeadroomMemory > 0) { + // compute which portion of the RMs that are active/enabled have reported + // their headroom (needed as adjustment factor) + // (note: getActiveAndEnabledSC should never be null/zero) + float ratioHeadroomKnown = allocationBookkeeper.totHeadRoomEnabledRMs + / (float) allocationBookkeeper.getActiveAndEnabledSC().size(); + + // headroomWeighting is the ratio of headroom memory in the targetId + // cluster / total memory. The ratioHeadroomKnown factor is applied to + // adjust for missing information and ensure sum of allocated containers + // closely approximate what the user asked (small excess). + headroomWeighting = (headroom.get(targetId).getMemorySize() + / allocationBookkeeper.totHeadroomMemory) * (ratioHeadroomKnown); + } + return headroomWeighting; + } + + /** + * This helper class is used to book-keep the requests made to each + * subcluster, and maintain useful statistics to split ANY requests. + */ + private final class AllocationBookkeeper { + + // the answer being accumulated + private Map> answer = new TreeMap<>(); + + // stores how many containers we have allocated in each RM for localized + // asks, used to correctly "spread" the corresponding ANY + private Map> countContainersPerRM = + new HashMap<>(); + private Map totNumLocalizedContainers = new HashMap<>(); + + private Set activeAndEnabledSC = new HashSet<>(); + private float totHeadroomMemory = 0; + private int totHeadRoomEnabledRMs = 0; + private Map policyWeights; + private float totPolicyWeight = 0; + + private void reinitialize( + Map activeSubclusters) + throws YarnException { + if (activeSubclusters == null) { + throw new YarnRuntimeException("null activeSubclusters received"); + } + + // reset data structures + answer.clear(); + countContainersPerRM.clear(); + totNumLocalizedContainers.clear(); + activeAndEnabledSC.clear(); + totHeadroomMemory = 0; + totHeadRoomEnabledRMs = 0; + // save the reference locally in case the weights get reinitialized + // concurrently + policyWeights = weights; + totPolicyWeight = 0; + + // pre-compute the set of subclusters that are both active and enabled by + // the policy weights, and accumulate their total weight + for (Map.Entry entry : policyWeights.entrySet()) { + if (entry.getValue() > 0 + && activeSubclusters.containsKey(entry.getKey())) { + activeAndEnabledSC.add(entry.getKey()); + totPolicyWeight += entry.getValue(); + } + } + + if (activeAndEnabledSC.size() < 1) { + throw new NoActiveSubclustersException( + "None of the subclusters enabled in this policy (weight>0) are " + + "currently active we cannot forward the ResourceRequest(s)"); + } + + // pre-compute headroom-based weights for active/enabled subclusters + for (Map.Entry r : headroom.entrySet()) { + if (activeAndEnabledSC.contains(r.getKey())) { + totHeadroomMemory += r.getValue().getMemorySize(); + totHeadRoomEnabledRMs++; + } + } + } + + /** + * Add to the answer a localized node request, and keeps track of statistics + * on a per-allocation-id and per-subcluster bases. 
+ */ + private void addLocalizedNodeRR(SubClusterId targetId, ResourceRequest rr) { + Preconditions + .checkArgument(!ResourceRequest.isAnyLocation(rr.getResourceName())); + + if (!countContainersPerRM.containsKey(rr.getAllocationRequestId())) { + countContainersPerRM.put(rr.getAllocationRequestId(), new HashMap<>()); + } + if (!countContainersPerRM.get(rr.getAllocationRequestId()) + .containsKey(targetId)) { + countContainersPerRM.get(rr.getAllocationRequestId()).put(targetId, + new AtomicLong(0)); + } + countContainersPerRM.get(rr.getAllocationRequestId()).get(targetId) + .addAndGet(rr.getNumContainers()); + + if (!totNumLocalizedContainers.containsKey(rr.getAllocationRequestId())) { + totNumLocalizedContainers.put(rr.getAllocationRequestId(), + new AtomicLong(0)); + } + totNumLocalizedContainers.get(rr.getAllocationRequestId()) + .addAndGet(rr.getNumContainers()); + + internalAddToAnswer(targetId, rr); + } + + /** + * Add a rack-local request to the final asnwer. + */ + public void addRackRR(SubClusterId targetId, ResourceRequest rr) { + Preconditions + .checkArgument(!ResourceRequest.isAnyLocation(rr.getResourceName())); + internalAddToAnswer(targetId, rr); + } + + /** + * Add an ANY request to the final answer. + */ + private void addAnyRR(SubClusterId targetId, ResourceRequest rr) { + Preconditions + .checkArgument(ResourceRequest.isAnyLocation(rr.getResourceName())); + internalAddToAnswer(targetId, rr); + } + + private void internalAddToAnswer(SubClusterId targetId, + ResourceRequest partialRR) { + if (!answer.containsKey(targetId)) { + answer.put(targetId, new ArrayList()); + } + answer.get(targetId).add(partialRR); + } + + /** + * Return all known subclusters associated with an allocation id. + * + * @param allocationId the allocation id considered + * + * @return the list of {@link SubClusterId}s associated with this allocation + * id + */ + private Set getSubClustersForId(long allocationId) { + if (countContainersPerRM.get(allocationId) == null) { + return null; + } + return countContainersPerRM.get(allocationId).keySet(); + } + + /** + * Return the answer accumulated so far. + * + * @return the answer + */ + private Map> getAnswer() { + return answer; + } + + /** + * Return the set of sub-clusters that are both active and allowed by our + * policy (weight > 0). + * + * @return a set of active and enabled {@link SubClusterId}s + */ + private Set getActiveAndEnabledSC() { + return activeAndEnabledSC; + } + + /** + * Return the total number of container coming from localized requests + * matching an allocation Id. + */ + private long getTotNumLocalizedContainers(long allocationId) { + AtomicLong c = totNumLocalizedContainers.get(allocationId); + return c == null ? 0 : c.get(); + } + + /** + * Returns the number of containers matching an allocation Id that are + * localized in the targetId subcluster. + */ + private long getNumLocalizedContainers(long allocationId, + SubClusterId targetId) { + AtomicLong c = countContainersPerRM.get(allocationId).get(targetId); + return c == null ? 0 : c.get(); + } + + /** + * Returns true is the subcluster request is both active and enabled. 
+ */ + private boolean isActiveAndEnabled(SubClusterId targetId) { + if (targetId == null) { + return false; + } else { + return getActiveAndEnabledSC().contains(targetId); + } + } + + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java new file mode 100644 index 00000000000..3783df645c5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.amrmproxy; + +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +/** + * An implementation of the {@link FederationAMRMProxyPolicy} that simply + * rejects all requests. Useful to prevent apps from accessing any sub-cluster. + */ +public class RejectAMRMProxyPolicy extends AbstractAMRMProxyPolicy { + + private Set knownClusterIds = new HashSet<>(); + + @Override + public void reinitialize(FederationPolicyInitializationContext policyContext) + throws FederationPolicyInitializationException { + // overrides initialize to avoid weight checks that do no apply for + // this policy. 
+ FederationPolicyInitializationContextValidator.validate(policyContext, + this.getClass().getCanonicalName()); + setPolicyContext(policyContext); + } + + @Override + public Map> splitResourceRequests( + List resourceRequests) throws YarnException { + throw new FederationPolicyException("The policy configured for this queue " + + "rejects all routing requests by construction."); + } + + @Override + public void notifyOfResponse(SubClusterId subClusterId, + AllocateResponse response) throws YarnException { + // This might be invoked for applications started with a previous policy, + // do nothing for this policy. + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/package-info.java new file mode 100644 index 00000000000..ef72647baec --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** AMRMPRoxy policies. **/ +package org.apache.hadoop.yarn.server.federation.policies.amrmproxy; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/dao/WeightedPolicyInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/dao/WeightedPolicyInfo.java new file mode 100644 index 00000000000..e7b8afe1490 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/dao/WeightedPolicyInfo.java @@ -0,0 +1,251 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.dao; + +import java.io.StringReader; +import java.io.StringWriter; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; + +import javax.xml.bind.JAXBException; +import javax.xml.bind.Marshaller; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.sun.jersey.api.json.JSONConfiguration; +import com.sun.jersey.api.json.JSONJAXBContext; +import com.sun.jersey.api.json.JSONMarshaller; +import com.sun.jersey.api.json.JSONUnmarshaller; + +/** + * This is a DAO class for the configuration of parameteres for federation + * policies. This generalizes several possible configurations as two lists of + * {@link SubClusterIdInfo} and corresponding weights as a {@link Float}. The + * interpretation of the weight is left to the logic in the policy. + */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +@XmlRootElement(name = "federation-policy") +@XmlAccessorType(XmlAccessType.FIELD) +public class WeightedPolicyInfo { + + private static final Logger LOG = + LoggerFactory.getLogger(WeightedPolicyInfo.class); + private static JSONJAXBContext jsonjaxbContext = initContext(); + private Map routerPolicyWeights = new HashMap<>(); + private Map amrmPolicyWeights = new HashMap<>(); + private float headroomAlpha; + + public WeightedPolicyInfo() { + // JAXB needs this + } + + private static JSONJAXBContext initContext() { + try { + return new JSONJAXBContext(JSONConfiguration.DEFAULT, + WeightedPolicyInfo.class); + } catch (JAXBException e) { + LOG.error("Error parsing the policy.", e); + } + return null; + } + + /** + * Deserializes a {@link WeightedPolicyInfo} from a byte UTF-8 JSON + * representation. + * + * @param bb the input byte representation. + * + * @return the {@link WeightedPolicyInfo} represented. + * + * @throws FederationPolicyInitializationException if a deserializaiton error + * occurs. + */ + public static WeightedPolicyInfo fromByteBuffer(ByteBuffer bb) + throws FederationPolicyInitializationException { + + if (jsonjaxbContext == null) { + throw new FederationPolicyInitializationException( + "JSONJAXBContext should" + " not be null."); + } + + try { + JSONUnmarshaller unmarshaller = jsonjaxbContext.createJSONUnmarshaller(); + final byte[] bytes = new byte[bb.remaining()]; + bb.get(bytes); + String params = new String(bytes, StandardCharsets.UTF_8); + + WeightedPolicyInfo weightedPolicyInfo = unmarshaller.unmarshalFromJSON( + new StringReader(params), WeightedPolicyInfo.class); + return weightedPolicyInfo; + } catch (JAXBException j) { + throw new FederationPolicyInitializationException(j); + } + } + + /** + * Getter of the router weights. + * + * @return the router weights. + */ + public Map getRouterPolicyWeights() { + return routerPolicyWeights; + } + + /** + * Setter method for Router weights. 
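+ *
+ * A configuration sketch (sub-cluster names and weights are made up):
+ *
+ * <pre>{@code
+ * WeightedPolicyInfo info = new WeightedPolicyInfo();
+ * Map<SubClusterIdInfo, Float> weights = new HashMap<>();
+ * weights.put(new SubClusterIdInfo("sc1"), 0.7f);
+ * weights.put(new SubClusterIdInfo("sc2"), 0.3f);
+ * info.setRouterPolicyWeights(weights);
+ * ByteBuffer serialized = info.toByteBuffer();  // e.g., to store in the state store
+ * }</pre>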
+ * + * @param policyWeights the router weights. + */ + public void setRouterPolicyWeights( + Map policyWeights) { + this.routerPolicyWeights = policyWeights; + } + + /** + * Getter for AMRMProxy weights. + * + * @return the AMRMProxy weights. + */ + public Map getAMRMPolicyWeights() { + return amrmPolicyWeights; + } + + /** + * Setter method for ARMRMProxy weights. + * + * @param policyWeights the amrmproxy weights. + */ + public void setAMRMPolicyWeights(Map policyWeights) { + this.amrmPolicyWeights = policyWeights; + } + + /** + * Converts the policy into a byte array representation in the input + * {@link ByteBuffer}. + * + * @return byte array representation of this policy configuration. + * + * @throws FederationPolicyInitializationException if a serialization error + * occurs. + */ + public ByteBuffer toByteBuffer() + throws FederationPolicyInitializationException { + if (jsonjaxbContext == null) { + throw new FederationPolicyInitializationException( + "JSONJAXBContext should" + " not be null."); + } + try { + String s = toJSONString(); + return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)); + } catch (JAXBException j) { + throw new FederationPolicyInitializationException(j); + } + } + + private String toJSONString() throws JAXBException { + JSONMarshaller marshaller = jsonjaxbContext.createJSONMarshaller(); + marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true); + StringWriter sw = new StringWriter(256); + marshaller.marshallToJSON(this, sw); + return sw.toString(); + } + + @Override + public boolean equals(Object other) { + + if (other == null || !other.getClass().equals(this.getClass())) { + return false; + } + + WeightedPolicyInfo otherPolicy = (WeightedPolicyInfo) other; + Map otherAMRMWeights = + otherPolicy.getAMRMPolicyWeights(); + Map otherRouterWeights = + otherPolicy.getRouterPolicyWeights(); + + boolean amrmWeightsMatch = + otherAMRMWeights != null && getAMRMPolicyWeights() != null + && CollectionUtils.isEqualCollection(otherAMRMWeights.entrySet(), + getAMRMPolicyWeights().entrySet()); + + boolean routerWeightsMatch = + otherRouterWeights != null && getRouterPolicyWeights() != null + && CollectionUtils.isEqualCollection(otherRouterWeights.entrySet(), + getRouterPolicyWeights().entrySet()); + + return amrmWeightsMatch && routerWeightsMatch; + } + + @Override + public int hashCode() { + return 31 * amrmPolicyWeights.hashCode() + routerPolicyWeights.hashCode(); + } + + /** + * Return the parameter headroomAlpha, used by policies that balance + * weight-based and load-based considerations in their decisions. + * + * For policies that use this parameter, values close to 1 indicate that most + * of the decision should be based on currently observed headroom from various + * sub-clusters, values close to zero, indicate that the decision should be + * mostly based on weights and practically ignore current load. + * + * @return the value of headroomAlpha. + */ + public float getHeadroomAlpha() { + return headroomAlpha; + } + + /** + * Set the parameter headroomAlpha, used by policies that balance weight-based + * and load-based considerations in their decisions. + * + * For policies that use this parameter, values close to 1 indicate that most + * of the decision should be based on currently observed headroom from various + * sub-clusters, values close to zero, indicate that the decision should be + * mostly based on weights and practically ignore current load. + * + * @param headroomAlpha the value to use for balancing. 
+ */ + public void setHeadroomAlpha(float headroomAlpha) { + this.headroomAlpha = headroomAlpha; + } + + @Override + public String toString() { + try { + return toJSONString(); + } catch (JAXBException e) { + e.printStackTrace(); + return "Error serializing to string."; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/dao/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/dao/package-info.java new file mode 100644 index 00000000000..c292e52dd31 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/dao/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** DAO objects for serializing/deserializing policy configurations. **/ +package org.apache.hadoop.yarn.server.federation.policies.dao; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/FederationPolicyException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/FederationPolicyException.java new file mode 100644 index 00000000000..24fe4213ea8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/FederationPolicyException.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.exceptions; + +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** + * Generic policy exception. 
+ */ +public class FederationPolicyException extends YarnException { + public FederationPolicyException(String s) { + super(s); + } + + public FederationPolicyException(Throwable t) { + super(t); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/FederationPolicyInitializationException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/FederationPolicyInitializationException.java new file mode 100644 index 00000000000..fcc09c2fc83 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/FederationPolicyInitializationException.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.exceptions; + +/** + * This exception is thrown when the initialization of a federation policy is + * not successful. + */ +public class FederationPolicyInitializationException + extends FederationPolicyException { + public FederationPolicyInitializationException(String message) { + super(message); + } + + public FederationPolicyInitializationException(Throwable j) { + super(j); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/NoActiveSubclustersException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/NoActiveSubclustersException.java new file mode 100644 index 00000000000..a427944ee65 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/NoActiveSubclustersException.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.exceptions; + +/** + * This exception is thrown when policies cannot locate any active cluster. + */ +public class NoActiveSubclustersException extends FederationPolicyException { + public NoActiveSubclustersException(String s) { + super(s); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/UnknownSubclusterException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/UnknownSubclusterException.java new file mode 100644 index 00000000000..8a0fb4f4217 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/UnknownSubclusterException.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.exceptions; + +/** + * This exception is thrown whenever a policy is given a {@code SubClusterId} + * that is unknown. + */ +public class UnknownSubclusterException extends FederationPolicyException { + public UnknownSubclusterException(String s) { + super(s); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/package-info.java new file mode 100644 index 00000000000..ad2d5430637 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/exceptions/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** Exceptions for policies. **/ +package org.apache.hadoop.yarn.server.federation.policies.exceptions; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/AbstractPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/AbstractPolicyManager.java new file mode 100644 index 00000000000..f7a89c614fe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/AbstractPolicyManager.java @@ -0,0 +1,190 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import org.apache.hadoop.yarn.server.federation.policies.ConfigurableFederationPolicy; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator; +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; + +/** + * This class provides basic implementation for common methods that multiple + * policies will need to implement. + */ +public abstract class AbstractPolicyManager implements + FederationPolicyManager { + + private String queue; + @SuppressWarnings("checkstyle:visibilitymodifier") + protected Class routerFederationPolicy; + @SuppressWarnings("checkstyle:visibilitymodifier") + protected Class amrmProxyFederationPolicy; + + public static final Logger LOG = + LoggerFactory.getLogger(AbstractPolicyManager.class); + /** + * This default implementation validates the + * {@link FederationPolicyInitializationContext}, + * then checks whether it needs to reinstantiate the class (null or + * mismatching type), and reinitialize the policy. + * + * @param federationPolicyContext the current context + * @param oldInstance the existing (possibly null) instance. 
+ * + * @return a valid and fully reinitalized {@link FederationAMRMProxyPolicy} + * instance + * + * @throws FederationPolicyInitializationException if the reinitalization is + * not valid, and ensure + * previous state is preserved + */ + public FederationAMRMProxyPolicy getAMRMPolicy( + FederationPolicyInitializationContext federationPolicyContext, + FederationAMRMProxyPolicy oldInstance) + throws FederationPolicyInitializationException { + + if (amrmProxyFederationPolicy == null) { + throw new FederationPolicyInitializationException("The parameter " + + "amrmProxyFederationPolicy should be initialized in " + + this.getClass().getSimpleName() + " constructor."); + } + + try { + return (FederationAMRMProxyPolicy) internalPolicyGetter( + federationPolicyContext, oldInstance, amrmProxyFederationPolicy); + } catch (ClassCastException e) { + throw new FederationPolicyInitializationException(e); + } + + } + + /** + * This default implementation validates the + * {@link FederationPolicyInitializationContext}, + * then checks whether it needs to reinstantiate the class (null or + * mismatching type), and reinitialize the policy. + * + * @param federationPolicyContext the current context + * @param oldInstance the existing (possibly null) instance. + * + * @return a valid and fully reinitalized {@link FederationRouterPolicy} + * instance + * + * @throws FederationPolicyInitializationException if the reinitalization is + * not valid, and ensure + * previous state is preserved + */ + + public FederationRouterPolicy getRouterPolicy( + FederationPolicyInitializationContext federationPolicyContext, + FederationRouterPolicy oldInstance) + throws FederationPolicyInitializationException { + + //checks that sub-types properly initialize the types of policies + if (routerFederationPolicy == null) { + throw new FederationPolicyInitializationException("The policy " + + "type should be initialized in " + this.getClass().getSimpleName() + + " constructor."); + } + + try { + return (FederationRouterPolicy) internalPolicyGetter( + federationPolicyContext, oldInstance, routerFederationPolicy); + } catch (ClassCastException e) { + throw new FederationPolicyInitializationException(e); + } + } + + @Override + public SubClusterPolicyConfiguration serializeConf() + throws FederationPolicyInitializationException { + // default implementation works only for sub-classes which do not require + // any parameters + ByteBuffer buf = ByteBuffer.allocate(0); + return SubClusterPolicyConfiguration + .newInstance(getQueue(), this.getClass().getCanonicalName(), buf); + } + + @Override + public String getQueue() { + return queue; + } + + @Override + public void setQueue(String queue) { + this.queue = queue; + } + + /** + * Common functionality to instantiate a reinitialize a {@link + * ConfigurableFederationPolicy}. 
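+   *
+   * <p>Editorial sketch (not part of this patch, identifiers illustrative) of
+   * how the public getters built on top of this helper are meant to be used,
+   * so that compatible instances are reinitialized in place and state is
+   * retained:
+   *
+   * <pre>
+   *   FederationRouterPolicy current = null; // null forces a fresh instance
+   *   // on every policy refresh, hand the previous instance back in
+   *   current = policyManager.getRouterPolicy(context, current);
+   * </pre>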
+ */ + private ConfigurableFederationPolicy internalPolicyGetter( + final FederationPolicyInitializationContext federationPolicyContext, + ConfigurableFederationPolicy oldInstance, Class policy) + throws FederationPolicyInitializationException { + + FederationPolicyInitializationContextValidator + .validate(federationPolicyContext, this.getClass().getCanonicalName()); + + if (oldInstance == null || !oldInstance.getClass().equals(policy)) { + try { + oldInstance = (ConfigurableFederationPolicy) policy.newInstance(); + } catch (InstantiationException e) { + throw new FederationPolicyInitializationException(e); + } catch (IllegalAccessException e) { + throw new FederationPolicyInitializationException(e); + } + } + + //copying the context to avoid side-effects + FederationPolicyInitializationContext modifiedContext = + updateContext(federationPolicyContext, + oldInstance.getClass().getCanonicalName()); + + oldInstance.reinitialize(modifiedContext); + return oldInstance; + } + + /** + * This method is used to copy-on-write the context, that will be passed + * downstream to the router/amrmproxy policies. + */ + private FederationPolicyInitializationContext updateContext( + FederationPolicyInitializationContext federationPolicyContext, + String type) { + // copying configuration and context to avoid modification of original + SubClusterPolicyConfiguration newConf = SubClusterPolicyConfiguration + .newInstance(federationPolicyContext + .getSubClusterPolicyConfiguration()); + newConf.setType(type); + + return new FederationPolicyInitializationContext(newConf, + federationPolicyContext.getFederationSubclusterResolver(), + federationPolicyContext.getFederationStateStoreFacade(), + federationPolicyContext.getHomeSubcluster()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/FederationPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/FederationPolicyManager.java new file mode 100644 index 00000000000..1434c80f427 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/FederationPolicyManager.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies.manager;
+
+import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
+import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+
+/**
+ *
+ * Implementors need to provide the ability to serialize a policy and its
+ * configuration as a {@link SubClusterPolicyConfiguration}, as well as to
+ * provide (re)initialization mechanics for the underlying
+ * {@link FederationAMRMProxyPolicy} and {@link FederationRouterPolicy}.
+ *
+ * The serialization aspects are used by admin APIs or a policy engine to store
+ * a serialized configuration in the {@code FederationStateStore}, while the
+ * getter methods are used to obtain a properly initialized policy in the
+ * {@code Router} and {@code AMRMProxy} respectively.
+ *
+ * This interface by design binds together {@link FederationAMRMProxyPolicy} and
+ * {@link FederationRouterPolicy} and provides lifecycle support for
+ * serialization and deserialization, to reduce configuration mistakes
+ * (combining incompatible policies).
+ *
+ */
+public interface FederationPolicyManager {
+
+  /**
+   * If the current instance is compatible, this method returns the same
+   * instance of {@link FederationAMRMProxyPolicy} reinitialized with the
+   * current context, otherwise a new instance initialized with the current
+   * context is provided. If the instance is compatible with the current class,
+   * implementors should attempt to reinitialize it (retaining state). To
+   * effect a complete policy reset, oldInstance should be null.
+   *
+   * @param policyContext the current context
+   * @param oldInstance the existing (possibly null) instance.
+   *
+   * @return an updated {@link FederationAMRMProxyPolicy}.
+   *
+   * @throws FederationPolicyInitializationException if the initialization
+   *           cannot be completed properly. The oldInstance should still be
+   *           valid in case of failed initialization.
+   */
+  FederationAMRMProxyPolicy getAMRMPolicy(
+      FederationPolicyInitializationContext policyContext,
+      FederationAMRMProxyPolicy oldInstance)
+      throws FederationPolicyInitializationException;
+
+  /**
+   * If the current instance is compatible, this method returns the same
+   * instance of {@link FederationRouterPolicy} reinitialized with the current
+   * context, otherwise a new instance initialized with the current context is
+   * provided. If the instance is compatible with the current class,
+   * implementors should attempt to reinitialize it (retaining state). To
+   * effect a complete policy reset, oldInstance should be set to null.
+   *
+   * @param policyContext the current context
+   * @param oldInstance the existing (possibly null) instance.
+   *
+   * @return an updated {@link FederationRouterPolicy}.
+   *
+   * @throws FederationPolicyInitializationException if the initialization
+   *           cannot be completed properly. The oldInstance should still be
+   *           valid in case of failed initialization.
+   */
+  FederationRouterPolicy getRouterPolicy(
+      FederationPolicyInitializationContext policyContext,
+      FederationRouterPolicy oldInstance)
+      throws FederationPolicyInitializationException;
+
+  /**
+   * This method is invoked to derive a {@link SubClusterPolicyConfiguration}.
+ * This is to be used when writing a policy object in the federation policy + * store. + * + * @return a valid policy configuration representing this object + * parametrization. + * + * @throws FederationPolicyInitializationException if the current state cannot + * be serialized properly + */ + SubClusterPolicyConfiguration serializeConf() + throws FederationPolicyInitializationException; + + /** + * This method returns the queue this policy is configured for. + * + * @return the name of the queue. + */ + String getQueue(); + + /** + * This methods provides a setter for the queue this policy is specified for. + * + * @param queue the name of the queue. + */ + void setQueue(String queue); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/HashBroadcastPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/HashBroadcastPolicyManager.java new file mode 100644 index 00000000000..08ab08fedf4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/HashBroadcastPolicyManager.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.router.HashBasedRouterPolicy; + +/** + * Policy that routes applications via hashing of their queuename, and broadcast + * resource requests. This picks a {@link HashBasedRouterPolicy} for the router + * and a {@link BroadcastAMRMProxyPolicy} for the amrmproxy as they are designed + * to work together. + */ +public class HashBroadcastPolicyManager extends AbstractPolicyManager { + + public HashBroadcastPolicyManager() { + // this structurally hard-codes two compatible policies for Router and + // AMRMProxy. 
+ routerFederationPolicy = HashBasedRouterPolicy.class; + amrmProxyFederationPolicy = BroadcastAMRMProxyPolicy.class; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/PriorityBroadcastPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/PriorityBroadcastPolicyManager.java new file mode 100644 index 00000000000..8139e1202dc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/PriorityBroadcastPolicyManager.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.router.PriorityRouterPolicy; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Policy that allows operator to configure "weights" for routing. This picks a + * {@link PriorityRouterPolicy} for the router and a + * {@link BroadcastAMRMProxyPolicy} for the amrmproxy as they are designed to + * work together. + */ +public class PriorityBroadcastPolicyManager extends AbstractPolicyManager { + + private WeightedPolicyInfo weightedPolicyInfo; + + public PriorityBroadcastPolicyManager() { + // this structurally hard-codes two compatible policies for Router and + // AMRMProxy. 
+ routerFederationPolicy = PriorityRouterPolicy.class; + amrmProxyFederationPolicy = BroadcastAMRMProxyPolicy.class; + weightedPolicyInfo = new WeightedPolicyInfo(); + } + + @Override + public SubClusterPolicyConfiguration serializeConf() + throws FederationPolicyInitializationException { + ByteBuffer buf = weightedPolicyInfo.toByteBuffer(); + return SubClusterPolicyConfiguration.newInstance(getQueue(), + this.getClass().getCanonicalName(), buf); + } + + @VisibleForTesting + public WeightedPolicyInfo getWeightedPolicyInfo() { + return weightedPolicyInfo; + } + + @VisibleForTesting + public void setWeightedPolicyInfo(WeightedPolicyInfo weightedPolicyInfo) { + this.weightedPolicyInfo = weightedPolicyInfo; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/RejectAllPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/RejectAllPolicyManager.java new file mode 100644 index 00000000000..7bd7a1b5de8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/RejectAllPolicyManager.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.RejectAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.router.RejectRouterPolicy; + +/** + * This class represents a simple implementation of a {@code + * FederationPolicyManager}. + * + * This policy rejects all reuqests for both router and amrmproxy routing. This + * is to be used to prevent applications in a specific queue (or if used as + * default for non-configured queues) from accessing cluster resources. + */ +public class RejectAllPolicyManager extends AbstractPolicyManager { + + public RejectAllPolicyManager() { + // this structurally hard-codes two compatible policies for Router and + // AMRMProxy. 
+ routerFederationPolicy = RejectRouterPolicy.class; + amrmProxyFederationPolicy = RejectAMRMProxyPolicy.class; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/UniformBroadcastPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/UniformBroadcastPolicyManager.java new file mode 100644 index 00000000000..5db0466bd89 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/UniformBroadcastPolicyManager.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy; + +/** + * This class represents a simple implementation of a {@code + * FederationPolicyManager}. + * + * It combines the basic policies: {@link UniformRandomRouterPolicy} and + * {@link BroadcastAMRMProxyPolicy}, which are designed to work together and + * "spread" the load among sub-clusters uniformly. + * + * This simple policy might impose heavy load on the RMs and return more + * containers than a job requested as all requests are (replicated and) + * broadcasted. + */ +public class UniformBroadcastPolicyManager extends AbstractPolicyManager { + + public UniformBroadcastPolicyManager() { + // this structurally hard-codes two compatible policies for Router and + // AMRMProxy. + routerFederationPolicy = UniformRandomRouterPolicy.class; + amrmProxyFederationPolicy = BroadcastAMRMProxyPolicy.class; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/WeightedLocalityPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/WeightedLocalityPolicyManager.java new file mode 100644 index 00000000000..109b53437ca --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/WeightedLocalityPolicyManager.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.LocalityMulticastAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.router.WeightedRandomRouterPolicy; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; + +import java.nio.ByteBuffer; + +/** + * Policy that allows operator to configure "weights" for routing. This picks a + * {@link WeightedRandomRouterPolicy} for the router and a {@link + * LocalityMulticastAMRMProxyPolicy} for the amrmproxy as they are designed to + * work together. + */ +public class WeightedLocalityPolicyManager + extends AbstractPolicyManager { + + private WeightedPolicyInfo weightedPolicyInfo; + + public WeightedLocalityPolicyManager() { + //this structurally hard-codes two compatible policies for Router and + // AMRMProxy. + routerFederationPolicy = WeightedRandomRouterPolicy.class; + amrmProxyFederationPolicy = LocalityMulticastAMRMProxyPolicy.class; + weightedPolicyInfo = new WeightedPolicyInfo(); + } + + @Override + public SubClusterPolicyConfiguration serializeConf() + throws FederationPolicyInitializationException { + ByteBuffer buf = weightedPolicyInfo.toByteBuffer(); + return SubClusterPolicyConfiguration + .newInstance(getQueue(), this.getClass().getCanonicalName(), buf); + } + + @VisibleForTesting + public WeightedPolicyInfo getWeightedPolicyInfo() { + return weightedPolicyInfo; + } + + @VisibleForTesting + public void setWeightedPolicyInfo( + WeightedPolicyInfo weightedPolicyInfo) { + this.weightedPolicyInfo = weightedPolicyInfo; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/package-info.java new file mode 100644 index 00000000000..9515c011811 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/manager/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** Various implementation of FederationPolicyManager. **/ +package org.apache.hadoop.yarn.server.federation.policies.manager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/package-info.java new file mode 100644 index 00000000000..fa3fcc5ef9b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** Federation Policies. **/ +package org.apache.hadoop.yarn.server.federation.policies; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/AbstractRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/AbstractRouterPolicy.java new file mode 100644 index 00000000000..730fb417f88 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/AbstractRouterPolicy.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import java.util.Map; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.federation.policies.AbstractConfigurableFederationPolicy; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; + +/** + * Base abstract class for {@link FederationRouterPolicy} implementations, that + * provides common validation for reinitialization. + */ +public abstract class AbstractRouterPolicy extends + AbstractConfigurableFederationPolicy implements FederationRouterPolicy { + + @Override + public void validate(WeightedPolicyInfo newPolicyInfo) + throws FederationPolicyInitializationException { + super.validate(newPolicyInfo); + Map newWeights = + newPolicyInfo.getRouterPolicyWeights(); + if (newWeights == null || newWeights.size() < 1) { + throw new FederationPolicyInitializationException( + "Weight vector cannot be null/empty."); + } + } + + public void validate(ApplicationSubmissionContext appSubmissionContext) + throws FederationPolicyException { + + if (appSubmissionContext == null) { + throw new FederationPolicyException( + "Cannot route an application with null context."); + } + + // if the queue is not specified we set it to default value, to be + // compatible with YARN behavior. + String queue = appSubmissionContext.getQueue(); + if (queue == null) { + appSubmissionContext.setQueue(YarnConfiguration.DEFAULT_QUEUE_NAME); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/FederationRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/FederationRouterPolicy.java new file mode 100644 index 00000000000..9325bd8ca2a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/FederationRouterPolicy.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import java.util.List; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.ConfigurableFederationPolicy; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +/** + * Implements the logic for determining the routing of an application submission + * based on a policy. + */ +public interface FederationRouterPolicy extends ConfigurableFederationPolicy { + + /** + * Determines the sub-cluster that the user application submission should be + * routed to. + * + * @param appSubmissionContext the {@link ApplicationSubmissionContext} that + * has to be routed to an appropriate subCluster for execution. + * + * @param blackListSubClusters the list of subClusters as identified by + * {@link SubClusterId} to blackList from the selection of the home + * subCluster. + * + * @return the {@link SubClusterId} that will be the "home" for this + * application. + * + * @throws YarnException if the policy cannot determine a viable subcluster. + */ + SubClusterId getHomeSubcluster( + ApplicationSubmissionContext appSubmissionContext, + List blackListSubClusters) throws YarnException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/HashBasedRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/HashBasedRouterPolicy.java new file mode 100644 index 00000000000..cc118806653 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/HashBasedRouterPolicy.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies.router;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator;
+import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils;
+import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+
+/**
+ * This {@link FederationRouterPolicy} picks a sub-cluster based on the hash of
+ * the job's queue name. This is useful to provide a default behavior when too
+ * many queues exist in a system. It also ensures that all jobs belonging to a
+ * queue are mapped to the same sub-cluster (which likely helps with locality).
+ */
+public class HashBasedRouterPolicy extends AbstractRouterPolicy {
+
+  @Override
+  public void reinitialize(
+      FederationPolicyInitializationContext federationPolicyContext)
+      throws FederationPolicyInitializationException {
+    FederationPolicyInitializationContextValidator
+        .validate(federationPolicyContext, this.getClass().getCanonicalName());
+
+    // note: this overrides AbstractRouterPolicy and ignores the weights
+    setPolicyContext(federationPolicyContext);
+  }
+
+  /**
+   * Simply picks from the alphabetically-sorted active subclusters based on
+   * the hash of the queue name. Jobs of the same queue will all be routed to
+   * the same sub-cluster, as long as the set of active sub-clusters and their
+   * names remains the same.
+   *
+   * @param appSubmissionContext the {@link ApplicationSubmissionContext} that
+   *          has to be routed to an appropriate subCluster for execution.
+   *
+   * @param blackListSubClusters the list of subClusters as identified by
+   *          {@link SubClusterId} to blackList from the selection of the home
+   *          subCluster.
+   *
+   * @return a hash-based chosen {@link SubClusterId} that will be the "home"
+   *         for this application.
+   *
+   * @throws YarnException if there are no active subclusters.
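+   *
+   * <p>Editorial sketch (not part of this patch) of the selection performed
+   * below, with illustrative identifiers:
+   * <pre>
+   *   List&lt;SubClusterId&gt; ids = new ArrayList&lt;&gt;(active.keySet());
+   *   Collections.sort(ids);
+   *   SubClusterId home = ids.get(Math.abs(queue.hashCode() % ids.size()));
+   * </pre>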
+   */
+  @Override
+  public SubClusterId getHomeSubcluster(
+      ApplicationSubmissionContext appSubmissionContext,
+      List<SubClusterId> blackListSubClusters) throws YarnException {
+
+    // throws if no active subclusters available
+    Map<SubClusterId, SubClusterInfo> activeSubclusters =
+        getActiveSubclusters();
+
+    FederationPolicyUtils.validateSubClusterAvailability(
+        new ArrayList<SubClusterId>(activeSubclusters.keySet()),
+        blackListSubClusters);
+
+    if (blackListSubClusters != null) {
+
+      // remove the blacklisted sub-clusters from the active set obtained
+      // from the StateStore
+      for (SubClusterId scId : blackListSubClusters) {
+        activeSubclusters.remove(scId);
+      }
+    }
+
+    validate(appSubmissionContext);
+
+    int chosenPosition = Math.abs(
+        appSubmissionContext.getQueue().hashCode() % activeSubclusters.size());
+
+    List<SubClusterId> list = new ArrayList<>(activeSubclusters.keySet());
+    Collections.sort(list);
+    return list.get(chosenPosition);
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
new file mode 100644
index 00000000000..06e445bd60c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies.router;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils;
+import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
+import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+
+/**
+ * This implements a simple load-balancing policy. The policy "weights" are
+ * binary 0/1 values that enable/disable each sub-cluster, and the policy picks
+ * the sub-cluster with the least load to forward this application.
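+ *
+ * <p>For example (editorial illustration, values hypothetical): with router
+ * weights of {sc1 : 1, sc2 : 1, sc3 : 0}, only sc1 and sc2 are eligible, and
+ * the one whose capability currently reports the larger
+ * {@code clusterMetrics.availableMB} is chosen as the home sub-cluster.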
+ */ +public class LoadBasedRouterPolicy extends AbstractRouterPolicy { + + @Override + public void reinitialize(FederationPolicyInitializationContext policyContext) + throws FederationPolicyInitializationException { + + // remember old policyInfo + WeightedPolicyInfo tempPolicy = getPolicyInfo(); + + // attempt new initialization + super.reinitialize(policyContext); + + // check extra constraints + for (Float weight : getPolicyInfo().getRouterPolicyWeights().values()) { + if (weight != 0 && weight != 1) { + // reset to old policyInfo if check fails + setPolicyInfo(tempPolicy); + throw new FederationPolicyInitializationException( + this.getClass().getCanonicalName() + + " policy expects all weights to be either " + + "\"0\" or \"1\""); + } + } + } + + @Override + public SubClusterId getHomeSubcluster( + ApplicationSubmissionContext appSubmissionContext, + List blacklist) throws YarnException { + + // null checks and default-queue behavior + validate(appSubmissionContext); + + Map activeSubclusters = + getActiveSubclusters(); + + FederationPolicyUtils.validateSubClusterAvailability( + new ArrayList(activeSubclusters.keySet()), blacklist); + + Map weights = + getPolicyInfo().getRouterPolicyWeights(); + SubClusterIdInfo chosen = null; + long currBestMem = -1; + for (Map.Entry entry : activeSubclusters + .entrySet()) { + if (blacklist != null && blacklist.contains(entry.getKey())) { + continue; + } + SubClusterIdInfo id = new SubClusterIdInfo(entry.getKey()); + if (weights.containsKey(id) && weights.get(id) > 0) { + long availableMemory = getAvailableMemory(entry.getValue()); + if (availableMemory > currBestMem) { + currBestMem = availableMemory; + chosen = id; + } + } + } + + return chosen.toId(); + } + + private long getAvailableMemory(SubClusterInfo value) throws YarnException { + try { + long mem = -1; + JSONObject obj = new JSONObject(value.getCapability()); + mem = obj.getJSONObject("clusterMetrics").getLong("availableMB"); + return mem; + } catch (JSONException j) { + throw new YarnException("FederationSubCluserInfo cannot be parsed", j); + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java new file mode 100644 index 00000000000..a1f7666a9ff --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; + +/** + * This implements a policy that interprets "weights" as a ordered list of + * preferences among sub-clusters. Highest weight among active subclusters is + * chosen. + */ +public class PriorityRouterPolicy extends AbstractRouterPolicy { + + @Override + public SubClusterId getHomeSubcluster( + ApplicationSubmissionContext appSubmissionContext, + List blacklist) throws YarnException { + + // null checks and default-queue behavior + validate(appSubmissionContext); + + Map activeSubclusters = + getActiveSubclusters(); + + FederationPolicyUtils.validateSubClusterAvailability( + new ArrayList(activeSubclusters.keySet()), blacklist); + + // This finds the sub-cluster with the highest weight among the + // currently active ones. + Map weights = + getPolicyInfo().getRouterPolicyWeights(); + SubClusterId chosen = null; + Float currentBest = Float.MIN_VALUE; + for (SubClusterId id : activeSubclusters.keySet()) { + SubClusterIdInfo idInfo = new SubClusterIdInfo(id); + if (blacklist != null && blacklist.contains(id)) { + continue; + } + if (weights.containsKey(idInfo) && weights.get(idInfo) > currentBest) { + currentBest = weights.get(idInfo); + chosen = id; + } + } + + return chosen; + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/RejectRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/RejectRouterPolicy.java new file mode 100644 index 00000000000..b4c01927024 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/RejectRouterPolicy.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import java.util.List; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +/** + * This {@link FederationRouterPolicy} simply rejects all incoming requests. + * This is useful to prevent applications running in a queue to be run anywhere + * in the federated cluster. + */ +public class RejectRouterPolicy extends AbstractRouterPolicy { + + @Override + public void reinitialize( + FederationPolicyInitializationContext federationPolicyContext) + throws FederationPolicyInitializationException { + FederationPolicyInitializationContextValidator + .validate(federationPolicyContext, this.getClass().getCanonicalName()); + setPolicyContext(federationPolicyContext); + } + + /** + * The policy always reject requests. + * + * @param appSubmissionContext the {@link ApplicationSubmissionContext} that + * has to be routed to an appropriate subCluster for execution. + * + * @param blackListSubClusters the list of subClusters as identified by + * {@link SubClusterId} to blackList from the selection of the home + * subCluster. + * + * @return (never). + * + * @throws YarnException (always) to prevent applications in this queue to be + * run anywhere in the federated cluster. + */ + @Override + public SubClusterId getHomeSubcluster( + ApplicationSubmissionContext appSubmissionContext, + List blackListSubClusters) throws YarnException { + + // run standard validation, as error might differ + validate(appSubmissionContext); + + throw new FederationPolicyException("The policy configured for this queue" + + " (" + appSubmissionContext.getQueue() + ") reject all routing " + + "requests by construction. Application " + + appSubmissionContext.getApplicationId() + + " cannot be routed to any RM."); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/UniformRandomRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/UniformRandomRouterPolicy.java new file mode 100644 index 00000000000..7a8be91fcd0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/UniformRandomRouterPolicy.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Random; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; + +/** + * This simple policy picks at uniform random among any of the currently active + * subclusters. This policy is easy to use and good for testing. + * + * NOTE: this is "almost" subsumed by the {@code WeightedRandomRouterPolicy}. + * Behavior only diverges when there are active sub-clusters that are not part + * of the "weights", in which case the {@link UniformRandomRouterPolicy} send + * load to them, while {@code WeightedRandomRouterPolicy} does not. + */ +public class UniformRandomRouterPolicy extends AbstractRouterPolicy { + + private Random rand; + + public UniformRandomRouterPolicy() { + rand = new Random(System.currentTimeMillis()); + } + + @Override + public void reinitialize(FederationPolicyInitializationContext policyContext) + throws FederationPolicyInitializationException { + FederationPolicyInitializationContextValidator.validate(policyContext, + this.getClass().getCanonicalName()); + + // note: this overrides AbstractRouterPolicy and ignores the weights + + setPolicyContext(policyContext); + } + + /** + * Simply picks a random active subCluster to start the AM (this does NOT + * depend on the weights in the policy). + * + * @param appSubmissionContext the {@link ApplicationSubmissionContext} that + * has to be routed to an appropriate subCluster for execution. + * + * @param blackListSubClusters the list of subClusters as identified by + * {@link SubClusterId} to blackList from the selection of the home + * subCluster. + * + * @return a randomly chosen subcluster. + * + * @throws YarnException if there are no active subclusters. 
+ */ + @Override + public SubClusterId getHomeSubcluster( + ApplicationSubmissionContext appSubmissionContext, + List blackListSubClusters) throws YarnException { + + // null checks and default-queue behavior + validate(appSubmissionContext); + + Map activeSubclusters = + getActiveSubclusters(); + + List list = new ArrayList<>(activeSubclusters.keySet()); + + FederationPolicyUtils.validateSubClusterAvailability(list, + blackListSubClusters); + + if (blackListSubClusters != null) { + + // Remove from the active SubClusters from StateStore the blacklisted ones + for (SubClusterId scId : blackListSubClusters) { + list.remove(scId); + } + } + + return list.get(rand.nextInt(list.size())); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/WeightedRandomRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/WeightedRandomRouterPolicy.java new file mode 100644 index 00000000000..aec75760414 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/WeightedRandomRouterPolicy.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Random; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This policy implements a weighted random sample among currently active + * sub-clusters. 
+ */ +public class WeightedRandomRouterPolicy extends AbstractRouterPolicy { + + private static final Logger LOG = + LoggerFactory.getLogger(WeightedRandomRouterPolicy.class); + private Random rand = new Random(System.currentTimeMillis()); + + @Override + public SubClusterId getHomeSubcluster( + ApplicationSubmissionContext appSubmissionContext, + List blacklist) throws YarnException { + + // null checks and default-queue behavior + validate(appSubmissionContext); + + Map activeSubclusters = + getActiveSubclusters(); + + FederationPolicyUtils.validateSubClusterAvailability( + new ArrayList(activeSubclusters.keySet()), blacklist); + + // note: we cannot pre-compute the weights, as the set of activeSubcluster + // changes dynamically (and this would unfairly spread the load to + // sub-clusters adjacent to an inactive one), hence we need to count/scan + // the list and based on weight pick the next sub-cluster. + Map weights = + getPolicyInfo().getRouterPolicyWeights(); + + float totActiveWeight = 0; + for (Map.Entry entry : weights.entrySet()) { + if (blacklist != null && blacklist.contains(entry.getKey().toId())) { + continue; + } + if (entry.getKey() != null + && activeSubclusters.containsKey(entry.getKey().toId())) { + totActiveWeight += entry.getValue(); + } + } + float lookupValue = rand.nextFloat() * totActiveWeight; + + for (SubClusterId id : activeSubclusters.keySet()) { + if (blacklist != null && blacklist.contains(id)) { + continue; + } + SubClusterIdInfo idInfo = new SubClusterIdInfo(id); + if (weights.containsKey(idInfo)) { + lookupValue -= weights.get(idInfo); + } + if (lookupValue <= 0) { + return id; + } + } + // should never happen + return null; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/package-info.java new file mode 100644 index 00000000000..e445ac33377 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** Router policies. 
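A framework-free sketch of the sampling loop above: the weights of the active, non-blacklisted sub-clusters are summed, a uniform value in [0, total) is drawn, and the draw is decremented by each weight in turn until it reaches zero or below. Ids and weights are made up for illustration:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.Random;

    public class WeightedSamplingSketch {
      public static void main(String[] args) {
        // Hypothetical active sub-clusters and their weights.
        Map<String, Float> activeWeights = new LinkedHashMap<>();
        activeWeights.put("sc1", 0.7f);
        activeWeights.put("sc2", 0.2f);
        activeWeights.put("sc3", 0.1f);

        float total = 0;
        for (float w : activeWeights.values()) {
          total += w;
        }

        float lookup = new Random().nextFloat() * total;
        String chosen = null;
        for (Map.Entry<String, Float> e : activeWeights.entrySet()) {
          lookup -= e.getValue();
          if (lookup <= 0) {
            chosen = e.getKey();
            break;
          }
        }
        // "sc1" is returned ~70% of the time, "sc2" ~20%, "sc3" ~10%.
        System.out.println(chosen);
      }
    }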
**/ +package org.apache.hadoop.yarn.server.federation.policies.router; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java new file mode 100644 index 00000000000..bccff2d1a30 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java @@ -0,0 +1,67 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.federation.resolver; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.Map; + +/** + * Partial implementation of {@link SubClusterResolver}, containing basic + * implementations of the read methods. 
+ */ +public abstract class AbstractSubClusterResolver implements SubClusterResolver { + private Map nodeToSubCluster = + new ConcurrentHashMap(); + private Map> rackToSubClusters = + new ConcurrentHashMap>(); + + @Override + public SubClusterId getSubClusterForNode(String nodename) + throws YarnException { + SubClusterId subClusterId = this.nodeToSubCluster.get(nodename); + + if (subClusterId == null) { + throw new YarnException("Cannot find subClusterId for node " + nodename); + } + + return subClusterId; + } + + @Override + public Set getSubClustersForRack(String rackname) + throws YarnException { + if (!rackToSubClusters.containsKey(rackname)) { + throw new YarnException("Cannot resolve rack " + rackname); + } + + return rackToSubClusters.get(rackname); + } + + public Map getNodeToSubCluster() { + return nodeToSubCluster; + } + + public Map> getRackToSubClusters() { + return rackToSubClusters; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/DefaultSubClusterResolverImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/DefaultSubClusterResolverImpl.java new file mode 100644 index 00000000000..d3c5c269abb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/DefaultSubClusterResolverImpl.java @@ -0,0 +1,164 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.federation.resolver; + +import java.io.BufferedReader; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.InvalidPathException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + * Default simple sub-cluster and rack resolver class. + * + * This class expects a three-column comma separated file, specified in + * yarn.federation.machine-list. Each line of the file should be of the format: + * + * nodeName, subClusterId, rackName + * + * Lines that do not follow this format will be ignored. This resolver only + * loads the file when load() is explicitly called; it will not react to changes + * to the file. + * + * It is case-insensitive on the rack and node names and ignores + * leading/trailing whitespace. 
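+ *
+ * For example, the file referenced by yarn.federation.machine-list could contain
+ * (hypothetical host, sub-cluster, and rack names):
+ *
+ * node1.example.com, subcluster1, rack1
+ * node2.example.com, subcluster1, rack1
+ * node3.example.com, subcluster2, rack2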
+ * + */ +public class DefaultSubClusterResolverImpl extends AbstractSubClusterResolver + implements SubClusterResolver { + + private static final Logger LOG = + LoggerFactory.getLogger(DefaultSubClusterResolverImpl.class); + private Configuration conf; + + // Index of the node hostname in the machine info file. + private static final int NODE_NAME_INDEX = 0; + + // Index of the sub-cluster ID in the machine info file. + private static final int SUBCLUSTER_ID_INDEX = 1; + + // Index of the rack name ID in the machine info file. + private static final int RACK_NAME_INDEX = 2; + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConf() { + return this.conf; + } + + @Override + public SubClusterId getSubClusterForNode(String nodename) + throws YarnException { + return super.getSubClusterForNode(nodename.toUpperCase()); + } + + @Override + public void load() { + String fileName = + this.conf.get(YarnConfiguration.FEDERATION_MACHINE_LIST, ""); + + try { + if (fileName == null || fileName.trim().length() == 0) { + LOG.info( + "The machine list file path is not specified in the configuration"); + return; + } + + Path file = null; + BufferedReader reader = null; + + try { + file = Paths.get(fileName); + } catch (InvalidPathException e) { + LOG.info("The configured machine list file path {} does not exist", + fileName); + return; + } + + try { + reader = Files.newBufferedReader(file, Charset.defaultCharset()); + String line = null; + while ((line = reader.readLine()) != null) { + String[] tokens = line.split(","); + if (tokens.length == 3) { + + String nodeName = tokens[NODE_NAME_INDEX].trim().toUpperCase(); + SubClusterId subClusterId = + SubClusterId.newInstance(tokens[SUBCLUSTER_ID_INDEX].trim()); + String rackName = tokens[RACK_NAME_INDEX].trim().toUpperCase(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Loading node into resolver: {} --> {}", nodeName, + subClusterId); + LOG.debug("Loading rack into resolver: {} --> {} ", rackName, + subClusterId); + } + + this.getNodeToSubCluster().put(nodeName, subClusterId); + loadRackToSubCluster(rackName, subClusterId); + } else { + LOG.warn("Skipping malformed line in machine list: " + line); + } + } + } finally { + if (reader != null) { + reader.close(); + } + } + LOG.info("Successfully loaded file {}", fileName); + + } catch (Exception e) { + LOG.error("Failed to parse file " + fileName, e); + } + } + + private void loadRackToSubCluster(String rackName, + SubClusterId subClusterId) { + String rackNameUpper = rackName.toUpperCase(); + + if (!this.getRackToSubClusters().containsKey(rackNameUpper)) { + this.getRackToSubClusters().put(rackNameUpper, + new HashSet()); + } + + this.getRackToSubClusters().get(rackNameUpper).add(subClusterId); + + } + + @Override + public Set getSubClustersForRack(String rackname) + throws YarnException { + return super.getSubClustersForRack(rackname.toUpperCase()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/SubClusterResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/SubClusterResolver.java new file mode 100644 index 00000000000..612d39685da --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/SubClusterResolver.java @@ -0,0 +1,58 
@@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.federation.resolver; + +import java.util.Set; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +/** + * An utility that helps to determine the sub-cluster that a specified node or + * rack belongs to. All implementing classes should be thread-safe. + */ +public interface SubClusterResolver extends Configurable { + + /** + * Obtain the sub-cluster that a specified node belongs to. + * + * @param nodename the node whose sub-cluster is to be determined + * @return the sub-cluster as identified by the {@link SubClusterId} that the + * node belongs to + * @throws YarnException if the node's sub-cluster cannot be resolved + */ + SubClusterId getSubClusterForNode(String nodename) throws YarnException; + + /** + * Obtain the sub-clusters that have nodes on a specified rack. + * + * @param rackname the name of the rack + * @return the sub-clusters as identified by the {@link SubClusterId} that + * have nodes on the given rack + * @throws YarnException if the sub-cluster of any node on the rack cannot be + * resolved, or if the rack name is not recognized + */ + Set getSubClustersForRack(String rackname) throws YarnException; + + /** + * Load the nodes to subCluster mapping from the file. + */ + void load(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/package-info.java new file mode 100644 index 00000000000..c0426608a01 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.resolver; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java new file mode 100644 index 00000000000..ace2457e368 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse; + +/** + * FederationApplicationHomeSubClusterStore maintains the state of all + * Applications that have been submitted to the federated cluster. + * + * * + *

+ * The mapping details contains:
+ *
+ * <ul>
+ * <li>{@code ApplicationId}</li>
+ * <li>{@code SubClusterId}</li>
+ * </ul>

+ * + */ +@Private +@Unstable +public interface FederationApplicationHomeSubClusterStore { + + /** + * Register the home {@code SubClusterId} of the newly submitted + * {@code ApplicationId}. Currently response is empty if the operation was + * successful, if not an exception reporting reason for a failure. If a + * mapping for the application already existed, the {@code SubClusterId} in + * this response will return the existing mapping which might be different + * from that in the {@code AddApplicationHomeSubClusterRequest}. + * + * @param request the request to register a new application with its home + * sub-cluster + * @return upon successful registration of the application in the StateStore, + * {@code AddApplicationHomeSubClusterRequest} containing the home + * sub-cluster of the application. Otherwise, an exception reporting + * reason for a failure + * @throws YarnException if the request is invalid/fails + */ + AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster( + AddApplicationHomeSubClusterRequest request) throws YarnException; + + /** + * Update the home {@code SubClusterId} of a previously submitted + * {@code ApplicationId}. Currently response is empty if the operation was + * successful, if not an exception reporting reason for a failure. + * + * @param request the request to update the home sub-cluster of an + * application. + * @return empty on successful update of the application in the StateStore, if + * not an exception reporting reason for a failure + * @throws YarnException if the request is invalid/fails + */ + UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster( + UpdateApplicationHomeSubClusterRequest request) throws YarnException; + + /** + * Get information about the application identified by the input + * {@code ApplicationId}. + * + * @param request contains the application queried + * @return {@code ApplicationHomeSubCluster} containing the application's home + * subcluster + * @throws YarnException if the request is invalid/fails + */ + GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( + GetApplicationHomeSubClusterRequest request) throws YarnException; + + /** + * Get the {@code ApplicationHomeSubCluster} list representing the mapping of + * all submitted applications to it's home sub-cluster. + * + * @param request empty representing all applications + * @return the mapping of all submitted application to it's home sub-cluster + * @throws YarnException if the request is invalid/fails + */ + GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( + GetApplicationsHomeSubClusterRequest request) throws YarnException; + + /** + * Delete the mapping of home {@code SubClusterId} of a previously submitted + * {@code ApplicationId}. Currently response is empty if the operation was + * successful, if not an exception reporting reason for a failure. + * + * @param request the request to delete the home sub-cluster of an + * application. 
+ * @return empty on successful update of the application in the StateStore, if + * not an exception reporting reason for a failure + * @throws YarnException if the request is invalid/fails + */ + DeleteApplicationHomeSubClusterResponse deleteApplicationHomeSubCluster( + DeleteApplicationHomeSubClusterRequest request) throws YarnException; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java new file mode 100644 index 00000000000..49ec3bf31db --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse; + +/** + * FederationMembershipStateStore maintains the state of all + * subcluster(s) as encapsulated by {@code SubClusterInfo} for all the + * subcluster(s) that are participating in federation. + */ +@Private +@Unstable +public interface FederationMembershipStateStore { + + /** + * Register a subcluster by publishing capabilities as represented by + * {@code SubClusterInfo} to indicate participation in federation. This is + * typically done during initialization or restart/failover of the + * subcluster's ResourceManager. Upon successful registration, an + * identifier for the subcluster which is unique across the federated + * cluster is returned. The identifier is static, i.e. preserved across + * restarts and failover. + * + * @param registerSubClusterRequest the capabilities of the subcluster that + * wants to participate in federation. The subcluster id is also + * specified in case registration is triggered by restart/failover + * @return response empty on successfully if registration was successful + * @throws YarnException if the request is invalid/fails + */ + SubClusterRegisterResponse registerSubCluster( + SubClusterRegisterRequest registerSubClusterRequest) throws YarnException; + + /** + * Deregister a subcluster identified by {@code SubClusterId} to + * change state in federation. This can be done to mark the sub cluster lost, + * deregistered, or decommissioned. + * + * @param subClusterDeregisterRequest - the request to deregister the + * sub-cluster from federation. + * @return response empty on successfully deregistering the subcluster state + * @throws YarnException if the request is invalid/fails + */ + SubClusterDeregisterResponse deregisterSubCluster( + SubClusterDeregisterRequest subClusterDeregisterRequest) + throws YarnException; + + /** + * Periodic heartbeat from a ResourceManager participating in + * federation to indicate liveliness. The heartbeat publishes the current + * capabilities as represented by {@code SubClusterInfo} of the subcluster. 
+ * Currently response is empty if the operation was successful, if not an + * exception reporting reason for a failure. + * + * @param subClusterHeartbeatRequest the capabilities of the subcluster that + * wants to keep alive its participation in federation + * @return response currently empty on if heartbeat was successfully processed + * @throws YarnException if the request is invalid/fails + */ + SubClusterHeartbeatResponse subClusterHeartbeat( + SubClusterHeartbeatRequest subClusterHeartbeatRequest) + throws YarnException; + + /** + * Get the membership information of subcluster as identified by + * {@code SubClusterId}. The membership information includes the cluster + * endpoint and current capabilities as represented by {@code SubClusterInfo}. + * + * @param subClusterRequest the subcluster whose information is required + * @return the {@code SubClusterInfo}, or {@code null} if there is no mapping + * for the subcluster + * @throws YarnException if the request is invalid/fails + */ + GetSubClusterInfoResponse getSubCluster( + GetSubClusterInfoRequest subClusterRequest) throws YarnException; + + /** + * Get the membership information of all the subclusters that are + * currently participating in federation. The membership information includes + * the cluster endpoint and current capabilities as represented by + * {@code SubClusterInfo}. + * + * @param subClustersRequest request for sub-clusters information + * @return a map of {@code SubClusterInfo} keyed by the {@code SubClusterId} + * @throws YarnException if the request is invalid/fails + */ + GetSubClustersInfoResponse getSubClusters( + GetSubClustersInfoRequest subClustersRequest) throws YarnException; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java new file mode 100644 index 00000000000..b0e03a6f33f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse; + +/** + * The FederationPolicyStore provides a key-value interface to access the + * policies configured for the system. The key is a "queue" name, i.e., the + * system allows to configure a different policy for each queue in the system + * (though each policy can make dynamic run-time decisions on a per-job/per-task + * basis). The value is a {@code SubClusterPolicyConfiguration}, a serialized + * representation of the policy type and its parameters. + */ +@Private +@Unstable +public interface FederationPolicyStore { + + /** + * Get the policy configuration for a given queue. + * + * @param request the queue whose {@code SubClusterPolicyConfiguration} is + * required + * @return the {@code SubClusterPolicyConfiguration} for the specified queue, + * or {@code null} if there is no mapping for the queue + * @throws YarnException if the request is invalid/fails + */ + GetSubClusterPolicyConfigurationResponse getPolicyConfiguration( + GetSubClusterPolicyConfigurationRequest request) throws YarnException; + + /** + * Set the policy configuration for a given queue. + * + * @param request the {@code SubClusterPolicyConfiguration} with the + * corresponding queue + * @return response empty on successfully updating the + * {@code SubClusterPolicyConfiguration} for the specified queue + * @throws YarnException if the request is invalid/fails + */ + SetSubClusterPolicyConfigurationResponse setPolicyConfiguration( + SetSubClusterPolicyConfigurationRequest request) throws YarnException; + + /** + * Get a map of all queue-to-policy configurations. + * + * @param request empty to represent all configured queues in the system + * @return the policies for all currently active queues in the system + * @throws YarnException if the request is invalid/fails + */ + GetSubClusterPoliciesConfigurationsResponse getPoliciesConfigurations( + GetSubClusterPoliciesConfigurationsRequest request) throws YarnException; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationStateStore.java new file mode 100644 index 00000000000..9397e9c2404 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationStateStore.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.records.Version; + +/** + * FederationStore extends the three interfaces used to coordinate the state of + * a federated cluster: {@link FederationApplicationHomeSubClusterStore}, + * {@link FederationMembershipStateStore}, and {@link FederationPolicyStore}. + * + */ +public interface FederationStateStore + extends FederationApplicationHomeSubClusterStore, + FederationMembershipStateStore, FederationPolicyStore { + + /** + * Initialize the FederationStore. + * + * @param conf the cluster configuration + * @throws YarnException if initialization fails + */ + void init(Configuration conf) throws YarnException; + + /** + * Perform any cleanup operations of the StateStore. + * + * @throws Exception if cleanup fails + */ + void close() throws Exception; + + /** + * Get the {@link Version} of the underlying federation state store client. + * + * @return the {@link Version} of the underlying federation store client + */ + Version getCurrentVersion(); + + /** + * Load the version information from the federation state store. + * + * @return the {@link Version} of the federation state store + */ + Version loadVersion(); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreException.java new file mode 100644 index 00000000000..1013ec6df2d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreException.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
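A minimal usage sketch tying the three store interfaces together through the in-memory implementation added later in this patch. The newInstance factories on AddApplicationHomeSubClusterRequest and GetApplicationHomeSubClusterRequest, and the getter on the response record, are assumed by analogy with the record factories visible elsewhere in the patch, so treat this as illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
    import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
    import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
    import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
    import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;

    public class FederationStateStoreUsageSketch {
      public static void main(String[] args) throws Exception {
        MemoryFederationStateStore stateStore = new MemoryFederationStateStore();
        stateStore.init(new Configuration());

        // Record the home sub-cluster of a made-up application.
        ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
        SubClusterId home = SubClusterId.newInstance("subcluster1");
        stateStore.addApplicationHomeSubCluster(
            AddApplicationHomeSubClusterRequest.newInstance(   // factory signature assumed
                ApplicationHomeSubCluster.newInstance(appId, home)));

        // Read the mapping back.
        GetApplicationHomeSubClusterResponse response =
            stateStore.getApplicationHomeSubCluster(
                GetApplicationHomeSubClusterRequest.newInstance(appId)); // factory assumed
        // Accessor name assumed to mirror the record name.
        System.out.println(response.getApplicationHomeSubCluster().getHomeSubCluster());

        stateStore.close();
      }
    }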
+ */ + +package org.apache.hadoop.yarn.server.federation.store.exception; + +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** + * Exception thrown by the FederationStateStore. + * + */ +public class FederationStateStoreException extends YarnException { + + /** + * IDE auto-generated. + */ + private static final long serialVersionUID = -6453353714832159296L; + + public FederationStateStoreException() { + super(); + } + + public FederationStateStoreException(String message) { + super(message); + } + + public FederationStateStoreException(Throwable cause) { + super(cause); + } + + public FederationStateStoreException(String message, Throwable cause) { + super(message, cause); + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreInvalidInputException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreInvalidInputException.java new file mode 100644 index 00000000000..edf78376550 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreInvalidInputException.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.exception; + +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** + * Exception thrown by the {@code FederationMembershipStateStoreInputValidator}, + * {@code FederationApplicationHomeSubClusterStoreInputValidator}, + * {@code FederationPolicyStoreInputValidator} if the input is invalid. + * + */ +public class FederationStateStoreInvalidInputException extends YarnException { + + /** + * IDE auto-generated. 
+ */ + private static final long serialVersionUID = -7352144682711430801L; + + public FederationStateStoreInvalidInputException(Throwable cause) { + super(cause); + } + + public FederationStateStoreInvalidInputException(String message) { + super(message); + } + + public FederationStateStoreInvalidInputException(String message, + Throwable cause) { + super(message, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreRetriableException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreRetriableException.java new file mode 100644 index 00000000000..19d6750781e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreRetriableException.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.exception; + +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** + * Exception thrown by the {@code FederationStateStore}, if it is a retriable + * exception. + * + */ +public class FederationStateStoreRetriableException extends YarnException { + + private static final long serialVersionUID = 1L; + + public FederationStateStoreRetriableException(Throwable cause) { + super(cause); + } + + public FederationStateStoreRetriableException(String message) { + super(message); + } + + public FederationStateStoreRetriableException(String message, + Throwable cause) { + super(message, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/package-info.java new file mode 100644 index 00000000000..727606f3162 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.store.exception; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java new file mode 100644 index 00000000000..7c06256a413 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java @@ -0,0 +1,315 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.impl; + +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.TimeZone; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse; +import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationApplicationHomeSubClusterStoreInputValidator; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationMembershipStateStoreInputValidator; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.util.MonotonicClock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * In-memory implementation of {@link FederationStateStore}. + */ +public class MemoryFederationStateStore implements FederationStateStore { + + private Map membership; + private Map applications; + private Map policies; + + private final MonotonicClock clock = new MonotonicClock(); + + public static final Logger LOG = + LoggerFactory.getLogger(MemoryFederationStateStore.class); + + @Override + public void init(Configuration conf) { + membership = new ConcurrentHashMap(); + applications = new ConcurrentHashMap(); + policies = new ConcurrentHashMap(); + } + + @Override + public void close() { + membership = null; + applications = null; + policies = null; + } + + @Override + public SubClusterRegisterResponse registerSubCluster( + SubClusterRegisterRequest request) throws YarnException { + FederationMembershipStateStoreInputValidator.validate(request); + SubClusterInfo subClusterInfo = request.getSubClusterInfo(); + + long currentTime = + Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis(); + + SubClusterInfo subClusterInfoToSave = + SubClusterInfo.newInstance(subClusterInfo.getSubClusterId(), + subClusterInfo.getAMRMServiceAddress(), + subClusterInfo.getClientRMServiceAddress(), + subClusterInfo.getRMAdminServiceAddress(), + subClusterInfo.getRMWebServiceAddress(), currentTime, + subClusterInfo.getState(), subClusterInfo.getLastStartTime(), + subClusterInfo.getCapability()); + + membership.put(subClusterInfo.getSubClusterId(), subClusterInfoToSave); + return SubClusterRegisterResponse.newInstance(); + } + + @Override + public SubClusterDeregisterResponse deregisterSubCluster( + SubClusterDeregisterRequest request) throws YarnException { + FederationMembershipStateStoreInputValidator.validate(request); + SubClusterInfo subClusterInfo = membership.get(request.getSubClusterId()); + if (subClusterInfo == null) { + String errMsg = + "SubCluster " + request.getSubClusterId().toString() + " not found"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } else { + subClusterInfo.setState(request.getState()); + } + + return SubClusterDeregisterResponse.newInstance(); + } + + @Override + public SubClusterHeartbeatResponse subClusterHeartbeat( + SubClusterHeartbeatRequest request) throws YarnException { + + FederationMembershipStateStoreInputValidator.validate(request); + SubClusterId subClusterId = request.getSubClusterId(); + SubClusterInfo subClusterInfo = membership.get(subClusterId); + + if (subClusterInfo == null) { + String errMsg = "SubCluster " + subClusterId.toString() + + " does not exist; cannot heartbeat"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + long currentTime = + Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis(); + + 
subClusterInfo.setLastHeartBeat(currentTime); + subClusterInfo.setState(request.getState()); + subClusterInfo.setCapability(request.getCapability()); + + return SubClusterHeartbeatResponse.newInstance(); + } + + @Override + public GetSubClusterInfoResponse getSubCluster( + GetSubClusterInfoRequest request) throws YarnException { + + FederationMembershipStateStoreInputValidator.validate(request); + SubClusterId subClusterId = request.getSubClusterId(); + if (!membership.containsKey(subClusterId)) { + LOG.warn("The queried SubCluster: {} does not exist.", subClusterId); + return null; + } + + return GetSubClusterInfoResponse.newInstance(membership.get(subClusterId)); + } + + @Override + public GetSubClustersInfoResponse getSubClusters( + GetSubClustersInfoRequest request) throws YarnException { + List result = new ArrayList(); + + for (SubClusterInfo info : membership.values()) { + if (!request.getFilterInactiveSubClusters() + || info.getState().isActive()) { + result.add(info); + } + } + return GetSubClustersInfoResponse.newInstance(result); + } + + // FederationApplicationHomeSubClusterStore methods + + @Override + public AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster( + AddApplicationHomeSubClusterRequest request) throws YarnException { + + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + ApplicationId appId = + request.getApplicationHomeSubCluster().getApplicationId(); + + if (!applications.containsKey(appId)) { + applications.put(appId, + request.getApplicationHomeSubCluster().getHomeSubCluster()); + } + + return AddApplicationHomeSubClusterResponse + .newInstance(applications.get(appId)); + } + + @Override + public UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster( + UpdateApplicationHomeSubClusterRequest request) throws YarnException { + + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + ApplicationId appId = + request.getApplicationHomeSubCluster().getApplicationId(); + if (!applications.containsKey(appId)) { + String errMsg = "Application " + appId + " does not exist"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + applications.put(appId, + request.getApplicationHomeSubCluster().getHomeSubCluster()); + return UpdateApplicationHomeSubClusterResponse.newInstance(); + } + + @Override + public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( + GetApplicationHomeSubClusterRequest request) throws YarnException { + + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + ApplicationId appId = request.getApplicationId(); + if (!applications.containsKey(appId)) { + String errMsg = "Application " + appId + " does not exist"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + return GetApplicationHomeSubClusterResponse.newInstance( + ApplicationHomeSubCluster.newInstance(appId, applications.get(appId))); + } + + @Override + public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( + GetApplicationsHomeSubClusterRequest request) throws YarnException { + List result = + new ArrayList(); + for (Entry e : applications.entrySet()) { + result + .add(ApplicationHomeSubCluster.newInstance(e.getKey(), e.getValue())); + } + + GetApplicationsHomeSubClusterResponse.newInstance(result); + return GetApplicationsHomeSubClusterResponse.newInstance(result); + } + + @Override + public DeleteApplicationHomeSubClusterResponse deleteApplicationHomeSubCluster( + DeleteApplicationHomeSubClusterRequest request) 
throws YarnException { + + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + ApplicationId appId = request.getApplicationId(); + if (!applications.containsKey(appId)) { + String errMsg = "Application " + appId + " does not exist"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + applications.remove(appId); + return DeleteApplicationHomeSubClusterResponse.newInstance(); + } + + @Override + public GetSubClusterPolicyConfigurationResponse getPolicyConfiguration( + GetSubClusterPolicyConfigurationRequest request) throws YarnException { + + FederationPolicyStoreInputValidator.validate(request); + String queue = request.getQueue(); + if (!policies.containsKey(queue)) { + LOG.warn("Policy for queue: {} does not exist.", queue); + return null; + } + + return GetSubClusterPolicyConfigurationResponse + .newInstance(policies.get(queue)); + } + + @Override + public SetSubClusterPolicyConfigurationResponse setPolicyConfiguration( + SetSubClusterPolicyConfigurationRequest request) throws YarnException { + + FederationPolicyStoreInputValidator.validate(request); + policies.put(request.getPolicyConfiguration().getQueue(), + request.getPolicyConfiguration()); + return SetSubClusterPolicyConfigurationResponse.newInstance(); + } + + @Override + public GetSubClusterPoliciesConfigurationsResponse getPoliciesConfigurations( + GetSubClusterPoliciesConfigurationsRequest request) throws YarnException { + ArrayList result = + new ArrayList(); + for (SubClusterPolicyConfiguration policy : policies.values()) { + result.add(policy); + } + return GetSubClusterPoliciesConfigurationsResponse.newInstance(result); + } + + @Override + public Version getCurrentVersion() { + return null; + } + + @Override + public Version loadVersion() { + return null; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java new file mode 100644 index 00000000000..63d8e424605 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java @@ -0,0 +1,942 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.impl; + +import java.nio.ByteBuffer; +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.TimeZone; + +import org.apache.commons.lang.NotImplementedException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse; +import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationApplicationHomeSubClusterStoreInputValidator; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationMembershipStateStoreInputValidator; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils; +import org.apache.hadoop.yarn.server.records.Version; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.zaxxer.hikari.HikariDataSource; + +/** + * SQL implementation of {@link FederationStateStore}. + */ +public class SQLFederationStateStore implements FederationStateStore { + + public static final Logger LOG = + LoggerFactory.getLogger(SQLFederationStateStore.class); + + // Stored procedures patterns + + private static final String CALL_SP_REGISTER_SUBCLUSTER = + "{call sp_registerSubCluster(?, ?, ?, ?, ?, ?, ?, ?, ?)}"; + + private static final String CALL_SP_DEREGISTER_SUBCLUSTER = + "{call sp_deregisterSubCluster(?, ?, ?)}"; + + private static final String CALL_SP_GET_SUBCLUSTER = + "{call sp_getSubCluster(?, ?, ?, ?, ?, ?, ?, ?, ?)}"; + + private static final String CALL_SP_GET_SUBCLUSTERS = + "{call sp_getSubClusters()}"; + + private static final String CALL_SP_SUBCLUSTER_HEARTBEAT = + "{call sp_subClusterHeartbeat(?, ?, ?, ?)}"; + + private static final String CALL_SP_ADD_APPLICATION_HOME_SUBCLUSTER = + "{call sp_addApplicationHomeSubCluster(?, ?, ?, ?)}"; + + private static final String CALL_SP_UPDATE_APPLICATION_HOME_SUBCLUSTER = + "{call sp_updateApplicationHomeSubCluster(?, ?, ?)}"; + + private static final String CALL_SP_DELETE_APPLICATION_HOME_SUBCLUSTER = + "{call sp_deleteApplicationHomeSubCluster(?, ?)}"; + + private static final String CALL_SP_GET_APPLICATION_HOME_SUBCLUSTER = + "{call sp_getApplicationHomeSubCluster(?, ?)}"; + + private static final String CALL_SP_GET_APPLICATIONS_HOME_SUBCLUSTER = + "{call sp_getApplicationsHomeSubCluster()}"; + + private static final String CALL_SP_SET_POLICY_CONFIGURATION = + "{call sp_setPolicyConfiguration(?, ?, ?, ?)}"; + + private static final String CALL_SP_GET_POLICY_CONFIGURATION = + "{call sp_getPolicyConfiguration(?, ?, ?)}"; + + private static final String CALL_SP_GET_POLICIES_CONFIGURATIONS = + "{call sp_getPoliciesConfigurations()}"; + + private Calendar utcCalendar = + Calendar.getInstance(TimeZone.getTimeZone("UTC")); + + // SQL database configurations + + private String userName; + private String password; + private String driverClass; + private String url; + private int maximumPoolSize; + private HikariDataSource dataSource = null; + + @Override + public void init(Configuration conf) throws YarnException { + driverClass = + conf.get(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS, + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_SQL_JDBC_CLASS); + maximumPoolSize = + conf.getInt(YarnConfiguration.FEDERATION_STATESTORE_SQL_MAXCONNECTIONS, + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_SQL_MAXCONNECTIONS); + + // An helper method avoids to assign a null value to these property + userName = conf.get(YarnConfiguration.FEDERATION_STATESTORE_SQL_USERNAME); + password = 
conf.get(YarnConfiguration.FEDERATION_STATESTORE_SQL_PASSWORD); + url = conf.get(YarnConfiguration.FEDERATION_STATESTORE_SQL_URL); + + try { + Class.forName(driverClass); + } catch (ClassNotFoundException e) { + FederationStateStoreUtils.logAndThrowException(LOG, + "Driver class not found.", e); + } + + // Create the data source to pool connections in a thread-safe manner + dataSource = new HikariDataSource(); + dataSource.setDataSourceClassName(driverClass); + FederationStateStoreUtils.setUsername(dataSource, userName); + FederationStateStoreUtils.setPassword(dataSource, password); + FederationStateStoreUtils.setProperty(dataSource, + FederationStateStoreUtils.FEDERATION_STORE_URL, url); + dataSource.setMaximumPoolSize(maximumPoolSize); + LOG.info("Initialized connection pool to the Federation StateStore " + + "database at address: " + url); + } + + @Override + public SubClusterRegisterResponse registerSubCluster( + SubClusterRegisterRequest registerSubClusterRequest) + throws YarnException { + + // Input validator + FederationMembershipStateStoreInputValidator + .validate(registerSubClusterRequest); + + CallableStatement cstmt = null; + Connection conn = null; + + SubClusterInfo subClusterInfo = + registerSubClusterRequest.getSubClusterInfo(); + SubClusterId subClusterId = subClusterInfo.getSubClusterId(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_REGISTER_SUBCLUSTER); + + // Set the parameters for the stored procedure + cstmt.setString(1, subClusterId.getId()); + cstmt.setString(2, subClusterInfo.getAMRMServiceAddress()); + cstmt.setString(3, subClusterInfo.getClientRMServiceAddress()); + cstmt.setString(4, subClusterInfo.getRMAdminServiceAddress()); + cstmt.setString(5, subClusterInfo.getRMWebServiceAddress()); + cstmt.setString(6, subClusterInfo.getState().toString()); + cstmt.setLong(7, subClusterInfo.getLastStartTime()); + cstmt.setString(8, subClusterInfo.getCapability()); + cstmt.registerOutParameter(9, java.sql.Types.INTEGER); + + // Execute the query + cstmt.executeUpdate(); + + // Check the ROWCOUNT value, if it is equal to 0 it means the call + // did not add a new subcluster into FederationStateStore + if (cstmt.getInt(9) == 0) { + String errMsg = "SubCluster " + subClusterId + + " was not registered into the StateStore"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + // Check the ROWCOUNT value, if it is different from 1 it means the call + // had a wrong behavior. Maybe the database is not set correctly. 
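+ // (Out parameter 9 holds the ROWCOUNT reported by sp_registerSubCluster.)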
+ if (cstmt.getInt(9) != 1) { + String errMsg = "Wrong behavior during registration of SubCluster " + + subClusterId + " into the StateStore"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + LOG.info( + "Registered the SubCluster " + subClusterId + " into the StateStore"); + + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to register the SubCluster " + subClusterId + + " into the StateStore", + e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return SubClusterRegisterResponse.newInstance(); + } + + @Override + public SubClusterDeregisterResponse deregisterSubCluster( + SubClusterDeregisterRequest subClusterDeregisterRequest) + throws YarnException { + + // Input validator + FederationMembershipStateStoreInputValidator + .validate(subClusterDeregisterRequest); + + CallableStatement cstmt = null; + Connection conn = null; + + SubClusterId subClusterId = subClusterDeregisterRequest.getSubClusterId(); + SubClusterState state = subClusterDeregisterRequest.getState(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_DEREGISTER_SUBCLUSTER); + + // Set the parameters for the stored procedure + cstmt.setString(1, subClusterId.getId()); + cstmt.setString(2, state.toString()); + cstmt.registerOutParameter(3, java.sql.Types.INTEGER); + + // Execute the query + cstmt.executeUpdate(); + + // Check the ROWCOUNT value, if it is equal to 0 it means the call + // did not deregister the subcluster into FederationStateStore + if (cstmt.getInt(3) == 0) { + String errMsg = "SubCluster " + subClusterId + " not found"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + // Check the ROWCOUNT value, if it is different from 1 it means the call + // had a wrong behavior. Maybe the database is not set correctly. 
+ if (cstmt.getInt(3) != 1) { + String errMsg = "Wrong behavior during deregistration of SubCluster " + + subClusterId + " from the StateStore"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + LOG.info("Deregistered the SubCluster " + subClusterId + " state to " + + state.toString()); + + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to deregister the sub-cluster " + subClusterId + " state to " + + state.toString(), + e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return SubClusterDeregisterResponse.newInstance(); + } + + @Override + public SubClusterHeartbeatResponse subClusterHeartbeat( + SubClusterHeartbeatRequest subClusterHeartbeatRequest) + throws YarnException { + + // Input validator + FederationMembershipStateStoreInputValidator + .validate(subClusterHeartbeatRequest); + + CallableStatement cstmt = null; + Connection conn = null; + + SubClusterId subClusterId = subClusterHeartbeatRequest.getSubClusterId(); + SubClusterState state = subClusterHeartbeatRequest.getState(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_SUBCLUSTER_HEARTBEAT); + + // Set the parameters for the stored procedure + cstmt.setString(1, subClusterId.getId()); + cstmt.setString(2, state.toString()); + cstmt.setString(3, subClusterHeartbeatRequest.getCapability()); + cstmt.registerOutParameter(4, java.sql.Types.INTEGER); + + // Execute the query + cstmt.executeUpdate(); + + // Check the ROWCOUNT value, if it is equal to 0 it means the call + // did not update the subcluster into FederationStateStore + if (cstmt.getInt(4) == 0) { + String errMsg = "SubCluster " + subClusterId.toString() + + " does not exist; cannot heartbeat"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + // Check the ROWCOUNT value, if it is different from 1 it means the call + // had a wrong behavior. Maybe the database is not set correctly. 
+ if (cstmt.getInt(4) != 1) { + String errMsg = + "Wrong behavior during the heartbeat of SubCluster " + subClusterId; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + LOG.info("Heartbeated the StateStore for the specified SubCluster " + + subClusterId); + + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to heartbeat the StateStore for the specified SubCluster " + + subClusterId, + e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return SubClusterHeartbeatResponse.newInstance(); + } + + @Override + public GetSubClusterInfoResponse getSubCluster( + GetSubClusterInfoRequest subClusterRequest) throws YarnException { + + // Input validator + FederationMembershipStateStoreInputValidator.validate(subClusterRequest); + + CallableStatement cstmt = null; + Connection conn = null; + + SubClusterInfo subClusterInfo = null; + SubClusterId subClusterId = subClusterRequest.getSubClusterId(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_GET_SUBCLUSTER); + cstmt.setString(1, subClusterId.getId()); + + // Set the parameters for the stored procedure + cstmt.registerOutParameter(2, java.sql.Types.VARCHAR); + cstmt.registerOutParameter(3, java.sql.Types.VARCHAR); + cstmt.registerOutParameter(4, java.sql.Types.VARCHAR); + cstmt.registerOutParameter(5, java.sql.Types.VARCHAR); + cstmt.registerOutParameter(6, java.sql.Types.TIMESTAMP); + cstmt.registerOutParameter(7, java.sql.Types.VARCHAR); + cstmt.registerOutParameter(8, java.sql.Types.BIGINT); + cstmt.registerOutParameter(9, java.sql.Types.VARCHAR); + + // Execute the query + cstmt.execute(); + + String amRMAddress = cstmt.getString(2); + String clientRMAddress = cstmt.getString(3); + String rmAdminAddress = cstmt.getString(4); + String webAppAddress = cstmt.getString(5); + + // first check if the subCluster exists + if((amRMAddress == null) || (clientRMAddress == null)) { + LOG.warn("The queried SubCluster: {} does not exist.", subClusterId); + return null; + } + + Timestamp heartBeatTimeStamp = cstmt.getTimestamp(6, utcCalendar); + long lastHeartBeat = + heartBeatTimeStamp != null ? 
heartBeatTimeStamp.getTime() : 0; + + SubClusterState state = SubClusterState.fromString(cstmt.getString(7)); + long lastStartTime = cstmt.getLong(8); + String capability = cstmt.getString(9); + + subClusterInfo = SubClusterInfo.newInstance(subClusterId, amRMAddress, + clientRMAddress, rmAdminAddress, webAppAddress, lastHeartBeat, state, + lastStartTime, capability); + + // Check if the output it is a valid subcluster + try { + FederationMembershipStateStoreInputValidator + .checkSubClusterInfo(subClusterInfo); + } catch (FederationStateStoreInvalidInputException e) { + String errMsg = + "SubCluster " + subClusterId.toString() + " does not exist"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Got the information about the specified SubCluster " + + subClusterInfo.toString()); + } + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to obtain the SubCluster information for " + subClusterId, e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return GetSubClusterInfoResponse.newInstance(subClusterInfo); + } + + @Override + public GetSubClustersInfoResponse getSubClusters( + GetSubClustersInfoRequest subClustersRequest) throws YarnException { + CallableStatement cstmt = null; + Connection conn = null; + ResultSet rs = null; + List subClusters = new ArrayList(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_GET_SUBCLUSTERS); + + // Execute the query + rs = cstmt.executeQuery(); + + while (rs.next()) { + + // Extract the output for each tuple + String subClusterName = rs.getString(1); + String amRMAddress = rs.getString(2); + String clientRMAddress = rs.getString(3); + String rmAdminAddress = rs.getString(4); + String webAppAddress = rs.getString(5); + long lastHeartBeat = rs.getTimestamp(6, utcCalendar).getTime(); + SubClusterState state = SubClusterState.fromString(rs.getString(7)); + long lastStartTime = rs.getLong(8); + String capability = rs.getString(9); + + SubClusterId subClusterId = SubClusterId.newInstance(subClusterName); + SubClusterInfo subClusterInfo = SubClusterInfo.newInstance(subClusterId, + amRMAddress, clientRMAddress, rmAdminAddress, webAppAddress, + lastHeartBeat, state, lastStartTime, capability); + + // Check if the output it is a valid subcluster + try { + FederationMembershipStateStoreInputValidator + .checkSubClusterInfo(subClusterInfo); + } catch (FederationStateStoreInvalidInputException e) { + String errMsg = + "SubCluster " + subClusterId.toString() + " is not valid"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + // Filter the inactive + if (!subClustersRequest.getFilterInactiveSubClusters() + || subClusterInfo.getState().isActive()) { + subClusters.add(subClusterInfo); + } + } + + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to obtain the information for all the SubClusters ", e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn, rs); + } + return GetSubClustersInfoResponse.newInstance(subClusters); + } + + @Override + public AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster( + AddApplicationHomeSubClusterRequest request) throws YarnException { + + // Input validator + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + 
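+ // Note: sp_addApplicationHomeSubCluster returns, in out parameter 3, the home
+ // sub-cluster actually stored for this application (which may differ from the
+ // requested one if an earlier submission partially completed), and, in out
+ // parameter 4, the ROWCOUNT of the insert.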
+ CallableStatement cstmt = null; + Connection conn = null; + + String subClusterHome = null; + ApplicationId appId = + request.getApplicationHomeSubCluster().getApplicationId(); + SubClusterId subClusterId = + request.getApplicationHomeSubCluster().getHomeSubCluster(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_ADD_APPLICATION_HOME_SUBCLUSTER); + + // Set the parameters for the stored procedure + cstmt.setString(1, appId.toString()); + cstmt.setString(2, subClusterId.getId()); + cstmt.registerOutParameter(3, java.sql.Types.VARCHAR); + cstmt.registerOutParameter(4, java.sql.Types.INTEGER); + + // Execute the query + cstmt.executeUpdate(); + + subClusterHome = cstmt.getString(3); + SubClusterId subClusterIdHome = SubClusterId.newInstance(subClusterHome); + + // For failover reason, we check the returned SubClusterId. + // If it is equal to the subclusterId we sent, the call added the new + // application into FederationStateStore. If the call returns a different + // SubClusterId it means we already tried to insert this application but a + // component (Router/StateStore/RM) failed during the submission. + if (subClusterId.equals(subClusterIdHome)) { + // Check the ROWCOUNT value, if it is equal to 0 it means the call + // did not add a new application into FederationStateStore + if (cstmt.getInt(4) == 0) { + String errMsg = "The application " + appId + + " was not insert into the StateStore"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + // Check the ROWCOUNT value, if it is different from 1 it means the call + // had a wrong behavior. Maybe the database is not set correctly. + if (cstmt.getInt(4) != 1) { + String errMsg = "Wrong behavior during the insertion of SubCluster " + + subClusterId; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + LOG.info("Insert into the StateStore the application: " + appId + + " in SubCluster: " + subClusterHome); + } else { + // Check the ROWCOUNT value, if it is different from 0 it means the call + // did edited the table + if (cstmt.getInt(4) != 0) { + String errMsg = + "The application " + appId + " does exist but was overwritten"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + LOG.info("Application: " + appId + " already present with SubCluster: " + + subClusterHome); + } + + } catch (SQLException e) { + FederationStateStoreUtils + .logAndThrowRetriableException(LOG, + "Unable to insert the newly generated application " + + request.getApplicationHomeSubCluster().getApplicationId(), + e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return AddApplicationHomeSubClusterResponse + .newInstance(SubClusterId.newInstance(subClusterHome)); + } + + @Override + public UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster( + UpdateApplicationHomeSubClusterRequest request) throws YarnException { + + // Input validator + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + + CallableStatement cstmt = null; + Connection conn = null; + + ApplicationId appId = + request.getApplicationHomeSubCluster().getApplicationId(); + SubClusterId subClusterId = + request.getApplicationHomeSubCluster().getHomeSubCluster(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_UPDATE_APPLICATION_HOME_SUBCLUSTER); + + // Set the parameters for the stored procedure + cstmt.setString(1, appId.toString()); + 
cstmt.setString(2, subClusterId.getId()); + cstmt.registerOutParameter(3, java.sql.Types.INTEGER); + + // Execute the query + cstmt.executeUpdate(); + + // Check the ROWCOUNT value, if it is equal to 0 it means the call + // did not update the application into FederationStateStore + if (cstmt.getInt(3) == 0) { + String errMsg = "Application " + appId + " does not exist"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + // Check the ROWCOUNT value, if it is different from 1 it means the call + // had a wrong behavior. Maybe the database is not set correctly. + if (cstmt.getInt(3) != 1) { + String errMsg = + "Wrong behavior during the update of SubCluster " + subClusterId; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + LOG.info( + "Update the SubCluster to {} for application {} in the StateStore", + subClusterId, appId); + + } catch (SQLException e) { + FederationStateStoreUtils + .logAndThrowRetriableException(LOG, + "Unable to update the application " + + request.getApplicationHomeSubCluster().getApplicationId(), + e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return UpdateApplicationHomeSubClusterResponse.newInstance(); + } + + @Override + public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( + GetApplicationHomeSubClusterRequest request) throws YarnException { + // Input validator + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + + CallableStatement cstmt = null; + Connection conn = null; + + SubClusterId homeRM = null; + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_GET_APPLICATION_HOME_SUBCLUSTER); + + // Set the parameters for the stored procedure + cstmt.setString(1, request.getApplicationId().toString()); + cstmt.registerOutParameter(2, java.sql.Types.VARCHAR); + + // Execute the query + cstmt.execute(); + + if (cstmt.getString(2) != null) { + homeRM = SubClusterId.newInstance(cstmt.getString(2)); + } else { + String errMsg = + "Application " + request.getApplicationId() + " does not exist"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Got the information about the specified application " + + request.getApplicationId() + ". 
The AM is running in " + homeRM); + } + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to obtain the application information " + + "for the specified application " + request.getApplicationId(), + e); + } finally { + + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return GetApplicationHomeSubClusterResponse + .newInstance(ApplicationHomeSubCluster + .newInstance(request.getApplicationId(), homeRM)); + } + + @Override + public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( + GetApplicationsHomeSubClusterRequest request) throws YarnException { + CallableStatement cstmt = null; + Connection conn = null; + ResultSet rs = null; + List appsHomeSubClusters = + new ArrayList(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_GET_APPLICATIONS_HOME_SUBCLUSTER); + + // Execute the query + rs = cstmt.executeQuery(); + + while (rs.next()) { + + // Extract the output for each tuple + String applicationId = rs.getString(1); + String homeSubCluster = rs.getString(2); + + appsHomeSubClusters.add(ApplicationHomeSubCluster.newInstance( + ApplicationId.fromString(applicationId), + SubClusterId.newInstance(homeSubCluster))); + } + + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to obtain the information for all the applications ", e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn, rs); + } + return GetApplicationsHomeSubClusterResponse + .newInstance(appsHomeSubClusters); + } + + @Override + public DeleteApplicationHomeSubClusterResponse deleteApplicationHomeSubCluster( + DeleteApplicationHomeSubClusterRequest request) throws YarnException { + + // Input validator + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + + CallableStatement cstmt = null; + Connection conn = null; + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_DELETE_APPLICATION_HOME_SUBCLUSTER); + + // Set the parameters for the stored procedure + cstmt.setString(1, request.getApplicationId().toString()); + cstmt.registerOutParameter(2, java.sql.Types.INTEGER); + + // Execute the query + cstmt.executeUpdate(); + + // Check the ROWCOUNT value, if it is equal to 0 it means the call + // did not delete the application from FederationStateStore + if (cstmt.getInt(2) == 0) { + String errMsg = + "Application " + request.getApplicationId() + " does not exist"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + // Check the ROWCOUNT value, if it is different from 1 it means the call + // had a wrong behavior. Maybe the database is not set correctly. 
+ if (cstmt.getInt(2) != 1) { + String errMsg = "Wrong behavior during deleting the application " + + request.getApplicationId(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + LOG.info("Delete from the StateStore the application: {}", + request.getApplicationId()); + + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to delete the application " + request.getApplicationId(), e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return DeleteApplicationHomeSubClusterResponse.newInstance(); + } + + @Override + public GetSubClusterPolicyConfigurationResponse getPolicyConfiguration( + GetSubClusterPolicyConfigurationRequest request) throws YarnException { + + // Input validator + FederationPolicyStoreInputValidator.validate(request); + + CallableStatement cstmt = null; + Connection conn = null; + SubClusterPolicyConfiguration subClusterPolicyConfiguration = null; + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_GET_POLICY_CONFIGURATION); + + // Set the parameters for the stored procedure + cstmt.setString(1, request.getQueue()); + cstmt.registerOutParameter(2, java.sql.Types.VARCHAR); + cstmt.registerOutParameter(3, java.sql.Types.VARBINARY); + + // Execute the query + cstmt.executeUpdate(); + + // Check if the output it is a valid policy + if (cstmt.getString(2) != null && cstmt.getBytes(3) != null) { + subClusterPolicyConfiguration = + SubClusterPolicyConfiguration.newInstance(request.getQueue(), + cstmt.getString(2), ByteBuffer.wrap(cstmt.getBytes(3))); + if (LOG.isDebugEnabled()) { + LOG.debug("Selected from StateStore the policy for the queue: " + + subClusterPolicyConfiguration.toString()); + } + } else { + LOG.warn("Policy for queue: {} does not exist.", request.getQueue()); + return null; + } + + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to select the policy for the queue :" + request.getQueue(), + e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return GetSubClusterPolicyConfigurationResponse + .newInstance(subClusterPolicyConfiguration); + } + + @Override + public SetSubClusterPolicyConfigurationResponse setPolicyConfiguration( + SetSubClusterPolicyConfigurationRequest request) throws YarnException { + + // Input validator + FederationPolicyStoreInputValidator.validate(request); + + CallableStatement cstmt = null; + Connection conn = null; + + SubClusterPolicyConfiguration policyConf = request.getPolicyConfiguration(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_SET_POLICY_CONFIGURATION); + + // Set the parameters for the stored procedure + cstmt.setString(1, policyConf.getQueue()); + cstmt.setString(2, policyConf.getType()); + cstmt.setBytes(3, getByteArray(policyConf.getParams())); + cstmt.registerOutParameter(4, java.sql.Types.INTEGER); + + // Execute the query + cstmt.executeUpdate(); + + // Check the ROWCOUNT value, if it is equal to 0 it means the call + // did not add a new policy into FederationStateStore + if (cstmt.getInt(4) == 0) { + String errMsg = "The policy " + policyConf.getQueue() + + " was not insert into the StateStore"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + // Check the ROWCOUNT value, if it is different from 1 it means the call + // had a wrong 
behavior. Maybe the database is not set correctly. + if (cstmt.getInt(4) != 1) { + String errMsg = + "Wrong behavior during insert the policy " + policyConf.getQueue(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + LOG.info("Insert into the state store the policy for the queue: " + + policyConf.getQueue()); + + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to insert the newly generated policy for the queue :" + + policyConf.getQueue(), + e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn); + } + return SetSubClusterPolicyConfigurationResponse.newInstance(); + } + + @Override + public GetSubClusterPoliciesConfigurationsResponse getPoliciesConfigurations( + GetSubClusterPoliciesConfigurationsRequest request) throws YarnException { + + CallableStatement cstmt = null; + Connection conn = null; + ResultSet rs = null; + List policyConfigurations = + new ArrayList(); + + try { + conn = getConnection(); + cstmt = conn.prepareCall(CALL_SP_GET_POLICIES_CONFIGURATIONS); + + // Execute the query + rs = cstmt.executeQuery(); + + while (rs.next()) { + + // Extract the output for each tuple + String queue = rs.getString(1); + String type = rs.getString(2); + byte[] policyInfo = rs.getBytes(3); + + SubClusterPolicyConfiguration subClusterPolicyConfiguration = + SubClusterPolicyConfiguration.newInstance(queue, type, + ByteBuffer.wrap(policyInfo)); + policyConfigurations.add(subClusterPolicyConfiguration); + } + } catch (SQLException e) { + FederationStateStoreUtils.logAndThrowRetriableException(LOG, + "Unable to obtain the policy information for all the queues.", e); + } finally { + // Return to the pool the CallableStatement and the Connection + FederationStateStoreUtils.returnToPool(LOG, cstmt, conn, rs); + } + + return GetSubClusterPoliciesConfigurationsResponse + .newInstance(policyConfigurations); + } + + @Override + public Version getCurrentVersion() { + throw new NotImplementedException(); + } + + @Override + public Version loadVersion() { + throw new NotImplementedException(); + } + + @Override + public void close() throws Exception { + if (dataSource != null) { + dataSource.close(); + } + } + + /** + * Get a connection from the DataSource pool. + * + * @return a connection from the DataSource pool. + * @throws SQLException on failure + */ + public Connection getConnection() throws SQLException { + return dataSource.getConnection(); + } + + private static byte[] getByteArray(ByteBuffer bb) { + byte[] ba = new byte[bb.limit()]; + bb.get(ba); + return ba; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java new file mode 100644 index 00000000000..6ae7d3c688a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/ZookeeperFederationStateStore.java @@ -0,0 +1,634 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.impl; + +import static org.apache.hadoop.util.curator.ZKCuratorManager.getNodePath; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.TimeZone; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.curator.ZKCuratorManager; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterInfoProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterPolicyConfigurationProto; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse; +import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterIdPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterInfoPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterPolicyConfigurationPBImpl; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationApplicationHomeSubClusterStoreInputValidator; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationMembershipStateStoreInputValidator; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator; +import org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils; +import org.apache.hadoop.yarn.server.records.Version; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * ZooKeeper implementation of {@link FederationStateStore}. + * + * The znode structure is as follows: + * ROOT_DIR_PATH + * |--- MEMBERSHIP + * | |----- SC1 + * | |----- SC2 + * |--- APPLICATION + * | |----- APP1 + * | |----- APP2 + * |--- POLICY + * |----- QUEUE1 + * |----- QUEUE1 + */ +public class ZookeeperFederationStateStore implements FederationStateStore { + + private static final Logger LOG = + LoggerFactory.getLogger(ZookeeperFederationStateStore.class); + + private final static String ROOT_ZNODE_NAME_MEMBERSHIP = "memberships"; + private final static String ROOT_ZNODE_NAME_APPLICATION = "applications"; + private final static String ROOT_ZNODE_NAME_POLICY = "policies"; + + /** Interface to Zookeeper. */ + private ZKCuratorManager zkManager; + + /** Directory to store the state store data. 
*/ + private String baseZNode; + + private String appsZNode; + private String membershipZNode; + private String policiesZNode; + + @Override + public void init(Configuration conf) throws YarnException { + LOG.info("Initializing ZooKeeper connection"); + + baseZNode = conf.get( + YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH, + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH); + try { + this.zkManager = new ZKCuratorManager(conf); + this.zkManager.start(); + } catch (IOException e) { + LOG.error("Cannot initialize the ZK connection", e); + } + + // Base znodes + membershipZNode = getNodePath(baseZNode, ROOT_ZNODE_NAME_MEMBERSHIP); + appsZNode = getNodePath(baseZNode, ROOT_ZNODE_NAME_APPLICATION); + policiesZNode = getNodePath(baseZNode, ROOT_ZNODE_NAME_POLICY); + + // Create base znode for each entity + try { + zkManager.createRootDirRecursively(membershipZNode); + zkManager.createRootDirRecursively(appsZNode); + zkManager.createRootDirRecursively(policiesZNode); + } catch (Exception e) { + String errMsg = "Cannot create base directories: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + } + + @Override + public void close() throws Exception { + if (zkManager != null) { + zkManager.close(); + } + } + + @Override + public AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster( + AddApplicationHomeSubClusterRequest request) throws YarnException { + + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + ApplicationHomeSubCluster app = request.getApplicationHomeSubCluster(); + ApplicationId appId = app.getApplicationId(); + + // Try to write the subcluster + SubClusterId homeSubCluster = app.getHomeSubCluster(); + try { + putApp(appId, homeSubCluster, false); + } catch (Exception e) { + String errMsg = "Cannot add application home subcluster for " + appId; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + // Check for the actual subcluster + try { + homeSubCluster = getApp(appId); + } catch (Exception e) { + String errMsg = "Cannot check app home subcluster for " + appId; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + return AddApplicationHomeSubClusterResponse + .newInstance(homeSubCluster); + } + + @Override + public UpdateApplicationHomeSubClusterResponse + updateApplicationHomeSubCluster( + UpdateApplicationHomeSubClusterRequest request) + throws YarnException { + + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + ApplicationHomeSubCluster app = request.getApplicationHomeSubCluster(); + ApplicationId appId = app.getApplicationId(); + SubClusterId homeSubCluster = getApp(appId); + if (homeSubCluster == null) { + String errMsg = "Application " + appId + " does not exist"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + SubClusterId newSubClusterId = + request.getApplicationHomeSubCluster().getHomeSubCluster(); + putApp(appId, newSubClusterId, true); + return UpdateApplicationHomeSubClusterResponse.newInstance(); + } + + @Override + public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( + GetApplicationHomeSubClusterRequest request) throws YarnException { + + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + ApplicationId appId = request.getApplicationId(); + SubClusterId homeSubCluster = getApp(appId); + if (homeSubCluster == null) { + String errMsg = "Application " + appId + " does not exist"; + 
FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + return GetApplicationHomeSubClusterResponse.newInstance( + ApplicationHomeSubCluster.newInstance(appId, homeSubCluster)); + } + + @Override + public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( + GetApplicationsHomeSubClusterRequest request) throws YarnException { + List result = new ArrayList<>(); + + try { + for (String child : zkManager.getChildren(appsZNode)) { + ApplicationId appId = ApplicationId.fromString(child); + SubClusterId homeSubCluster = getApp(appId); + ApplicationHomeSubCluster app = + ApplicationHomeSubCluster.newInstance(appId, homeSubCluster); + result.add(app); + } + } catch (Exception e) { + String errMsg = "Cannot get apps: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + return GetApplicationsHomeSubClusterResponse.newInstance(result); + } + + @Override + public DeleteApplicationHomeSubClusterResponse + deleteApplicationHomeSubCluster( + DeleteApplicationHomeSubClusterRequest request) + throws YarnException { + + FederationApplicationHomeSubClusterStoreInputValidator.validate(request); + ApplicationId appId = request.getApplicationId(); + String appZNode = getNodePath(appsZNode, appId.toString()); + + boolean exists = false; + try { + exists = zkManager.exists(appZNode); + } catch (Exception e) { + String errMsg = "Cannot check app: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + if (!exists) { + String errMsg = "Application " + appId + " does not exist"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + try { + zkManager.delete(appZNode); + } catch (Exception e) { + String errMsg = "Cannot delete app: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + return DeleteApplicationHomeSubClusterResponse.newInstance(); + } + + @Override + public SubClusterRegisterResponse registerSubCluster( + SubClusterRegisterRequest request) throws YarnException { + FederationMembershipStateStoreInputValidator.validate(request); + SubClusterInfo subClusterInfo = request.getSubClusterInfo(); + SubClusterId subclusterId = subClusterInfo.getSubClusterId(); + + // Update the heartbeat time + long currentTime = getCurrentTime(); + subClusterInfo.setLastHeartBeat(currentTime); + + try { + putSubclusterInfo(subclusterId, subClusterInfo, true); + } catch (Exception e) { + String errMsg = "Cannot register subcluster: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + return SubClusterRegisterResponse.newInstance(); + } + + @Override + public SubClusterDeregisterResponse deregisterSubCluster( + SubClusterDeregisterRequest request) throws YarnException { + FederationMembershipStateStoreInputValidator.validate(request); + SubClusterId subClusterId = request.getSubClusterId(); + SubClusterState state = request.getState(); + + // Get the current information and update it + SubClusterInfo subClusterInfo = getSubclusterInfo(subClusterId); + if (subClusterInfo == null) { + String errMsg = "SubCluster " + subClusterId + " not found"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } else { + subClusterInfo.setState(state); + putSubclusterInfo(subClusterId, subClusterInfo, true); + } + + return SubClusterDeregisterResponse.newInstance(); + } + + @Override + public SubClusterHeartbeatResponse subClusterHeartbeat( + SubClusterHeartbeatRequest request) throws YarnException { + + 
FederationMembershipStateStoreInputValidator.validate(request); + SubClusterId subClusterId = request.getSubClusterId(); + + SubClusterInfo subClusterInfo = getSubclusterInfo(subClusterId); + if (subClusterInfo == null) { + String errMsg = "SubCluster " + subClusterId + + " does not exist; cannot heartbeat"; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + long currentTime = getCurrentTime(); + subClusterInfo.setLastHeartBeat(currentTime); + subClusterInfo.setState(request.getState()); + subClusterInfo.setCapability(request.getCapability()); + + putSubclusterInfo(subClusterId, subClusterInfo, true); + + return SubClusterHeartbeatResponse.newInstance(); + } + + @Override + public GetSubClusterInfoResponse getSubCluster( + GetSubClusterInfoRequest request) throws YarnException { + + FederationMembershipStateStoreInputValidator.validate(request); + SubClusterId subClusterId = request.getSubClusterId(); + SubClusterInfo subClusterInfo = null; + try { + subClusterInfo = getSubclusterInfo(subClusterId); + if (subClusterInfo == null) { + LOG.warn("The queried SubCluster: {} does not exist.", subClusterId); + return null; + } + } catch (Exception e) { + String errMsg = "Cannot get subcluster: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + return GetSubClusterInfoResponse.newInstance(subClusterInfo); + } + + @Override + public GetSubClustersInfoResponse getSubClusters( + GetSubClustersInfoRequest request) throws YarnException { + List result = new ArrayList<>(); + + try { + for (String child : zkManager.getChildren(membershipZNode)) { + SubClusterId subClusterId = SubClusterId.newInstance(child); + SubClusterInfo info = getSubclusterInfo(subClusterId); + if (!request.getFilterInactiveSubClusters() || + info.getState().isActive()) { + result.add(info); + } + } + } catch (Exception e) { + String errMsg = "Cannot get subclusters: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + return GetSubClustersInfoResponse.newInstance(result); + } + + + @Override + public GetSubClusterPolicyConfigurationResponse getPolicyConfiguration( + GetSubClusterPolicyConfigurationRequest request) throws YarnException { + + FederationPolicyStoreInputValidator.validate(request); + String queue = request.getQueue(); + SubClusterPolicyConfiguration policy = null; + try { + policy = getPolicy(queue); + } catch (Exception e) { + String errMsg = "Cannot get policy: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + + if (policy == null) { + LOG.warn("Policy for queue: {} does not exist.", queue); + return null; + } + return GetSubClusterPolicyConfigurationResponse + .newInstance(policy); + } + + @Override + public SetSubClusterPolicyConfigurationResponse setPolicyConfiguration( + SetSubClusterPolicyConfigurationRequest request) throws YarnException { + + FederationPolicyStoreInputValidator.validate(request); + SubClusterPolicyConfiguration policy = + request.getPolicyConfiguration(); + try { + String queue = policy.getQueue(); + putPolicy(queue, policy, true); + } catch (Exception e) { + String errMsg = "Cannot set policy: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + return SetSubClusterPolicyConfigurationResponse.newInstance(); + } + + @Override + public GetSubClusterPoliciesConfigurationsResponse getPoliciesConfigurations( + GetSubClusterPoliciesConfigurationsRequest request) throws YarnException { + List result = new 
ArrayList<>(); + + try { + for (String child : zkManager.getChildren(policiesZNode)) { + SubClusterPolicyConfiguration policy = getPolicy(child); + result.add(policy); + } + } catch (Exception e) { + String errMsg = "Cannot get policies: " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + return GetSubClusterPoliciesConfigurationsResponse.newInstance(result); + } + + @Override + public Version getCurrentVersion() { + return null; + } + + @Override + public Version loadVersion() { + return null; + } + + /** + * Get the subcluster for an application. + * @param appId Application identifier. + * @return Subcluster identifier. + * @throws Exception If it cannot contact ZooKeeper. + */ + private SubClusterId getApp(final ApplicationId appId) throws YarnException { + String appZNode = getNodePath(appsZNode, appId.toString()); + + SubClusterId subClusterId = null; + byte[] data = get(appZNode); + if (data != null) { + try { + subClusterId = new SubClusterIdPBImpl( + SubClusterIdProto.parseFrom(data)); + } catch (InvalidProtocolBufferException e) { + String errMsg = "Cannot parse application at " + appZNode; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + } + return subClusterId; + } + + /** + * Put an application. + * @param appId Application identifier. + * @param subClusterId Subcluster identifier. + * @throws Exception If it cannot contact ZooKeeper. + */ + private void putApp(final ApplicationId appId, + final SubClusterId subClusterId, boolean update) + throws YarnException { + String appZNode = getNodePath(appsZNode, appId.toString()); + SubClusterIdProto proto = + ((SubClusterIdPBImpl)subClusterId).getProto(); + byte[] data = proto.toByteArray(); + put(appZNode, data, update); + } + + /** + * Get the current information for a subcluster from Zookeeper. + * @param subclusterId Subcluster identifier. + * @return Subcluster information or null if it doesn't exist. + * @throws Exception If it cannot contact ZooKeeper. + */ + private SubClusterInfo getSubclusterInfo(final SubClusterId subclusterId) + throws YarnException { + String memberZNode = getNodePath(membershipZNode, subclusterId.toString()); + + SubClusterInfo policy = null; + byte[] data = get(memberZNode); + if (data != null) { + try { + policy = new SubClusterInfoPBImpl( + SubClusterInfoProto.parseFrom(data)); + } catch (InvalidProtocolBufferException e) { + String errMsg = "Cannot parse subcluster info at " + memberZNode; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + } + return policy; + } + + /** + * Put the subcluster information in Zookeeper. + * @param subclusterId Subcluster identifier. + * @param subClusterInfo Subcluster information. + * @throws Exception If it cannot contact ZooKeeper. + */ + private void putSubclusterInfo(final SubClusterId subclusterId, + final SubClusterInfo subClusterInfo, final boolean update) + throws YarnException { + String memberZNode = getNodePath(membershipZNode, subclusterId.toString()); + SubClusterInfoProto proto = + ((SubClusterInfoPBImpl)subClusterInfo).getProto(); + byte[] data = proto.toByteArray(); + put(memberZNode, data, update); + } + + /** + * Get the queue policy from Zookeeper. + * @param queue Name of the queue. + * @return Subcluster policy configuration. + * @throws YarnException If it cannot contact ZooKeeper. 
+ */ + private SubClusterPolicyConfiguration getPolicy(final String queue) + throws YarnException { + String policyZNode = getNodePath(policiesZNode, queue); + + SubClusterPolicyConfiguration policy = null; + byte[] data = get(policyZNode); + if (data != null) { + try { + policy = new SubClusterPolicyConfigurationPBImpl( + SubClusterPolicyConfigurationProto.parseFrom(data)); + } catch (InvalidProtocolBufferException e) { + String errMsg = "Cannot parse policy at " + policyZNode; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + } + return policy; + } + + /** + * Put the subcluster information in Zookeeper. + * @param queue Name of the queue. + * @param policy Subcluster policy configuration. + * @throws YarnException If it cannot contact ZooKeeper. + */ + private void putPolicy(final String queue, + final SubClusterPolicyConfiguration policy, boolean update) + throws YarnException { + String policyZNode = getNodePath(policiesZNode, queue); + + SubClusterPolicyConfigurationProto proto = + ((SubClusterPolicyConfigurationPBImpl)policy).getProto(); + byte[] data = proto.toByteArray(); + put(policyZNode, data, update); + } + + /** + * Get data from a znode in Zookeeper. + * @param znode Path of the znode. + * @return Data in the znode. + * @throws YarnException If it cannot contact ZooKeeper. + */ + private byte[] get(String znode) throws YarnException { + boolean exists = false; + try { + exists = zkManager.exists(znode); + } catch (Exception e) { + String errMsg = "Cannot find znode " + znode; + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + if (!exists) { + LOG.error("{} does not exist", znode); + return null; + } + + byte[] data = null; + try { + data = zkManager.getData(znode); + } catch (Exception e) { + String errMsg = "Cannot get data from znode " + znode + + ": " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + return data; + } + + /** + * Put data into a znode in Zookeeper. + * @param znode Path of the znode. + * @param data Data to write. + * @throws YarnException If it cannot contact ZooKeeper. + */ + private void put(String znode, byte[] data, boolean update) + throws YarnException { + // Create the znode + boolean created = false; + try { + created = zkManager.create(znode); + } catch (Exception e) { + String errMsg = "Cannot create znode " + znode + ": " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + if (!created) { + LOG.debug("{} not created", znode); + if (!update) { + LOG.info("{} already existed and we are not updating", znode); + return; + } + } + + // Write the data into the znode + try { + zkManager.setData(znode, data, -1); + } catch (Exception e) { + String errMsg = "Cannot write data into znode " + znode + + ": " + e.getMessage(); + FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); + } + } + + /** + * Get the current time. + * @return Current time in milliseconds. 
+ */ + private static long getCurrentTime() { + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + return cal.getTimeInMillis(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/package-info.java new file mode 100644 index 00000000000..56e1274bb66 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.store.impl; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/package-info.java new file mode 100644 index 00000000000..33179e9fe9b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.store; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/AddApplicationHomeSubClusterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/AddApplicationHomeSubClusterRequest.java new file mode 100644 index 00000000000..9cb05890bd8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/AddApplicationHomeSubClusterRequest.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The request sent by the Router to the Federation state store to map the
+ * home subcluster of a newly submitted application.
+ *
+ * The request includes the mapping details, i.e.:
+ * <ul>
+ * <li>{@code ApplicationId}</li>
+ * <li>{@code SubClusterId}</li>
+ * </ul>
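+ *
+ * A minimal construction sketch (illustrative only; the application id and
+ * sub-cluster name below are example values, not part of this API):
+ *
+ * <pre>{@code
+ * ApplicationId appId =
+ *     ApplicationId.fromString("application_1478548053000_0001");
+ * SubClusterId home = SubClusterId.newInstance("SC-1");
+ * AddApplicationHomeSubClusterRequest request =
+ *     AddApplicationHomeSubClusterRequest.newInstance(
+ *         ApplicationHomeSubCluster.newInstance(appId, home));
+ * }</pre>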
+ */ +@Private +@Unstable +public abstract class AddApplicationHomeSubClusterRequest { + + @Private + @Unstable + public static AddApplicationHomeSubClusterRequest newInstance( + ApplicationHomeSubCluster applicationHomeSubCluster) { + AddApplicationHomeSubClusterRequest mapRequest = + Records.newRecord(AddApplicationHomeSubClusterRequest.class); + mapRequest.setApplicationHomeSubCluster(applicationHomeSubCluster); + return mapRequest; + } + + /** + * Get the {@link ApplicationHomeSubCluster} representing the mapping of the + * application to it's home sub-cluster. + * + * @return the mapping of the application to it's home sub-cluster. + */ + @Public + @Unstable + public abstract ApplicationHomeSubCluster getApplicationHomeSubCluster(); + + /** + * Set the {@link ApplicationHomeSubCluster} representing the mapping of the + * application to it's home sub-cluster. + * + * @param applicationHomeSubCluster the mapping of the application to it's + * home sub-cluster. + */ + @Private + @Unstable + public abstract void setApplicationHomeSubCluster( + ApplicationHomeSubCluster applicationHomeSubCluster); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/AddApplicationHomeSubClusterResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/AddApplicationHomeSubClusterResponse.java new file mode 100644 index 00000000000..913f8e6f71c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/AddApplicationHomeSubClusterResponse.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * AddApplicationHomeSubClusterResponse contains the answer from the + * {@code FederationApplicationHomeSubClusterStore} to a request to insert a + * newly generated applicationId and its owner. + * + * The response contains application's home sub-cluster as it is stored in the + * {@code FederationApplicationHomeSubClusterStore}. If a mapping for the + * application already existed, the {@code SubClusterId} in this response will + * return the existing mapping which might be different from that in the + * {@code AddApplicationHomeSubClusterRequest}. + */ +@Private +@Unstable +public abstract class AddApplicationHomeSubClusterResponse { + + @Private + @Unstable + public static AddApplicationHomeSubClusterResponse newInstance( + SubClusterId homeSubCluster) { + AddApplicationHomeSubClusterResponse response = + Records.newRecord(AddApplicationHomeSubClusterResponse.class); + response.setHomeSubCluster(homeSubCluster); + return response; + } + + /** + * Set the home sub-cluster that this application has been assigned to. + * + * @param homeSubCluster the {@link SubClusterId} of this application's home + * sub-cluster + */ + public abstract void setHomeSubCluster(SubClusterId homeSubCluster); + + /** + * Get the home sub-cluster that this application has been assigned to. This + * may not match the {@link SubClusterId} in the corresponding response, if + * the mapping for the request's application already existed. + * + * @return the {@link SubClusterId} of this application's home sub-cluster + */ + public abstract SubClusterId getHomeSubCluster(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java new file mode 100644 index 00000000000..5e4c7ccf4ef --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java @@ -0,0 +1,124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * ApplicationHomeSubCluster is a report of the runtime information of the
+ * application that is running in the federated cluster.
+ *
+ * It includes information such as:
+ * <ul>
+ * <li>{@link ApplicationId}</li>
+ * <li>{@link SubClusterId}</li>
+ * </ul>
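+ *
+ * A minimal sketch of building this record (illustrative; the identifiers
+ * below are example values):
+ *
+ * <pre>{@code
+ * ApplicationHomeSubCluster mapping = ApplicationHomeSubCluster.newInstance(
+ *     ApplicationId.fromString("application_1478548053000_0002"),
+ *     SubClusterId.newInstance("SC-2"));
+ * }</pre>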
+ * + */ +@Private +@Unstable +public abstract class ApplicationHomeSubCluster { + + @Private + @Unstable + public static ApplicationHomeSubCluster newInstance(ApplicationId appId, + SubClusterId homeSubCluster) { + ApplicationHomeSubCluster appMapping = + Records.newRecord(ApplicationHomeSubCluster.class); + appMapping.setApplicationId(appId); + appMapping.setHomeSubCluster(homeSubCluster); + return appMapping; + } + + /** + * Get the {@link ApplicationId} representing the unique identifier of the + * application. + * + * @return the application identifier + */ + @Public + @Unstable + public abstract ApplicationId getApplicationId(); + + /** + * Set the {@link ApplicationId} representing the unique identifier of the + * application. + * + * @param applicationId the application identifier + */ + @Private + @Unstable + public abstract void setApplicationId(ApplicationId applicationId); + + /** + * Get the {@link SubClusterId} representing the unique identifier of the home + * subcluster in which the ApplicationMaster of the application is running. + * + * @return the home subcluster identifier + */ + @Public + @Unstable + public abstract SubClusterId getHomeSubCluster(); + + /** + * Set the {@link SubClusterId} representing the unique identifier of the home + * subcluster in which the ApplicationMaster of the application is running. + * + * @param homeSubCluster the home subcluster identifier + */ + @Private + @Unstable + public abstract void setHomeSubCluster(SubClusterId homeSubCluster); + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + ApplicationHomeSubCluster other = (ApplicationHomeSubCluster) obj; + if (!this.getApplicationId().equals(other.getApplicationId())) { + return false; + } + return this.getHomeSubCluster().equals(other.getHomeSubCluster()); + } + + @Override + public int hashCode() { + return getApplicationId().hashCode() * 31 + getHomeSubCluster().hashCode(); + } + + @Override + public String toString() { + return "ApplicationHomeSubCluster [getApplicationId()=" + + getApplicationId() + ", getHomeSubCluster()=" + getHomeSubCluster() + + "]"; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/DeleteApplicationHomeSubClusterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/DeleteApplicationHomeSubClusterRequest.java new file mode 100644 index 00000000000..f678aeec1d8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/DeleteApplicationHomeSubClusterRequest.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.util.Records; + +/** + * The request to Federation state store to delete the mapping of + * home subcluster of a submitted application. + */ +@Private +@Unstable +public abstract class DeleteApplicationHomeSubClusterRequest { + + @Private + @Unstable + public static DeleteApplicationHomeSubClusterRequest newInstance( + ApplicationId applicationId) { + DeleteApplicationHomeSubClusterRequest deleteApplicationRequest = + Records.newRecord(DeleteApplicationHomeSubClusterRequest.class); + deleteApplicationRequest.setApplicationId(applicationId); + return deleteApplicationRequest; + } + + /** + * Get the identifier of the {@link ApplicationId} to be removed from + * Federation state store . + * + * @return the identifier of the application to be removed from Federation + * State Store. + */ + @Public + @Unstable + public abstract ApplicationId getApplicationId(); + + /** + * Set the identifier of the {@link ApplicationId} to be removed from + * Federation state store . + * + * @param applicationId the identifier of the application to be removed from + * Federation State Store. + */ + @Private + @Unstable + public abstract void setApplicationId(ApplicationId applicationId); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/DeleteApplicationHomeSubClusterResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/DeleteApplicationHomeSubClusterResponse.java new file mode 100644 index 00000000000..fb1bef96dbe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/DeleteApplicationHomeSubClusterResponse.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * DeleteApplicationHomeSubClusterResponse contains the answer from the {@code + * FederationApplicationHomeSubClusterStore} to a request to delete the mapping + * of home subcluster of a submitted application. Currently response is empty if + * the operation was successful, if not an exception reporting reason for a + * failure. + */ +@Private +@Unstable +public abstract class DeleteApplicationHomeSubClusterResponse { + + @Private + @Unstable + public static DeleteApplicationHomeSubClusterResponse newInstance() { + DeleteApplicationHomeSubClusterResponse response = + Records.newRecord(DeleteApplicationHomeSubClusterResponse.class); + return response; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationHomeSubClusterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationHomeSubClusterRequest.java new file mode 100644 index 00000000000..a64d22e1802 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationHomeSubClusterRequest.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.util.Records; + +/** + * Request class to obtain the home sub-cluster for the specified + * {@link ApplicationId}. + */ +@Private +@Unstable +public abstract class GetApplicationHomeSubClusterRequest { + + @Private + @Unstable + public static GetApplicationHomeSubClusterRequest newInstance( + ApplicationId appId) { + GetApplicationHomeSubClusterRequest appMapping = + Records.newRecord(GetApplicationHomeSubClusterRequest.class); + appMapping.setApplicationId(appId); + return appMapping; + } + + /** + * Get the {@link ApplicationId} representing the unique identifier of the + * application. + * + * @return the application identifier + */ + @Public + @Unstable + public abstract ApplicationId getApplicationId(); + + /** + * Set the {@link ApplicationId} representing the unique identifier of the + * application. + * + * @param applicationId the application identifier + */ + @Private + @Unstable + public abstract void setApplicationId(ApplicationId applicationId); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationHomeSubClusterResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationHomeSubClusterResponse.java new file mode 100644 index 00000000000..60735b382f1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationHomeSubClusterResponse.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The response sent by the Federation state store to a query for the home
+ * subcluster of a newly submitted application.
+ *
+ * The response includes the mapping details, i.e.:
+ * <ul>
+ * <li>{@code ApplicationId}</li>
+ * <li>{@code SubClusterId}</li>
+ * </ul>
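+ *
+ * A minimal sketch of reading the mapping back (illustrative; assumes a
+ * response already returned by the state store):
+ *
+ * <pre>{@code
+ * ApplicationHomeSubCluster mapping = response.getApplicationHomeSubCluster();
+ * SubClusterId home = mapping.getHomeSubCluster();
+ * }</pre>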
+ */ +@Private +@Unstable +public abstract class GetApplicationHomeSubClusterResponse { + + @Private + @Unstable + public static GetApplicationHomeSubClusterResponse newInstance( + ApplicationHomeSubCluster applicationHomeSubCluster) { + GetApplicationHomeSubClusterResponse mapResponse = + Records.newRecord(GetApplicationHomeSubClusterResponse.class); + mapResponse.setApplicationHomeSubCluster(applicationHomeSubCluster); + return mapResponse; + } + + /** + * Get the {@link ApplicationHomeSubCluster} representing the mapping of the + * application to it's home sub-cluster. + * + * @return the mapping of the application to it's home sub-cluster. + */ + @Public + @Unstable + public abstract ApplicationHomeSubCluster getApplicationHomeSubCluster(); + + /** + * Set the {@link ApplicationHomeSubCluster} representing the mapping of the + * application to it's home sub-cluster. + * + * @param applicationHomeSubCluster the mapping of the application to it's + * home sub-cluster. + */ + @Private + @Unstable + public abstract void setApplicationHomeSubCluster( + ApplicationHomeSubCluster applicationHomeSubCluster); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java new file mode 100644 index 00000000000..60549722093 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterRequest.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * Request class to obtain the home sub-cluster mapping of all active + * applications. + */ +@Private +@Unstable +public abstract class GetApplicationsHomeSubClusterRequest { + + @Private + @Unstable + public static GetApplicationsHomeSubClusterRequest newInstance() { + GetApplicationsHomeSubClusterRequest request = + Records.newRecord(GetApplicationsHomeSubClusterRequest.class); + return request; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterResponse.java new file mode 100644 index 00000000000..ba3d2c678e9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetApplicationsHomeSubClusterResponse.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The response sent by Federation state store to a query for the home
+ * subcluster of all submitted applications.
+ *
+ * The response includes the mapping details, i.e.:
+ * <ul>
+ * <li>{@code ApplicationId}</li>
+ * <li>{@code SubClusterId}</li>
+ * </ul>
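+ *
+ * A minimal iteration sketch (illustrative; assumes a response already
+ * returned by the state store):
+ *
+ * <pre>{@code
+ * for (ApplicationHomeSubCluster mapping : response.getAppsHomeSubClusters()) {
+ *   // route requests for mapping.getApplicationId() to
+ *   // mapping.getHomeSubCluster()
+ * }
+ * }</pre>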
+ */ +@Private +@Unstable +public abstract class GetApplicationsHomeSubClusterResponse { + + @Private + @Unstable + public static GetApplicationsHomeSubClusterResponse newInstance( + List appsHomeSubClusters) { + GetApplicationsHomeSubClusterResponse mapResponse = + Records.newRecord(GetApplicationsHomeSubClusterResponse.class); + mapResponse.setAppsHomeSubClusters(appsHomeSubClusters); + return mapResponse; + } + + /** + * Get the {@link ApplicationHomeSubCluster} list representing the mapping of + * all submitted applications to it's home sub-cluster. + * + * @return the mapping of all submitted application to it's home sub-cluster. + */ + @Public + @Unstable + public abstract List getAppsHomeSubClusters(); + + /** + * Set the {@link ApplicationHomeSubCluster} list representing the mapping of + * all submitted applications to it's home sub-cluster. + * + * @param appsHomeSubClusters the mapping of all submitted application to it's + * home sub-cluster. + */ + @Private + @Unstable + public abstract void setAppsHomeSubClusters( + List appsHomeSubClusters); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoRequest.java new file mode 100644 index 00000000000..656dea948ac --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoRequest.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * Request class to obtain information about a sub-cluster identified by its + * {@link SubClusterId}. + */ +@Private +@Unstable +public abstract class GetSubClusterInfoRequest { + + @Private + @Unstable + public static GetSubClusterInfoRequest newInstance( + SubClusterId subClusterId) { + GetSubClusterInfoRequest subClusterRequest = + Records.newRecord(GetSubClusterInfoRequest.class); + subClusterRequest.setSubClusterId(subClusterId); + return subClusterRequest; + } + + /** + * Get the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @return the subcluster identifier + */ + @Public + @Unstable + public abstract SubClusterId getSubClusterId(); + + /** + * Set the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @param subClusterId the subcluster identifier + */ + @Public + @Unstable + public abstract void setSubClusterId(SubClusterId subClusterId); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoResponse.java new file mode 100644 index 00000000000..f7bc74d6f31 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterInfoResponse.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * Response to a query with {@link SubClusterInfo} about a sub-cluster. + */ +@Private +@Unstable +public abstract class GetSubClusterInfoResponse { + + @Private + @Unstable + public static GetSubClusterInfoResponse newInstance( + SubClusterInfo subClusterInfo) { + GetSubClusterInfoResponse registerSubClusterRequest = + Records.newRecord(GetSubClusterInfoResponse.class); + registerSubClusterRequest.setSubClusterInfo(subClusterInfo); + return registerSubClusterRequest; + } + + /** + * Get the {@link SubClusterInfo} encapsulating the information about the + * sub-cluster. + * + * @return the information pertaining to the sub-cluster + */ + @Public + @Unstable + public abstract SubClusterInfo getSubClusterInfo(); + + /** + * Set the {@link SubClusterInfo} encapsulating the information about the + * sub-cluster. + * + * @param subClusterInfo the information pertaining to the sub-cluster + */ + @Private + @Unstable + public abstract void setSubClusterInfo(SubClusterInfo subClusterInfo); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPoliciesConfigurationsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPoliciesConfigurationsRequest.java new file mode 100644 index 00000000000..8cb84f3070e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPoliciesConfigurationsRequest.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * GetSubClusterPoliciesConfigurationsRequest is a request to the + * {@code FederationPolicyStore} to obtain all policy configurations. + */ +@Private +@Unstable +public abstract class GetSubClusterPoliciesConfigurationsRequest { + public static GetSubClusterPoliciesConfigurationsRequest newInstance() { + return Records.newRecord(GetSubClusterPoliciesConfigurationsRequest.class); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPoliciesConfigurationsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPoliciesConfigurationsResponse.java new file mode 100644 index 00000000000..2eaeb512c10 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPoliciesConfigurationsResponse.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * GetSubClusterPolicyConfigurationResponse contains the answer from the {@code + * FederationPolicyStore} to a request to get all the policies configured in the + * system via a {@link SubClusterPolicyConfiguration}. + */ +@Private +@Unstable +public abstract class GetSubClusterPoliciesConfigurationsResponse { + + @Private + @Unstable + public static GetSubClusterPoliciesConfigurationsResponse newInstance( + List policyConfigurations) { + GetSubClusterPoliciesConfigurationsResponse response = + Records.newRecord(GetSubClusterPoliciesConfigurationsResponse.class); + response.setPoliciesConfigs(policyConfigurations); + return response; + } + + /** + * Get all the policies configured in the system. + * + * @return all the policies configured in the system + */ + @Public + @Unstable + public abstract List getPoliciesConfigs(); + + /** + * Sets all the policies configured in the system. 
+ * + * @param policyConfigurations all the policies configured in the system + */ + @Private + @Unstable + public abstract void setPoliciesConfigs( + List policyConfigurations); + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPolicyConfigurationRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPolicyConfigurationRequest.java new file mode 100644 index 00000000000..c3f49e1e486 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPolicyConfigurationRequest.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * GetSubClusterPolicyConfigurationRequest is a request to the + * {@code FederationPolicyStore} to get the configuration of a policy for a + * given queue. + */ +@Private +@Unstable +public abstract class GetSubClusterPolicyConfigurationRequest { + + @Private + @Unstable + public static GetSubClusterPolicyConfigurationRequest newInstance( + String queueName) { + GetSubClusterPolicyConfigurationRequest request = + Records.newRecord(GetSubClusterPolicyConfigurationRequest.class); + request.setQueue(queueName); + return request; + } + + /** + * Get the name of the queue for which we are requesting a policy + * configuration. + * + * @return the name of the queue + */ + @Public + @Unstable + public abstract String getQueue(); + + /** + * Sets the name of the queue for which we are requesting a policy + * configuration. + * + * @param queueName the name of the queue + */ + @Private + @Unstable + public abstract void setQueue(String queueName); +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPolicyConfigurationResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPolicyConfigurationResponse.java new file mode 100644 index 00000000000..350b2393013 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClusterPolicyConfigurationResponse.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * GetSubClusterPolicyConfigurationResponse contains the answer from the {@code + * FederationPolicyStore} to a request to get the information about how a policy + * should be configured via a {@link SubClusterPolicyConfiguration}. + */ +@Private +@Unstable +public abstract class GetSubClusterPolicyConfigurationResponse { + + @Private + @Unstable + public static GetSubClusterPolicyConfigurationResponse newInstance( + SubClusterPolicyConfiguration policy) { + GetSubClusterPolicyConfigurationResponse response = + Records.newRecord(GetSubClusterPolicyConfigurationResponse.class); + response.setPolicyConfiguration(policy); + return response; + } + + /** + * Get the policy configuration. + * + * @return the policy configuration for the specified queue + */ + @Public + @Unstable + public abstract SubClusterPolicyConfiguration getPolicyConfiguration(); + + /** + * Sets the policyConfiguration configuration. + * + * @param policyConfiguration the policyConfiguration configuration for the + * specified queue + */ + @Private + @Unstable + public abstract void setPolicyConfiguration( + SubClusterPolicyConfiguration policyConfiguration); + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoRequest.java new file mode 100644 index 00000000000..90d2f99f5dd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoRequest.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * Request class to obtain information about all sub-clusters that are + * participating in federation. 
+ * + * If filterInactiveSubClusters is set to true, only active sub-clusters will be + * returned; otherwise, all sub-clusters will be returned regardless of state. + * By default, filterInactiveSubClusters is true. + */ +@Private +@Unstable +public abstract class GetSubClustersInfoRequest { + + @Public + @Unstable + public static GetSubClustersInfoRequest newInstance( + boolean filterInactiveSubClusters) { + GetSubClustersInfoRequest request = + Records.newRecord(GetSubClustersInfoRequest.class); + request.setFilterInactiveSubClusters(filterInactiveSubClusters); + return request; + } + + /** + * Get the flag that indicates whether only active sub-clusters should be + * returned. + * + * @return whether to filter out inactive sub-clusters + */ + @Public + @Unstable + public abstract boolean getFilterInactiveSubClusters(); + + /** + * Set the flag that indicates whether only active sub-clusters should be + * returned. + * + * @param filterInactiveSubClusters whether to filter out inactive + * sub-clusters + */ + @Public + @Unstable + public abstract void setFilterInactiveSubClusters( + boolean filterInactiveSubClusters); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoResponse.java new file mode 100644 index 00000000000..bcf75aba1ae --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/GetSubClustersInfoResponse.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * Response to a query with list of {@link SubClusterInfo} about all + * sub-clusters that are currently participating in Federation. 
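+ *
+ * <p>Illustrative usage (a minimal sketch using only the factory methods
+ * introduced in this patch; the empty list stands in for real membership
+ * data):
+ * <pre>{@code
+ * GetSubClustersInfoRequest req = GetSubClustersInfoRequest.newInstance(true);
+ * GetSubClustersInfoResponse resp =
+ *     GetSubClustersInfoResponse.newInstance(new ArrayList<SubClusterInfo>());
+ * List<SubClusterInfo> subClusters = resp.getSubClusters();
+ * }</pre>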
+ */ +@Private +@Unstable +public abstract class GetSubClustersInfoResponse { + + @Public + @Unstable + public static GetSubClustersInfoResponse newInstance( + List subClusters) { + GetSubClustersInfoResponse subClusterInfos = + Records.newRecord(GetSubClustersInfoResponse.class); + subClusterInfos.setSubClusters(subClusters); + return subClusterInfos; + } + + /** + * Get the list of {@link SubClusterInfo} representing the information about + * all sub-clusters that are currently participating in Federation. + * + * @return the list of {@link SubClusterInfo} + */ + @Public + @Unstable + public abstract List getSubClusters(); + + /** + * Set the list of {@link SubClusterInfo} representing the information about + * all sub-clusters that are currently participating in Federation. + * + * @param subClusters the list of {@link SubClusterInfo} + */ + @Private + @Unstable + public abstract void setSubClusters(List subClusters); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SetSubClusterPolicyConfigurationRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SetSubClusterPolicyConfigurationRequest.java new file mode 100644 index 00000000000..743ad0ebddd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SetSubClusterPolicyConfigurationRequest.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * SetSubClusterPolicyConfigurationRequest is a request to the + * {@code FederationPolicyStore} to set the policy configuration corresponding + * to a queue. + */ +@Private +@Unstable +public abstract class SetSubClusterPolicyConfigurationRequest { + @Private + @Unstable + public static SetSubClusterPolicyConfigurationRequest newInstance( + SubClusterPolicyConfiguration policy) { + SetSubClusterPolicyConfigurationRequest request = + Records.newRecord(SetSubClusterPolicyConfigurationRequest.class); + request.setPolicyConfiguration(policy); + return request; + } + + /** + * Get the policy configuration assigned to the queue. + * + * @return the policy for the specified queue + */ + @Public + @Unstable + public abstract SubClusterPolicyConfiguration getPolicyConfiguration(); + + /** + * Set the policyConfiguration configuration for the queue. + * + * @param policyConfiguration the policyConfiguration for the specified queue + */ + @Private + @Unstable + public abstract void setPolicyConfiguration( + SubClusterPolicyConfiguration policyConfiguration); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SetSubClusterPolicyConfigurationResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SetSubClusterPolicyConfigurationResponse.java new file mode 100644 index 00000000000..401e984983e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SetSubClusterPolicyConfigurationResponse.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * SetSubClusterPolicyConfigurationResponse contains the answer from the + * {@code FederationPolicyStore} to a request to set for a policy configuration + * for a given queue. + */ +@Private +@Unstable +public abstract class SetSubClusterPolicyConfigurationResponse { + public static SetSubClusterPolicyConfigurationResponse newInstance() { + return Records.newRecord(SetSubClusterPolicyConfigurationResponse.class); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterRequest.java new file mode 100644 index 00000000000..50a50a182fb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterRequest.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The request sent to set the state of a subcluster to either
+ * SC_DECOMMISSIONED, SC_LOST, or SC_UNREGISTERED.
+ *
+ * The update includes details such as:
+ * <ul>
+ * <li>{@link SubClusterId}</li>
+ * <li>{@link SubClusterState}</li>
+ * </ul>
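+ *
+ * <p>Illustrative usage (a minimal sketch; "SC-1" is an example identifier):
+ * <pre>{@code
+ * SubClusterId scId = SubClusterId.newInstance("SC-1");
+ * SubClusterDeregisterRequest req = SubClusterDeregisterRequest
+ *     .newInstance(scId, SubClusterState.SC_DECOMMISSIONED);
+ * }</pre>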
+ */ +@Private +@Unstable +public abstract class SubClusterDeregisterRequest { + + @Private + @Unstable + public static SubClusterDeregisterRequest newInstance( + SubClusterId subClusterId, SubClusterState subClusterState) { + SubClusterDeregisterRequest registerRequest = + Records.newRecord(SubClusterDeregisterRequest.class); + registerRequest.setSubClusterId(subClusterId); + registerRequest.setState(subClusterState); + return registerRequest; + } + + /** + * Get the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @return the subcluster identifier + */ + @Public + @Unstable + public abstract SubClusterId getSubClusterId(); + + /** + * Set the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @param subClusterId the subcluster identifier + */ + @Private + @Unstable + public abstract void setSubClusterId(SubClusterId subClusterId); + + /** + * Get the {@link SubClusterState} of the subcluster. + * + * @return the state of the subcluster + */ + @Public + @Unstable + public abstract SubClusterState getState(); + + /** + * Set the {@link SubClusterState} of the subcluster. + * + * @param state the state of the subCluster + */ + @Private + @Unstable + public abstract void setState(SubClusterState state); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterResponse.java new file mode 100644 index 00000000000..74fe9944813 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterDeregisterResponse.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * SubClusterDeregisterResponse contains the answer from the {@code + * FederationMembershipStateStore} to a request to deregister the sub cluster. + * Currently response is empty if the operation was successful, if not an + * exception reporting reason for a failure. + */ +@Private +@Unstable +public abstract class SubClusterDeregisterResponse { + + @Private + @Unstable + public static SubClusterDeregisterResponse newInstance() { + SubClusterDeregisterResponse response = + Records.newRecord(SubClusterDeregisterResponse.class); + return response; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatRequest.java new file mode 100644 index 00000000000..3a07c18bf2a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatRequest.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * SubClusterHeartbeatRequest is a report of the runtime information of the
+ * subcluster that is participating in federation.
+ *
+ * It includes information such as:
+ * <ul>
+ * <li>{@link SubClusterId}</li>
+ * <li>The URL of the subcluster</li>
+ * <li>The timestamp representing the last start time of the subCluster</li>
+ * <li>{@link SubClusterState}</li>
+ * <li>The current capacity and utilization of the subCluster</li>
+ * </ul>
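+ *
+ * <p>Illustrative usage (a minimal sketch; the empty capability string stands
+ * in for the JAXB-marshalled ClusterMetrics):
+ * <pre>{@code
+ * SubClusterHeartbeatRequest hb = SubClusterHeartbeatRequest.newInstance(
+ *     SubClusterId.newInstance("SC-1"), SubClusterState.SC_RUNNING, "");
+ * }</pre>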
+ */ +@Private +@Unstable +public abstract class SubClusterHeartbeatRequest { + + @Private + @Unstable + public static SubClusterHeartbeatRequest newInstance( + SubClusterId subClusterId, SubClusterState state, String capability) { + return newInstance(subClusterId, 0, state, capability); + } + + @Private + @Unstable + public static SubClusterHeartbeatRequest newInstance( + SubClusterId subClusterId, long lastHeartBeat, SubClusterState state, + String capability) { + SubClusterHeartbeatRequest subClusterHeartbeatRequest = + Records.newRecord(SubClusterHeartbeatRequest.class); + subClusterHeartbeatRequest.setSubClusterId(subClusterId); + subClusterHeartbeatRequest.setLastHeartBeat(lastHeartBeat); + subClusterHeartbeatRequest.setState(state); + subClusterHeartbeatRequest.setCapability(capability); + return subClusterHeartbeatRequest; + } + + /** + * Get the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @return the subcluster identifier + */ + @Public + @Unstable + public abstract SubClusterId getSubClusterId(); + + /** + * Set the {@link SubClusterId} representing the unique identifier of the + * subCluster. + * + * @param subClusterId the subCluster identifier + */ + @Private + @Unstable + public abstract void setSubClusterId(SubClusterId subClusterId); + + /** + * Get the last heart beat time of the subcluster. + * + * @return the state of the subcluster + */ + @Public + @Unstable + public abstract long getLastHeartBeat(); + + /** + * Set the last heartbeat time of the subcluster. + * + * @param time the last heartbeat time of the subcluster + */ + @Private + @Unstable + public abstract void setLastHeartBeat(long time); + + /** + * Get the {@link SubClusterState} of the subcluster. + * + * @return the state of the subcluster + */ + @Public + @Unstable + public abstract SubClusterState getState(); + + /** + * Set the {@link SubClusterState} of the subcluster. + * + * @param state the state of the subCluster + */ + @Private + @Unstable + public abstract void setState(SubClusterState state); + + /** + * Get the current capacity and utilization of the subcluster. This is the + * JAXB marshalled string representation of the ClusterMetrics. + * + * @return the current capacity and utilization of the subcluster + */ + @Public + @Unstable + public abstract String getCapability(); + + /** + * Set the current capacity and utilization of the subCluster. This is the + * JAXB marshalled string representation of the ClusterMetrics. 
+ * + * @param capability the current capacity and utilization of the subcluster + */ + @Private + @Unstable + public abstract void setCapability(String capability); + + @Override + public String toString() { + return "SubClusterHeartbeatRequest [getSubClusterId() = " + + getSubClusterId() + ", getState() = " + getState() + + ", getLastHeartBeat = " + getLastHeartBeat() + ", getCapability() = " + + getCapability() + "]"; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatResponse.java new file mode 100644 index 00000000000..0b7fd8cd2ac --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterHeartbeatResponse.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * SubClusterHeartbeatResponse contains the response from the {@code + * FederationMembershipStateStore} to a periodic heartbeat to indicate + * liveliness from a ResourceManager participating in federation. + * Currently response is empty if the operation was successful, if not an + * exception reporting reason for a failure. + *

+ * NOTE: This can be extended to push down policies in future + */ +@Private +@Unstable +public abstract class SubClusterHeartbeatResponse { + + @Private + @Unstable + public static SubClusterHeartbeatResponse newInstance() { + SubClusterHeartbeatResponse response = + Records.newRecord(SubClusterHeartbeatResponse.class); + return response; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterId.java new file mode 100644 index 00000000000..fec967d86bd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterId.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * SubClusterId represents the globally unique identifier for a
+ * subcluster that is participating in federation.
+ *
+ * The globally unique nature of the identifier is obtained from the + * FederationMembershipStateStore on initialization. + */ +@Private +@Unstable +public abstract class SubClusterId implements Comparable { + + @Private + @Unstable + public static SubClusterId newInstance(String subClusterId) { + SubClusterId id = Records.newRecord(SubClusterId.class); + id.setId(subClusterId); + return id; + } + + /** + * Get the string identifier of the subcluster which is unique across + * the federated cluster. The identifier is static, i.e. preserved across + * restarts and failover. + * + * @return unique identifier of the subcluster + */ + @Public + @Unstable + public abstract String getId(); + + /** + * Set the string identifier of the subcluster which is unique across + * the federated cluster. The identifier is static, i.e. preserved across + * restarts and failover. + * + * @param subClusterId unique identifier of the subcluster + */ + @Private + @Unstable + protected abstract void setId(String subClusterId); + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SubClusterId other = (SubClusterId) obj; + return this.getId().equals(other.getId()); + } + + @Override + public int hashCode() { + return getId().hashCode(); + } + + @Override + public int compareTo(SubClusterId other) { + return getId().compareTo(other.getId()); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getId()); + return sb.toString(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterIdInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterIdInfo.java new file mode 100644 index 00000000000..e2260a1f457 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterIdInfo.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +/** + * This class represent a sub-cluster identifier in the JSON representation + * of the policy configuration. 
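+ *
+ * <p>Illustrative usage (a minimal sketch):
+ * <pre>{@code
+ * SubClusterIdInfo info = new SubClusterIdInfo("SC-1");
+ * SubClusterId id = info.toId();
+ * }</pre>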
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +@XmlRootElement(name = "federation-policy") +@XmlAccessorType(XmlAccessType.FIELD) +public class SubClusterIdInfo { + + private String id; + + public SubClusterIdInfo() { + //JAXB needs this + } + + public SubClusterIdInfo(String subClusterId) { + this.id = subClusterId; + } + + public SubClusterIdInfo(SubClusterId subClusterId) { + this.id = subClusterId.getId(); + } + + /** + * Get the sub-cluster identifier as {@link SubClusterId}. + * @return the sub-cluster id. + */ + public SubClusterId toId() { + return SubClusterId.newInstance(id); + } + + @Override + public boolean equals(Object other) { + if (other instanceof SubClusterIdInfo) { + if (((SubClusterIdInfo) other).id.equals(this.id)) { + return true; + } + } + return false; + } + + @Override + public int hashCode() { + return id.hashCode(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterInfo.java new file mode 100644 index 00000000000..cbf64e6126b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterInfo.java @@ -0,0 +1,325 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * SubClusterInfo is a report of the runtime information of the subcluster
+ * that is participating in federation.
+ *
+ * It includes information such as:
+ * <ul>
+ * <li>{@link SubClusterId}</li>
+ * <li>The URL of the subcluster</li>
+ * <li>The timestamp representing the last start time of the subCluster</li>
+ * <li>{@link SubClusterState}</li>
+ * <li>The current capacity and utilization of the subCluster</li>
+ * </ul>
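+ *
+ * <p>Illustrative usage (a minimal sketch; the host names and ports are
+ * example values only):
+ * <pre>{@code
+ * SubClusterInfo info = SubClusterInfo.newInstance(
+ *     SubClusterId.newInstance("SC-1"),
+ *     "rm1.example.com:8030", "rm1.example.com:8032",
+ *     "rm1.example.com:8033", "rm1.example.com:8088",
+ *     SubClusterState.SC_NEW, System.currentTimeMillis(), "");
+ * }</pre>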
+ */ +@Private +@Unstable +public abstract class SubClusterInfo { + + @Private + @Unstable + public static SubClusterInfo newInstance(SubClusterId subClusterId, + String amRMServiceAddress, String clientRMServiceAddress, + String rmAdminServiceAddress, String rmWebServiceAddress, + SubClusterState state, long lastStartTime, String capability) { + return newInstance(subClusterId, amRMServiceAddress, clientRMServiceAddress, + rmAdminServiceAddress, rmWebServiceAddress, 0, state, lastStartTime, + capability); + } + + @Private + @Unstable + public static SubClusterInfo newInstance(SubClusterId subClusterId, + String amRMServiceAddress, String clientRMServiceAddress, + String rmAdminServiceAddress, String rmWebServiceAddress, + long lastHeartBeat, SubClusterState state, long lastStartTime, + String capability) { + SubClusterInfo subClusterInfo = Records.newRecord(SubClusterInfo.class); + subClusterInfo.setSubClusterId(subClusterId); + subClusterInfo.setAMRMServiceAddress(amRMServiceAddress); + subClusterInfo.setClientRMServiceAddress(clientRMServiceAddress); + subClusterInfo.setRMAdminServiceAddress(rmAdminServiceAddress); + subClusterInfo.setRMWebServiceAddress(rmWebServiceAddress); + subClusterInfo.setLastHeartBeat(lastHeartBeat); + subClusterInfo.setState(state); + subClusterInfo.setLastStartTime(lastStartTime); + subClusterInfo.setCapability(capability); + return subClusterInfo; + } + + /** + * Get the {@link SubClusterId} representing the unique identifier of the + * subcluster. + * + * @return the subcluster identifier + */ + @Public + @Unstable + public abstract SubClusterId getSubClusterId(); + + /** + * Set the {@link SubClusterId} representing the unique identifier of the + * subCluster. + * + * @param subClusterId the subCluster identifier + */ + @Private + @Unstable + public abstract void setSubClusterId(SubClusterId subClusterId); + + /** + * Get the URL of the AM-RM service endpoint of the subcluster + * ResourceManager. + * + * @return the URL of the AM-RM service endpoint of the subcluster + * ResourceManager + */ + @Public + @Unstable + public abstract String getAMRMServiceAddress(); + + /** + * Set the URL of the AM-RM service endpoint of the subcluster + * ResourceManager. + * + * @param amRMServiceAddress the URL of the AM-RM service endpoint of the + * subcluster ResourceManager + */ + @Private + @Unstable + public abstract void setAMRMServiceAddress(String amRMServiceAddress); + + /** + * Get the URL of the client-RM service endpoint of the subcluster + * ResourceManager. + * + * @return the URL of the client-RM service endpoint of the subcluster + * ResourceManager + */ + @Public + @Unstable + public abstract String getClientRMServiceAddress(); + + /** + * Set the URL of the client-RM service endpoint of the subcluster + * ResourceManager. + * + * @param clientRMServiceAddress the URL of the client-RM service endpoint of + * the subCluster ResourceManager + */ + @Private + @Unstable + public abstract void setClientRMServiceAddress(String clientRMServiceAddress); + + /** + * Get the URL of the ResourceManager administration service. + * + * @return the URL of the ResourceManager administration service + */ + @Public + @Unstable + public abstract String getRMAdminServiceAddress(); + + /** + * Set the URL of the ResourceManager administration service. + * + * @param rmAdminServiceAddress the URL of the ResourceManager + * administration service. 
+ */ + @Private + @Unstable + public abstract void setRMAdminServiceAddress(String rmAdminServiceAddress); + + /** + * Get the URL of the ResourceManager web application interface. + * + * @return the URL of the ResourceManager web application + * interface. + */ + @Public + @Unstable + public abstract String getRMWebServiceAddress(); + + /** + * Set the URL of the ResourceManager web application interface. + * + * @param rmWebServiceAddress the URL of the ResourceManager web + * application interface. + */ + @Private + @Unstable + public abstract void setRMWebServiceAddress(String rmWebServiceAddress); + + /** + * Get the last heart beat time of the subcluster. + * + * @return the state of the subcluster + */ + @Public + @Unstable + public abstract long getLastHeartBeat(); + + /** + * Set the last heartbeat time of the subcluster. + * + * @param time the last heartbeat time of the subcluster + */ + @Private + @Unstable + public abstract void setLastHeartBeat(long time); + + /** + * Get the {@link SubClusterState} of the subcluster. + * + * @return the state of the subcluster + */ + @Public + @Unstable + public abstract SubClusterState getState(); + + /** + * Set the {@link SubClusterState} of the subcluster. + * + * @param state the state of the subCluster + */ + @Private + @Unstable + public abstract void setState(SubClusterState state); + + /** + * Get the timestamp representing the last start time of the subcluster. + * + * @return the timestamp representing the last start time of the subcluster + */ + @Public + @Unstable + public abstract long getLastStartTime(); + + /** + * Set the timestamp representing the last start time of the subcluster. + * + * @param lastStartTime the timestamp representing the last start time of the + * subcluster + */ + @Private + @Unstable + public abstract void setLastStartTime(long lastStartTime); + + /** + * Get the current capacity and utilization of the subcluster. This is the + * JAXB marshalled string representation of the ClusterMetrics. + * + * @return the current capacity and utilization of the subcluster + */ + @Public + @Unstable + public abstract String getCapability(); + + /** + * Set the current capacity and utilization of the subCluster. This is the + * JAXB marshalled string representation of the ClusterMetrics. 
+ * + * @param capability the current capacity and utilization of the subcluster + */ + @Private + @Unstable + public abstract void setCapability(String capability); + + @Override + public String toString() { + return "SubClusterInfo [getSubClusterId() = " + getSubClusterId() + + ", getAMRMServiceAddress() = " + getAMRMServiceAddress() + + ", getClientRMServiceAddress() = " + getClientRMServiceAddress() + + ", getRMAdminServiceAddress() = " + getRMAdminServiceAddress() + + ", getRMWebServiceAddress() = " + getRMWebServiceAddress() + + ", getState() = " + getState() + ", getLastStartTime() = " + + getLastStartTime() + ", getCapability() = " + getCapability() + "]"; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SubClusterInfo other = (SubClusterInfo) obj; + if (!this.getSubClusterId().equals(other.getSubClusterId())) { + return false; + } + if (!this.getAMRMServiceAddress().equals(other.getAMRMServiceAddress())) { + return false; + } + if (!this.getClientRMServiceAddress() + .equals(other.getClientRMServiceAddress())) { + return false; + } + if (!this.getRMAdminServiceAddress() + .equals(other.getRMAdminServiceAddress())) { + return false; + } + if (!this.getRMWebServiceAddress().equals(other.getRMWebServiceAddress())) { + return false; + } + if (!this.getState().equals(other.getState())) { + return false; + } + return this.getLastStartTime() == other.getLastStartTime(); + // Capability and HeartBeat fields are not included as they are temporal + // (i.e. timestamps), so they change during the lifetime of the same + // sub-cluster + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + + ((getSubClusterId() == null) ? 0 : getSubClusterId().hashCode()); + result = prime * result + ((getAMRMServiceAddress() == null) ? 0 + : getAMRMServiceAddress().hashCode()); + result = prime * result + ((getClientRMServiceAddress() == null) ? 0 + : getClientRMServiceAddress().hashCode()); + result = prime * result + ((getRMAdminServiceAddress() == null) ? 0 + : getRMAdminServiceAddress().hashCode()); + result = prime * result + ((getRMWebServiceAddress() == null) ? 0 + : getRMWebServiceAddress().hashCode()); + result = + prime * result + ((getState() == null) ? 0 : getState().hashCode()); + result = prime * result + + (int) (getLastStartTime() ^ (getLastStartTime() >>> 32)); + return result; + // Capability and HeartBeat fields are not included as they are temporal + // (i.e. timestamps), so they change during the lifetime of the same + // sub-cluster + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterPolicyConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterPolicyConfiguration.java new file mode 100644 index 00000000000..52807d97dbd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterPolicyConfiguration.java @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +import java.nio.ByteBuffer; + +// used in javadoc + +/** + * {@link SubClusterPolicyConfiguration} is a class that represents a + * configuration of a policy. For a single queue, it contains a policy type + * (resolve to a class name) and its params as an opaque {@link ByteBuffer}. + * + * Note: by design the params are an opaque ByteBuffer, this allows for enough + * flexibility to evolve the policies without impacting the protocols to/from + * the federation state store. + */ +@Private +@Unstable +public abstract class SubClusterPolicyConfiguration { + + + @Private + @Unstable + public static SubClusterPolicyConfiguration newInstance(String queue, + String policyType, ByteBuffer policyParams) { + SubClusterPolicyConfiguration policy = + Records.newRecord(SubClusterPolicyConfiguration.class); + policy.setQueue(queue); + policy.setType(policyType); + policy.setParams(policyParams); + return policy; + } + + @Private + @Unstable + public static SubClusterPolicyConfiguration newInstance( + SubClusterPolicyConfiguration conf) { + SubClusterPolicyConfiguration policy = + Records.newRecord(SubClusterPolicyConfiguration.class); + policy.setQueue(conf.getQueue()); + policy.setType(conf.getType()); + policy.setParams(conf.getParams()); + return policy; + } + + /** + * Get the name of the queue for which we are configuring a policy. + * + * @return the name of the queue + */ + @Public + @Unstable + public abstract String getQueue(); + + /** + * Sets the name of the queue for which we are configuring a policy. + * + * @param queueName the name of the queue + */ + @Private + @Unstable + public abstract void setQueue(String queueName); + + /** + * Get the type of the policy. This could be random, round-robin, load-based, + * etc. + * + * @return the type of the policy + */ + @Public + @Unstable + public abstract String getType(); + + /** + * Sets the type of the policy. This could be random, round-robin, load-based, + * etc. + * + * @param policyType the type of the policy + */ + @Private + @Unstable + public abstract void setType(String policyType); + + /** + * Get the policy parameters. This affects how the policy behaves and an + * example could be weight distribution of queues across multiple + * sub-clusters. + * + * @return the byte array that contains the parameters + */ + @Public + @Unstable + public abstract ByteBuffer getParams(); + + /** + * Set the policy parameters. 
This affects how the policy behaves and an + * example could be weight distribution of queues across multiple + * sub-clusters. + * + * @param policyParams byte array that describes the policy + */ + @Private + @Unstable + public abstract void setParams(ByteBuffer policyParams); + + @Override + public int hashCode() { + return 31 * getParams().hashCode() + getType().hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SubClusterPolicyConfiguration other = (SubClusterPolicyConfiguration) obj; + if (!this.getType().equals(other.getType())) { + return false; + } + if (!this.getParams().equals(other.getParams())) { + return false; + } + return true; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getType()); + sb.append(" : "); + sb.append(getParams()); + return sb.toString(); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterRegisterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterRegisterRequest.java new file mode 100644 index 00000000000..8864fe30cab --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterRegisterRequest.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * SubClusterRegisterRequest is a request by a sub-cluster
+ * {@code ResourceManager} to participate in federation.
+ *
+ * It includes information such as:
+ * <ul>
+ * <li>{@link SubClusterId}</li>
+ * <li>The URL of the subcluster</li>
+ * <li>The timestamp representing the last start time of the subCluster</li>
+ * <li>{@link SubClusterState}</li>
+ * <li>The current capacity and utilization of the subCluster</li>
+ * </ul>
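+ *
+ * <p>Illustrative usage (a minimal sketch; {@code info} is a
+ * {@link SubClusterInfo} built with its own newInstance factory, as shown in
+ * that class):
+ * <pre>{@code
+ * SubClusterRegisterRequest req = SubClusterRegisterRequest.newInstance(info);
+ * }</pre>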
+ */ +@Private +@Unstable +public abstract class SubClusterRegisterRequest { + + @Private + @Unstable + public static SubClusterRegisterRequest newInstance( + SubClusterInfo subClusterInfo) { + SubClusterRegisterRequest registerSubClusterRequest = + Records.newRecord(SubClusterRegisterRequest.class); + registerSubClusterRequest.setSubClusterInfo(subClusterInfo); + return registerSubClusterRequest; + } + + /** + * Get the {@link SubClusterInfo} encapsulating the information about the + * sub-cluster. + * + * @return the information pertaining to the sub-cluster + */ + @Public + @Unstable + public abstract SubClusterInfo getSubClusterInfo(); + + /** + * Set the {@link SubClusterInfo} encapsulating the information about the + * sub-cluster. + * + * @param subClusterInfo the information pertaining to the sub-cluster + */ + @Public + @Unstable + public abstract void setSubClusterInfo(SubClusterInfo subClusterInfo); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterRegisterResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterRegisterResponse.java new file mode 100644 index 00000000000..060a8573fba --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterRegisterResponse.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * SubClusterRegisterResponse contains the response from the {@code + * FederationMembershipStateStore} to a registration request from a + * ResourceManager to participate in federation. + * + * Currently response is empty if the operation was successful, if not an + * exception reporting reason for a failure. + */ +@Private +@Unstable +public abstract class SubClusterRegisterResponse { + + @Private + @Unstable + public static SubClusterRegisterResponse newInstance() { + SubClusterRegisterResponse response = + Records.newRecord(SubClusterRegisterResponse.class); + return response; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterState.java new file mode 100644 index 00000000000..b30bd32fd02 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterState.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + *

+ * State of a SubCluster. + *
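+ *
+ * <p>Illustrative usage (a minimal sketch):
+ * <pre>{@code
+ * SubClusterState state = SubClusterState.fromString("SC_RUNNING");
+ * boolean usable = state != null && !state.isUnusable();
+ * }</pre>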

+ */ +@Private +@Unstable +public enum SubClusterState { + /** Newly registered subcluster, before the first heartbeat. */ + SC_NEW, + + /** Subcluster is registered and the RM sent a heartbeat recently. */ + SC_RUNNING, + + /** Subcluster is unhealthy. */ + SC_UNHEALTHY, + + /** Subcluster is in the process of being out of service. */ + SC_DECOMMISSIONING, + + /** Subcluster is out of service. */ + SC_DECOMMISSIONED, + + /** RM has not sent a heartbeat for some configured time threshold. */ + SC_LOST, + + /** Subcluster has unregistered. */ + SC_UNREGISTERED; + + public boolean isUnusable() { + return (this != SC_RUNNING && this != SC_NEW); + } + + public boolean isActive() { + return this == SC_RUNNING; + } + + public boolean isFinal() { + return (this == SC_UNREGISTERED || this == SC_DECOMMISSIONED + || this == SC_LOST); + } + + public static final Logger LOG = + LoggerFactory.getLogger(SubClusterState.class); + + /** + * Convert a string into {@code SubClusterState}. + * + * @param x the string to convert in SubClusterState + * @return the respective {@code SubClusterState} + */ + public static SubClusterState fromString(String x) { + try { + return SubClusterState.valueOf(x); + } catch (Exception e) { + LOG.error("Invalid SubCluster State value in the StateStore does not" + + " match with the YARN Federation standard."); + return null; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/UpdateApplicationHomeSubClusterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/UpdateApplicationHomeSubClusterRequest.java new file mode 100644 index 00000000000..eaa92523c83 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/UpdateApplicationHomeSubClusterRequest.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + *

+ * The request sent by the Router to the Federation state store to update the
+ * home subcluster of a newly submitted application.
+ *
+ * The request includes the mapping details, i.e.:
+ * <ul>
+ * <li>{@code ApplicationId}</li>
+ * <li>{@code SubClusterId}</li>
+ * </ul>
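+ *
+ * <p>Illustrative usage (a minimal sketch; {@code appHomeSubCluster} is an
+ * {@link ApplicationHomeSubCluster} record built elsewhere in this patch):
+ * <pre>{@code
+ * UpdateApplicationHomeSubClusterRequest req =
+ *     UpdateApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster);
+ * ApplicationHomeSubCluster mapping = req.getApplicationHomeSubCluster();
+ * }</pre>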
+ */ +@Private +@Unstable +public abstract class UpdateApplicationHomeSubClusterRequest { + + @Private + @Unstable + public static UpdateApplicationHomeSubClusterRequest newInstance( + ApplicationHomeSubCluster applicationHomeSubCluster) { + UpdateApplicationHomeSubClusterRequest updateApplicationRequest = + Records.newRecord(UpdateApplicationHomeSubClusterRequest.class); + updateApplicationRequest + .setApplicationHomeSubCluster(applicationHomeSubCluster); + return updateApplicationRequest; + } + + /** + * Get the {@link ApplicationHomeSubCluster} representing the mapping of the + * application to it's home sub-cluster. + * + * @return the mapping of the application to it's home sub-cluster. + */ + @Public + @Unstable + public abstract ApplicationHomeSubCluster getApplicationHomeSubCluster(); + + /** + * Set the {@link ApplicationHomeSubCluster} representing the mapping of the + * application to it's home sub-cluster. + * + * @param applicationHomeSubCluster the mapping of the application to it's + * home sub-cluster. + */ + @Private + @Unstable + public abstract void setApplicationHomeSubCluster( + ApplicationHomeSubCluster applicationHomeSubCluster); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/UpdateApplicationHomeSubClusterResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/UpdateApplicationHomeSubClusterResponse.java new file mode 100644 index 00000000000..743433551d9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/UpdateApplicationHomeSubClusterResponse.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * UpdateApplicationHomeSubClusterResponse contains the answer from the + * {@code FederationApplicationHomeSubClusterStore} to a request to register the + * home subcluster of a submitted application. Currently response is empty if + * the operation was successful, if not an exception reporting reason for a + * failure. + */ +@Private +@Unstable +public abstract class UpdateApplicationHomeSubClusterResponse { + + @Private + @Unstable + public static UpdateApplicationHomeSubClusterResponse newInstance() { + UpdateApplicationHomeSubClusterResponse response = + Records.newRecord(UpdateApplicationHomeSubClusterResponse.class); + return response; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/AddApplicationHomeSubClusterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/AddApplicationHomeSubClusterRequestPBImpl.java new file mode 100644 index 00000000000..2387cde7b51 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/AddApplicationHomeSubClusterRequestPBImpl.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
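(Hedged sketch of the update flow end to end. The FederationApplicationHomeSubClusterStore interface named in the Javadoc above is not part of this hunk, so the nested Store interface and its method signature here are assumptions made only for illustration.)

    import java.io.IOException;
    import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
    import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse;

    public final class UpdateFlowSketch {
      /** Stand-in for FederationApplicationHomeSubClusterStore (assumed shape). */
      interface Store {
        UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster(
            UpdateApplicationHomeSubClusterRequest request) throws IOException;
      }

      static void moveHome(Store store,
          UpdateApplicationHomeSubClusterRequest request) throws IOException {
        // The response is an empty record: a normal return means the update
        // succeeded, and failures surface as exceptions from the store.
        store.updateApplicationHomeSubCluster(request);
      }
    }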

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.AddApplicationHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.AddApplicationHomeSubClusterRequestProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProto; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link AddApplicationHomeSubClusterRequest}. + */ +@Private +@Unstable +public class AddApplicationHomeSubClusterRequestPBImpl + extends AddApplicationHomeSubClusterRequest { + + private AddApplicationHomeSubClusterRequestProto proto = + AddApplicationHomeSubClusterRequestProto.getDefaultInstance(); + private AddApplicationHomeSubClusterRequestProto.Builder builder = null; + private boolean viaProto = false; + + public AddApplicationHomeSubClusterRequestPBImpl() { + builder = AddApplicationHomeSubClusterRequestProto.newBuilder(); + } + + public AddApplicationHomeSubClusterRequestPBImpl( + AddApplicationHomeSubClusterRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public AddApplicationHomeSubClusterRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = AddApplicationHomeSubClusterRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public ApplicationHomeSubCluster getApplicationHomeSubCluster() { + AddApplicationHomeSubClusterRequestProtoOrBuilder p = + viaProto ? 
proto : builder; + if (!p.hasAppSubclusterMap()) { + return null; + } + return convertFromProtoFormat(p.getAppSubclusterMap()); + } + + @Override + public void setApplicationHomeSubCluster( + ApplicationHomeSubCluster applicationInfo) { + maybeInitBuilder(); + if (applicationInfo == null) { + builder.clearAppSubclusterMap(); + return; + } + builder.setAppSubclusterMap(convertToProtoFormat(applicationInfo)); + } + + private ApplicationHomeSubCluster convertFromProtoFormat( + ApplicationHomeSubClusterProto sc) { + return new ApplicationHomeSubClusterPBImpl(sc); + } + + private ApplicationHomeSubClusterProto convertToProtoFormat( + ApplicationHomeSubCluster sc) { + return ((ApplicationHomeSubClusterPBImpl) sc).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/AddApplicationHomeSubClusterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/AddApplicationHomeSubClusterResponsePBImpl.java new file mode 100644 index 00000000000..b30c41c415a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/AddApplicationHomeSubClusterResponsePBImpl.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
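(Sketch of a protobuf round trip through the PBImpl added above; the same viaProto/builder pattern repeats in every PBImpl that follows. Only constructors and methods visible in this file are used, plus parseFrom and toByteArray from the generated proto class, which is standard protobuf API.)

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.AddApplicationHomeSubClusterRequestProto;
    import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
    import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.AddApplicationHomeSubClusterRequestPBImpl;

    public final class AddRequestWireSketch {
      static byte[] toBytes(ApplicationHomeSubCluster mapping) {
        AddApplicationHomeSubClusterRequestPBImpl request =
            new AddApplicationHomeSubClusterRequestPBImpl();
        request.setApplicationHomeSubCluster(mapping);
        // getProto() folds any pending builder state into an immutable proto.
        return request.getProto().toByteArray();
      }

      static AddApplicationHomeSubClusterRequestPBImpl fromBytes(byte[] wire)
          throws InvalidProtocolBufferException {
        // Wrapping an existing proto starts the record in viaProto mode; a
        // builder is created only if a setter is called later.
        return new AddApplicationHomeSubClusterRequestPBImpl(
            AddApplicationHomeSubClusterRequestProto.parseFrom(wire));
      }
    }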

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.AddApplicationHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.AddApplicationHomeSubClusterResponseProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link AddApplicationHomeSubClusterResponse}. + */ +@Private +@Unstable +public class AddApplicationHomeSubClusterResponsePBImpl + extends AddApplicationHomeSubClusterResponse { + + private AddApplicationHomeSubClusterResponseProto proto = + AddApplicationHomeSubClusterResponseProto.getDefaultInstance(); + private AddApplicationHomeSubClusterResponseProto.Builder builder = null; + private boolean viaProto = false; + + public AddApplicationHomeSubClusterResponsePBImpl() { + builder = AddApplicationHomeSubClusterResponseProto.newBuilder(); + } + + public AddApplicationHomeSubClusterResponsePBImpl( + AddApplicationHomeSubClusterResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = AddApplicationHomeSubClusterResponseProto.newBuilder(proto); + } + viaProto = false; + } + + public AddApplicationHomeSubClusterResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public void setHomeSubCluster(SubClusterId homeSubCluster) { + maybeInitBuilder(); + if (homeSubCluster == null) { + builder.clearHomeSubCluster(); + return; + } + builder.setHomeSubCluster(convertToProtoFormat(homeSubCluster)); + } + + @Override + public SubClusterId getHomeSubCluster() { + AddApplicationHomeSubClusterResponseProtoOrBuilder p = + viaProto ? 
proto : builder; + + if (!p.hasHomeSubCluster()) { + return null; + } + return convertFromProtoFormat(p.getHomeSubCluster()); + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private SubClusterId convertFromProtoFormat(SubClusterIdProto sc) { + return new SubClusterIdPBImpl(sc); + } + + private SubClusterIdProto convertToProtoFormat(SubClusterId sc) { + return ((SubClusterIdPBImpl) sc).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/ApplicationHomeSubClusterPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/ApplicationHomeSubClusterPBImpl.java new file mode 100644 index 00000000000..7e6a564f1dd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/ApplicationHomeSubClusterPBImpl.java @@ -0,0 +1,167 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link ApplicationHomeSubCluster}. + */ +@Private +@Unstable +public class ApplicationHomeSubClusterPBImpl extends ApplicationHomeSubCluster { + + private ApplicationHomeSubClusterProto proto = + ApplicationHomeSubClusterProto.getDefaultInstance(); + private ApplicationHomeSubClusterProto.Builder builder = null; + private boolean viaProto = false; + + private ApplicationId applicationId = null; + private SubClusterId homeSubCluster = null; + + public ApplicationHomeSubClusterPBImpl() { + builder = ApplicationHomeSubClusterProto.newBuilder(); + } + + public ApplicationHomeSubClusterPBImpl(ApplicationHomeSubClusterProto proto) { + this.proto = proto; + viaProto = true; + } + + public ApplicationHomeSubClusterProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ApplicationHomeSubClusterProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.applicationId != null) { + builder.setApplicationId(convertToProtoFormat(this.applicationId)); + } + if (this.homeSubCluster != null) { + builder.setHomeSubCluster(convertToProtoFormat(this.homeSubCluster)); + } + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public ApplicationId getApplicationId() { + ApplicationHomeSubClusterProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasApplicationId()) { + return null; + } + this.applicationId = convertFromProtoFormat(p.getApplicationId()); + return applicationId; + } + + @Override + public void setApplicationId(ApplicationId applicationId) { + maybeInitBuilder(); + if (applicationId == null) { + builder.clearApplicationId(); + return; + } + this.applicationId = applicationId; + } + + @Override + public SubClusterId getHomeSubCluster() { + ApplicationHomeSubClusterProtoOrBuilder p = viaProto ? proto : builder; + if (this.homeSubCluster != null) { + return this.homeSubCluster; + } + if (!p.hasHomeSubCluster()) { + return null; + } + this.homeSubCluster = convertFromProtoFormat(p.getHomeSubCluster()); + return this.homeSubCluster; + } + + @Override + public void setHomeSubCluster(SubClusterId homeSubCluster) { + maybeInitBuilder(); + if (homeSubCluster == null) { + builder.clearHomeSubCluster(); + } + this.homeSubCluster = homeSubCluster; + } + + private SubClusterId convertFromProtoFormat(SubClusterIdProto subClusterId) { + return new SubClusterIdPBImpl(subClusterId); + } + + private SubClusterIdProto convertToProtoFormat(SubClusterId subClusterId) { + return ((SubClusterIdPBImpl) subClusterId).getProto(); + } + + private ApplicationId convertFromProtoFormat(ApplicationIdProto appId) { + return new ApplicationIdPBImpl(appId); + } + + private ApplicationIdProto convertToProtoFormat(ApplicationId appId) { + return ((ApplicationIdPBImpl) appId).getProto(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/DeleteApplicationHomeSubClusterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/DeleteApplicationHomeSubClusterRequestPBImpl.java new file mode 100644 index 00000000000..b4ef680f64f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/DeleteApplicationHomeSubClusterRequestPBImpl.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
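(Sketch, not part of the patch: unlike the request wrapper earlier, ApplicationHomeSubClusterPBImpl above caches the ApplicationId and SubClusterId records in local fields; the setters only record the value, and mergeLocalToBuilder() converts both into proto form when getProto() is eventually called.)

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProto;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
    import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.ApplicationHomeSubClusterPBImpl;

    public final class HomeMappingProtoSketch {
      static ApplicationHomeSubClusterProto build(ApplicationId appId, SubClusterId home) {
        ApplicationHomeSubClusterPBImpl record = new ApplicationHomeSubClusterPBImpl();
        // Both setters only update the local fields here...
        record.setApplicationId(appId);
        record.setHomeSubCluster(home);
        // ...and getProto() merges them into the protobuf builder and builds it.
        return record.getProto();
      }
    }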

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.DeleteApplicationHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.DeleteApplicationHomeSubClusterRequestProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link DeleteApplicationHomeSubClusterRequest}. + */ +@Private +@Unstable +public class DeleteApplicationHomeSubClusterRequestPBImpl + extends DeleteApplicationHomeSubClusterRequest { + + private DeleteApplicationHomeSubClusterRequestProto proto = + DeleteApplicationHomeSubClusterRequestProto.getDefaultInstance(); + private DeleteApplicationHomeSubClusterRequestProto.Builder builder = null; + private boolean viaProto = false; + + public DeleteApplicationHomeSubClusterRequestPBImpl() { + builder = DeleteApplicationHomeSubClusterRequestProto.newBuilder(); + } + + public DeleteApplicationHomeSubClusterRequestPBImpl( + DeleteApplicationHomeSubClusterRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public DeleteApplicationHomeSubClusterRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = DeleteApplicationHomeSubClusterRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public ApplicationId getApplicationId() { + DeleteApplicationHomeSubClusterRequestProtoOrBuilder p = + viaProto ? 
proto : builder; + if (!p.hasApplicationId()) { + return null; + } + return convertFromProtoFormat(p.getApplicationId()); + } + + @Override + public void setApplicationId(ApplicationId applicationId) { + maybeInitBuilder(); + if (applicationId == null) { + builder.clearApplicationId(); + return; + } + builder.setApplicationId(convertToProtoFormat(applicationId)); + } + + private ApplicationId convertFromProtoFormat(ApplicationIdProto appId) { + return new ApplicationIdPBImpl(appId); + } + + private ApplicationIdProto convertToProtoFormat(ApplicationId appId) { + return ((ApplicationIdPBImpl) appId).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/DeleteApplicationHomeSubClusterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/DeleteApplicationHomeSubClusterResponsePBImpl.java new file mode 100644 index 00000000000..8a37b3c42e4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/DeleteApplicationHomeSubClusterResponsePBImpl.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.DeleteApplicationHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link DeleteApplicationHomeSubClusterResponse}. + */ +@Private +@Unstable +public class DeleteApplicationHomeSubClusterResponsePBImpl + extends DeleteApplicationHomeSubClusterResponse { + + private DeleteApplicationHomeSubClusterResponseProto proto = + DeleteApplicationHomeSubClusterResponseProto.getDefaultInstance(); + private DeleteApplicationHomeSubClusterResponseProto.Builder builder = null; + private boolean viaProto = false; + + public DeleteApplicationHomeSubClusterResponsePBImpl() { + builder = DeleteApplicationHomeSubClusterResponseProto.newBuilder(); + } + + public DeleteApplicationHomeSubClusterResponsePBImpl( + DeleteApplicationHomeSubClusterResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public DeleteApplicationHomeSubClusterResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationHomeSubClusterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationHomeSubClusterRequestPBImpl.java new file mode 100644 index 00000000000..585ba81df3a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationHomeSubClusterRequestPBImpl.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationHomeSubClusterRequestProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link GetApplicationHomeSubClusterRequest}. + */ +@Private +@Unstable +public class GetApplicationHomeSubClusterRequestPBImpl + extends GetApplicationHomeSubClusterRequest { + + private GetApplicationHomeSubClusterRequestProto proto = + GetApplicationHomeSubClusterRequestProto.getDefaultInstance(); + private GetApplicationHomeSubClusterRequestProto.Builder builder = null; + private boolean viaProto = false; + + private ApplicationId applicationId = null; + + public GetApplicationHomeSubClusterRequestPBImpl() { + builder = GetApplicationHomeSubClusterRequestProto.newBuilder(); + } + + public GetApplicationHomeSubClusterRequestPBImpl( + GetApplicationHomeSubClusterRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetApplicationHomeSubClusterRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetApplicationHomeSubClusterRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.applicationId != null) { + builder.setApplicationId(convertToProtoFormat(this.applicationId)); + } + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public ApplicationId getApplicationId() { + GetApplicationHomeSubClusterRequestProtoOrBuilder p = + viaProto ? 
proto : builder; + if (applicationId != null) { + return applicationId; + } + + if (!p.hasApplicationId()) { + return null; + } + this.applicationId = convertFromProtoFormat(p.getApplicationId()); + return applicationId; + } + + @Override + public void setApplicationId(ApplicationId applicationId) { + maybeInitBuilder(); + if (applicationId == null) { + builder.clearApplicationId(); + return; + } + this.applicationId = applicationId; + } + + private ApplicationId convertFromProtoFormat(ApplicationIdProto appId) { + return new ApplicationIdPBImpl(appId); + } + + private ApplicationIdProto convertToProtoFormat(ApplicationId appId) { + return ((ApplicationIdPBImpl) appId).getProto(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationHomeSubClusterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationHomeSubClusterResponsePBImpl.java new file mode 100644 index 00000000000..11804882fba --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationHomeSubClusterResponsePBImpl.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationHomeSubClusterResponseProtoOrBuilder; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link GetApplicationHomeSubClusterResponse}. + */ +@Private +@Unstable +public class GetApplicationHomeSubClusterResponsePBImpl + extends GetApplicationHomeSubClusterResponse { + + private GetApplicationHomeSubClusterResponseProto proto = + GetApplicationHomeSubClusterResponseProto.getDefaultInstance(); + private GetApplicationHomeSubClusterResponseProto.Builder builder = null; + private boolean viaProto = false; + + public GetApplicationHomeSubClusterResponsePBImpl() { + builder = GetApplicationHomeSubClusterResponseProto.newBuilder(); + } + + public GetApplicationHomeSubClusterResponsePBImpl( + GetApplicationHomeSubClusterResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetApplicationHomeSubClusterResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetApplicationHomeSubClusterResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public ApplicationHomeSubCluster getApplicationHomeSubCluster() { + GetApplicationHomeSubClusterResponseProtoOrBuilder p = + viaProto ? 
proto : builder; + if (!p.hasAppSubclusterMap()) { + return null; + } + return convertFromProtoFormat(p.getAppSubclusterMap()); + } + + @Override + public void setApplicationHomeSubCluster( + ApplicationHomeSubCluster applicationInfo) { + maybeInitBuilder(); + if (applicationInfo == null) { + builder.clearAppSubclusterMap(); + return; + } + builder.setAppSubclusterMap(convertToProtoFormat(applicationInfo)); + } + + private ApplicationHomeSubCluster convertFromProtoFormat( + ApplicationHomeSubClusterProto sc) { + return new ApplicationHomeSubClusterPBImpl(sc); + } + + private ApplicationHomeSubClusterProto convertToProtoFormat( + ApplicationHomeSubCluster sc) { + return ((ApplicationHomeSubClusterPBImpl) sc).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterRequestPBImpl.java new file mode 100644 index 00000000000..3ce8d7447ae --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterRequestPBImpl.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link GetApplicationsHomeSubClusterRequest}. + */ +@Private +@Unstable +public class GetApplicationsHomeSubClusterRequestPBImpl + extends GetApplicationsHomeSubClusterRequest { + + private GetApplicationsHomeSubClusterRequestProto proto = + GetApplicationsHomeSubClusterRequestProto.getDefaultInstance(); + private GetApplicationsHomeSubClusterRequestProto.Builder builder = null; + private boolean viaProto = false; + + public GetApplicationsHomeSubClusterRequestPBImpl() { + builder = GetApplicationsHomeSubClusterRequestProto.newBuilder(); + } + + public GetApplicationsHomeSubClusterRequestPBImpl( + GetApplicationsHomeSubClusterRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetApplicationsHomeSubClusterRequestProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java new file mode 100644 index 00000000000..8b72a1e0b56 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetApplicationsHomeSubClusterResponsePBImpl.java @@ -0,0 +1,190 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterResponseProtoOrBuilder; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link GetApplicationsHomeSubClusterResponse}. + */ +@Private +@Unstable +public class GetApplicationsHomeSubClusterResponsePBImpl + extends GetApplicationsHomeSubClusterResponse { + + private GetApplicationsHomeSubClusterResponseProto proto = + GetApplicationsHomeSubClusterResponseProto.getDefaultInstance(); + private GetApplicationsHomeSubClusterResponseProto.Builder builder = null; + private boolean viaProto = false; + + private List appsHomeSubCluster; + + public GetApplicationsHomeSubClusterResponsePBImpl() { + builder = GetApplicationsHomeSubClusterResponseProto.newBuilder(); + } + + public GetApplicationsHomeSubClusterResponsePBImpl( + GetApplicationsHomeSubClusterResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetApplicationsHomeSubClusterResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetApplicationsHomeSubClusterResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.appsHomeSubCluster != null) { + addSubClustersInfoToProto(); + } + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public List getAppsHomeSubClusters() { + initSubClustersInfoList(); + return appsHomeSubCluster; + } + + @Override + public void setAppsHomeSubClusters( + List appsHomeSubClusters) { + maybeInitBuilder(); + if (appsHomeSubClusters == null) { + builder.clearAppSubclusterMap(); + return; + } + this.appsHomeSubCluster = appsHomeSubClusters; + } + + private void initSubClustersInfoList() { + if (this.appsHomeSubCluster != null) { + return; + } + GetApplicationsHomeSubClusterResponseProtoOrBuilder p = + viaProto ? proto : builder; + List subClusterInfosList = + p.getAppSubclusterMapList(); + appsHomeSubCluster = new ArrayList(); + + for (ApplicationHomeSubClusterProto r : subClusterInfosList) { + appsHomeSubCluster.add(convertFromProtoFormat(r)); + } + } + + private void addSubClustersInfoToProto() { + maybeInitBuilder(); + builder.clearAppSubclusterMap(); + if (appsHomeSubCluster == null) { + return; + } + Iterable iterable = + new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + + private Iterator iter = + appsHomeSubCluster.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public ApplicationHomeSubClusterProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + + } + + }; + builder.addAllAppSubclusterMap(iterable); + } + + private ApplicationHomeSubCluster convertFromProtoFormat( + ApplicationHomeSubClusterProto sc) { + return new ApplicationHomeSubClusterPBImpl(sc); + } + + private ApplicationHomeSubClusterProto convertToProtoFormat( + ApplicationHomeSubCluster sc) { + return ((ApplicationHomeSubClusterPBImpl) sc).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterInfoRequestPBImpl.java new file mode 100644 index 00000000000..c61c4191b99 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterInfoRequestPBImpl.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
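(Sketch, not part of the patch: the response class above keeps its Java list untouched and converts it lazily; addSubClustersInfoToProto() passes addAllAppSubclusterMap an Iterable whose iterator translates each ApplicationHomeSubCluster to its proto form on demand, so no intermediate proto list is materialized.)

    import java.util.List;
    import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
    import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetApplicationsHomeSubClusterResponsePBImpl;

    public final class ListResponseSketch {
      static byte[] serialize(List<ApplicationHomeSubCluster> mappings) {
        GetApplicationsHomeSubClusterResponsePBImpl response =
            new GetApplicationsHomeSubClusterResponsePBImpl();
        response.setAppsHomeSubClusters(mappings);
        // Each element is converted only while getProto() streams the list
        // into the builder via the lazy Iterable described above.
        return response.getProto().toByteArray();
      }
    }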
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterInfoRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterInfoRequestProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link GetSubClusterInfoRequest}. + */ +@Private +@Unstable +public class GetSubClusterInfoRequestPBImpl extends GetSubClusterInfoRequest { + + private GetSubClusterInfoRequestProto proto = + GetSubClusterInfoRequestProto.getDefaultInstance(); + private GetSubClusterInfoRequestProto.Builder builder = null; + private boolean viaProto = false; + + public GetSubClusterInfoRequestPBImpl() { + builder = GetSubClusterInfoRequestProto.newBuilder(); + } + + public GetSubClusterInfoRequestPBImpl(GetSubClusterInfoRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetSubClusterInfoRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetSubClusterInfoRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public SubClusterId getSubClusterId() { + GetSubClusterInfoRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasSubClusterId()) { + return null; + } + return convertFromProtoFormat(p.getSubClusterId()); + } + + @Override + public void setSubClusterId(SubClusterId subClusterId) { + maybeInitBuilder(); + if (subClusterId == null) { + builder.clearSubClusterId(); + return; + } + builder.setSubClusterId(convertToProtoFormat(subClusterId)); + } + + private SubClusterId convertFromProtoFormat(SubClusterIdProto sc) { + return new SubClusterIdPBImpl(sc); + } + + private SubClusterIdProto convertToProtoFormat(SubClusterId sc) { + return ((SubClusterIdPBImpl) sc).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterInfoResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterInfoResponsePBImpl.java new file mode 100644 index 00000000000..d0bcc33db87 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterInfoResponsePBImpl.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterInfoResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterInfoResponseProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterInfoProto; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link GetSubClusterInfoResponse}. + */ +@Private +@Unstable +public class GetSubClusterInfoResponsePBImpl extends GetSubClusterInfoResponse { + + private GetSubClusterInfoResponseProto proto = + GetSubClusterInfoResponseProto.getDefaultInstance(); + private GetSubClusterInfoResponseProto.Builder builder = null; + private boolean viaProto = false; + + private SubClusterInfo subClusterInfo = null; + + public GetSubClusterInfoResponsePBImpl() { + builder = GetSubClusterInfoResponseProto.newBuilder(); + } + + public GetSubClusterInfoResponsePBImpl(GetSubClusterInfoResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetSubClusterInfoResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetSubClusterInfoResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.subClusterInfo != null) { + builder.setSubClusterInfo(convertToProtoFormat(this.subClusterInfo)); + } + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public SubClusterInfo getSubClusterInfo() { + GetSubClusterInfoResponseProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.subClusterInfo != null) { + return this.subClusterInfo; + } + if (!p.hasSubClusterInfo()) { + return null; + } + this.subClusterInfo = convertFromProtoFormat(p.getSubClusterInfo()); + return this.subClusterInfo; + } + + @Override + public void setSubClusterInfo(SubClusterInfo subClusterInfo) { + maybeInitBuilder(); + if (subClusterInfo == null) { + builder.clearSubClusterInfo(); + } + this.subClusterInfo = subClusterInfo; + } + + private SubClusterInfo convertFromProtoFormat( + SubClusterInfoProto clusterInfo) { + return new SubClusterInfoPBImpl(clusterInfo); + } + + private SubClusterInfoProto convertToProtoFormat(SubClusterInfo clusterInfo) { + return ((SubClusterInfoPBImpl) clusterInfo).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPoliciesConfigurationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPoliciesConfigurationsRequestPBImpl.java new file mode 100644 index 00000000000..3cb66011ab6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPoliciesConfigurationsRequestPBImpl.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPoliciesConfigurationsRequestProto; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link GetSubClusterPoliciesConfigurationsRequest}. + */ +@Private +@Unstable +public class GetSubClusterPoliciesConfigurationsRequestPBImpl + extends GetSubClusterPoliciesConfigurationsRequest { + + private GetSubClusterPoliciesConfigurationsRequestProto proto = + GetSubClusterPoliciesConfigurationsRequestProto.getDefaultInstance(); + private GetSubClusterPoliciesConfigurationsRequestProto.Builder builder = + null; + private boolean viaProto = false; + + public GetSubClusterPoliciesConfigurationsRequestPBImpl() { + builder = GetSubClusterPoliciesConfigurationsRequestProto.newBuilder(); + } + + public GetSubClusterPoliciesConfigurationsRequestPBImpl( + GetSubClusterPoliciesConfigurationsRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetSubClusterPoliciesConfigurationsRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = + GetSubClusterPoliciesConfigurationsRequestProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPoliciesConfigurationsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPoliciesConfigurationsResponsePBImpl.java new file mode 100644 index 00000000000..67c3654d634 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPoliciesConfigurationsResponsePBImpl.java @@ -0,0 +1,191 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPoliciesConfigurationsResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPoliciesConfigurationsResponseProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterPolicyConfigurationProto; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link GetSubClusterPoliciesConfigurationsResponse}. + */ +@Private +@Unstable +public class GetSubClusterPoliciesConfigurationsResponsePBImpl + extends GetSubClusterPoliciesConfigurationsResponse { + + private GetSubClusterPoliciesConfigurationsResponseProto proto = + GetSubClusterPoliciesConfigurationsResponseProto.getDefaultInstance(); + private GetSubClusterPoliciesConfigurationsResponseProto.Builder builder = + null; + private boolean viaProto = false; + + private List<SubClusterPolicyConfiguration> subClusterPolicies = null; + + public GetSubClusterPoliciesConfigurationsResponsePBImpl() { + builder = GetSubClusterPoliciesConfigurationsResponseProto.newBuilder(); + } + + public GetSubClusterPoliciesConfigurationsResponsePBImpl( + GetSubClusterPoliciesConfigurationsResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetSubClusterPoliciesConfigurationsResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ?
proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = + GetSubClusterPoliciesConfigurationsResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.subClusterPolicies != null) { + addSubClusterPoliciesConfigurationsToProto(); + } + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public List<SubClusterPolicyConfiguration> getPoliciesConfigs() { + initSubClusterPoliciesConfigurationsList(); + return this.subClusterPolicies; + } + + @Override + public void setPoliciesConfigs( + List<SubClusterPolicyConfiguration> policyConfigurations) { + maybeInitBuilder(); + if (policyConfigurations == null) { + builder.clearPoliciesConfigurations(); + } + this.subClusterPolicies = policyConfigurations; + } + + private void initSubClusterPoliciesConfigurationsList() { + if (this.subClusterPolicies != null) { + return; + } + GetSubClusterPoliciesConfigurationsResponseProtoOrBuilder p = + viaProto ? proto : builder; + List<SubClusterPolicyConfigurationProto> subClusterPoliciesList = + p.getPoliciesConfigurationsList(); + subClusterPolicies = new ArrayList<SubClusterPolicyConfiguration>(); + + for (SubClusterPolicyConfigurationProto r : subClusterPoliciesList) { + subClusterPolicies.add(convertFromProtoFormat(r)); + } + } + + private void addSubClusterPoliciesConfigurationsToProto() { + maybeInitBuilder(); + builder.clearPoliciesConfigurations(); + if (subClusterPolicies == null) { + return; + } + Iterable<SubClusterPolicyConfigurationProto> iterable = + new Iterable<SubClusterPolicyConfigurationProto>() { + @Override + public Iterator<SubClusterPolicyConfigurationProto> iterator() { + return new Iterator<SubClusterPolicyConfigurationProto>() { + + private Iterator<SubClusterPolicyConfiguration> iter = + subClusterPolicies.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public SubClusterPolicyConfigurationProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + + } + + }; + builder.addAllPoliciesConfigurations(iterable); + } + + private SubClusterPolicyConfiguration convertFromProtoFormat( + SubClusterPolicyConfigurationProto policy) { + return new SubClusterPolicyConfigurationPBImpl(policy); + } + + private SubClusterPolicyConfigurationProto convertToProtoFormat( + SubClusterPolicyConfiguration policy) { + return ((SubClusterPolicyConfigurationPBImpl) policy).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPolicyConfigurationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPolicyConfigurationRequestPBImpl.java new file mode 100644 index 00000000000..35aff172cec --- /dev/null +++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPolicyConfigurationRequestPBImpl.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPolicyConfigurationRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPolicyConfigurationRequestProtoOrBuilder; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link GetSubClusterPolicyConfigurationRequest}. + */ +@Private +@Unstable +public class GetSubClusterPolicyConfigurationRequestPBImpl + extends GetSubClusterPolicyConfigurationRequest { + + private GetSubClusterPolicyConfigurationRequestProto proto = + GetSubClusterPolicyConfigurationRequestProto.getDefaultInstance(); + private GetSubClusterPolicyConfigurationRequestProto.Builder builder = null; + private boolean viaProto = false; + + public GetSubClusterPolicyConfigurationRequestPBImpl() { + builder = GetSubClusterPolicyConfigurationRequestProto.newBuilder(); + } + + public GetSubClusterPolicyConfigurationRequestPBImpl( + GetSubClusterPolicyConfigurationRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetSubClusterPolicyConfigurationRequestProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetSubClusterPolicyConfigurationRequestProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public String getQueue() { + GetSubClusterPolicyConfigurationRequestProtoOrBuilder p = + viaProto ? 
proto : builder; + return p.getQueue(); + } + + @Override + public void setQueue(String queueName) { + maybeInitBuilder(); + if (queueName == null) { + builder.clearQueue(); + return; + } + builder.setQueue(queueName); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPolicyConfigurationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPolicyConfigurationResponsePBImpl.java new file mode 100644 index 00000000000..96b88ae7607 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClusterPolicyConfigurationResponsePBImpl.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPolicyConfigurationResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPolicyConfigurationResponseProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterPolicyConfigurationProto; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link GetSubClusterPolicyConfigurationResponse}. + */ +@Private +@Unstable +public class GetSubClusterPolicyConfigurationResponsePBImpl + extends GetSubClusterPolicyConfigurationResponse { + + private GetSubClusterPolicyConfigurationResponseProto proto = + GetSubClusterPolicyConfigurationResponseProto.getDefaultInstance(); + private GetSubClusterPolicyConfigurationResponseProto.Builder builder = null; + private boolean viaProto = false; + + private SubClusterPolicyConfiguration subClusterPolicy = null; + + public GetSubClusterPolicyConfigurationResponsePBImpl() { + builder = GetSubClusterPolicyConfigurationResponseProto.newBuilder(); + } + + public GetSubClusterPolicyConfigurationResponsePBImpl( + GetSubClusterPolicyConfigurationResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetSubClusterPolicyConfigurationResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetSubClusterPolicyConfigurationResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.subClusterPolicy != null + && !((SubClusterPolicyConfigurationPBImpl) this.subClusterPolicy) + .getProto().equals(builder.getPolicyConfiguration())) { + builder + .setPolicyConfiguration(convertToProtoFormat(this.subClusterPolicy)); + } + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public SubClusterPolicyConfiguration getPolicyConfiguration() { + GetSubClusterPolicyConfigurationResponseProtoOrBuilder p = + viaProto ? 
proto : builder; + if (this.subClusterPolicy != null) { + return this.subClusterPolicy; + } + if (!p.hasPolicyConfiguration()) { + return null; + } + this.subClusterPolicy = convertFromProtoFormat(p.getPolicyConfiguration()); + return this.subClusterPolicy; + } + + @Override + public void setPolicyConfiguration( + SubClusterPolicyConfiguration policyConfiguration) { + maybeInitBuilder(); + if (policyConfiguration == null) { + builder.clearPolicyConfiguration(); + } + this.subClusterPolicy = policyConfiguration; + } + + private SubClusterPolicyConfiguration convertFromProtoFormat( + SubClusterPolicyConfigurationProto policy) { + return new SubClusterPolicyConfigurationPBImpl(policy); + } + + private SubClusterPolicyConfigurationProto convertToProtoFormat( + SubClusterPolicyConfiguration policy) { + return ((SubClusterPolicyConfigurationPBImpl) policy).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClustersInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClustersInfoRequestPBImpl.java new file mode 100644 index 00000000000..2b848c0d285 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClustersInfoRequestPBImpl.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClustersInfoRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClustersInfoRequestProtoOrBuilder; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link GetSubClustersInfoRequest}. + */ +@Private +@Unstable +public class GetSubClustersInfoRequestPBImpl extends GetSubClustersInfoRequest { + + private GetSubClustersInfoRequestProto proto = + GetSubClustersInfoRequestProto.getDefaultInstance(); + private GetSubClustersInfoRequestProto.Builder builder = null; + private boolean viaProto = false; + + public GetSubClustersInfoRequestPBImpl() { + builder = GetSubClustersInfoRequestProto.newBuilder(); + } + + public GetSubClustersInfoRequestPBImpl(GetSubClustersInfoRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetSubClustersInfoRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetSubClustersInfoRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public boolean getFilterInactiveSubClusters() { + GetSubClustersInfoRequestProtoOrBuilder p = viaProto ? 
proto : builder; + return p.getFilterInactiveSubclusters(); + } + + @Override + public void setFilterInactiveSubClusters(boolean filterInactiveSubClusters) { + maybeInitBuilder(); + builder.setFilterInactiveSubclusters(filterInactiveSubClusters); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClustersInfoResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClustersInfoResponsePBImpl.java new file mode 100644 index 00000000000..2efa3b7380c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/GetSubClustersInfoResponsePBImpl.java @@ -0,0 +1,184 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClustersInfoResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClustersInfoResponseProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterInfoProto; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link GetSubClustersInfoResponse}. + */ +@Private +@Unstable +public class GetSubClustersInfoResponsePBImpl + extends GetSubClustersInfoResponse { + + private GetSubClustersInfoResponseProto proto = + GetSubClustersInfoResponseProto.getDefaultInstance(); + private GetSubClustersInfoResponseProto.Builder builder = null; + private boolean viaProto = false; + + private List<SubClusterInfo> subClusterInfos; + + public GetSubClustersInfoResponsePBImpl() { + builder = GetSubClustersInfoResponseProto.newBuilder(); + } + + public GetSubClustersInfoResponsePBImpl( + GetSubClustersInfoResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetSubClustersInfoResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToBuilder() { + if (this.subClusterInfos != null) { + addSubClusterInfosToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetSubClustersInfoResponseProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public List<SubClusterInfo> getSubClusters() { + initSubClustersInfoList(); + return subClusterInfos; + } + + @Override + public void setSubClusters(List<SubClusterInfo> subClusters) { + if (subClusters == null) { + builder.clearSubClusterInfos(); + return; + } + this.subClusterInfos = subClusters; + } + + private void initSubClustersInfoList() { + if (this.subClusterInfos != null) { + return; + } + GetSubClustersInfoResponseProtoOrBuilder p = viaProto ?
proto : builder; + List<SubClusterInfoProto> subClusterInfosList = p.getSubClusterInfosList(); + subClusterInfos = new ArrayList<SubClusterInfo>(); + + for (SubClusterInfoProto r : subClusterInfosList) { + subClusterInfos.add(convertFromProtoFormat(r)); + } + } + + private void addSubClusterInfosToProto() { + maybeInitBuilder(); + builder.clearSubClusterInfos(); + if (subClusterInfos == null) { + return; + } + Iterable<SubClusterInfoProto> iterable = + new Iterable<SubClusterInfoProto>() { + @Override + public Iterator<SubClusterInfoProto> iterator() { + return new Iterator<SubClusterInfoProto>() { + + private Iterator<SubClusterInfo> iter = + subClusterInfos.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public SubClusterInfoProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + + } + + }; + builder.addAllSubClusterInfos(iterable); + } + + private SubClusterInfoProto convertToProtoFormat(SubClusterInfo r) { + return ((SubClusterInfoPBImpl) r).getProto(); + } + + private SubClusterInfoPBImpl convertFromProtoFormat(SubClusterInfoProto r) { + return new SubClusterInfoPBImpl(r); + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SetSubClusterPolicyConfigurationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SetSubClusterPolicyConfigurationRequestPBImpl.java new file mode 100644 index 00000000000..7b7f89dba84 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SetSubClusterPolicyConfigurationRequestPBImpl.java @@ -0,0 +1,142 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SetSubClusterPolicyConfigurationRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SetSubClusterPolicyConfigurationRequestProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterPolicyConfigurationProto; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link SetSubClusterPolicyConfigurationRequest}. + */ +@Private +@Unstable +public class SetSubClusterPolicyConfigurationRequestPBImpl + extends SetSubClusterPolicyConfigurationRequest { + + private SetSubClusterPolicyConfigurationRequestProto proto = + SetSubClusterPolicyConfigurationRequestProto.getDefaultInstance(); + private SetSubClusterPolicyConfigurationRequestProto.Builder builder = null; + private boolean viaProto = false; + + private SubClusterPolicyConfiguration subClusterPolicy = null; + + public SetSubClusterPolicyConfigurationRequestPBImpl() { + builder = SetSubClusterPolicyConfigurationRequestProto.newBuilder(); + } + + public SetSubClusterPolicyConfigurationRequestPBImpl( + SetSubClusterPolicyConfigurationRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public SetSubClusterPolicyConfigurationRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = SetSubClusterPolicyConfigurationRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.subClusterPolicy != null + && !((SubClusterPolicyConfigurationPBImpl) this.subClusterPolicy) + .getProto().equals(builder.getPolicyConfiguration())) { + builder + .setPolicyConfiguration(convertToProtoFormat(this.subClusterPolicy)); + } + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public SubClusterPolicyConfiguration getPolicyConfiguration() { + SetSubClusterPolicyConfigurationRequestProtoOrBuilder p = + viaProto ? 
proto : builder; + if (this.subClusterPolicy != null) { + return this.subClusterPolicy; + } + if (!p.hasPolicyConfiguration()) { + return null; + } + this.subClusterPolicy = convertFromProtoFormat(p.getPolicyConfiguration()); + return this.subClusterPolicy; + } + + @Override + public void setPolicyConfiguration( + SubClusterPolicyConfiguration policyConfiguration) { + maybeInitBuilder(); + if (policyConfiguration == null) { + builder.clearPolicyConfiguration(); + } + this.subClusterPolicy = policyConfiguration; + } + + private SubClusterPolicyConfiguration convertFromProtoFormat( + SubClusterPolicyConfigurationProto policy) { + return new SubClusterPolicyConfigurationPBImpl(policy); + } + + private SubClusterPolicyConfigurationProto convertToProtoFormat( + SubClusterPolicyConfiguration policy) { + return ((SubClusterPolicyConfigurationPBImpl) policy).getProto(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SetSubClusterPolicyConfigurationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SetSubClusterPolicyConfigurationResponsePBImpl.java new file mode 100644 index 00000000000..9d792150892 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SetSubClusterPolicyConfigurationResponsePBImpl.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SetSubClusterPolicyConfigurationResponseProto; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link SetSubClusterPolicyConfigurationResponse}. + */ +@Private +@Unstable +public class SetSubClusterPolicyConfigurationResponsePBImpl + extends SetSubClusterPolicyConfigurationResponse { + + private SetSubClusterPolicyConfigurationResponseProto proto = + SetSubClusterPolicyConfigurationResponseProto.getDefaultInstance(); + private SetSubClusterPolicyConfigurationResponseProto.Builder builder = null; + private boolean viaProto = false; + + public SetSubClusterPolicyConfigurationResponsePBImpl() { + builder = SetSubClusterPolicyConfigurationResponseProto.newBuilder(); + } + + public SetSubClusterPolicyConfigurationResponsePBImpl( + SetSubClusterPolicyConfigurationResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public SetSubClusterPolicyConfigurationResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = SetSubClusterPolicyConfigurationResponseProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java new file mode 100644 index 00000000000..d4c5451471b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java @@ -0,0 +1,156 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterRequestProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterStateProto; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link SubClusterDeregisterRequest}. + */ +@Private +@Unstable +public class SubClusterDeregisterRequestPBImpl + extends SubClusterDeregisterRequest { + + private SubClusterDeregisterRequestProto proto = + SubClusterDeregisterRequestProto.getDefaultInstance(); + private SubClusterDeregisterRequestProto.Builder builder = null; + private boolean viaProto = false; + + public SubClusterDeregisterRequestPBImpl() { + builder = SubClusterDeregisterRequestProto.newBuilder(); + } + + public SubClusterDeregisterRequestPBImpl( + SubClusterDeregisterRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public SubClusterDeregisterRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = SubClusterDeregisterRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public SubClusterId getSubClusterId() { + SubClusterDeregisterRequestProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasSubClusterId()) { + return null; + } + return convertFromProtoFormat(p.getSubClusterId()); + } + + @Override + public void setSubClusterId(SubClusterId subClusterId) { + maybeInitBuilder(); + if (subClusterId == null) { + builder.clearSubClusterId(); + return; + } + builder.setSubClusterId(convertToProtoFormat(subClusterId)); + } + + @Override + public SubClusterState getState() { + SubClusterDeregisterRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasState()) { + return null; + } + return convertFromProtoFormat(p.getState()); + } + + @Override + public void setState(SubClusterState state) { + maybeInitBuilder(); + if (state == null) { + builder.clearState(); + return; + } + builder.setState(convertToProtoFormat(state)); + } + + private SubClusterId convertFromProtoFormat(SubClusterIdProto sc) { + return new SubClusterIdPBImpl(sc); + } + + private SubClusterIdProto convertToProtoFormat(SubClusterId sc) { + return ((SubClusterIdPBImpl) sc).getProto(); + } + + private SubClusterState convertFromProtoFormat(SubClusterStateProto state) { + return SubClusterState.valueOf(state.name()); + } + + private SubClusterStateProto convertToProtoFormat(SubClusterState state) { + return SubClusterStateProto.valueOf(state.name()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterResponsePBImpl.java new file mode 100644 index 00000000000..9e007968e26 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterResponsePBImpl.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterResponseProto; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link SubClusterDeregisterResponse}. + */ +@Private +@Unstable +public class SubClusterDeregisterResponsePBImpl + extends SubClusterDeregisterResponse { + + private SubClusterDeregisterResponseProto proto = + SubClusterDeregisterResponseProto.getDefaultInstance(); + private SubClusterDeregisterResponseProto.Builder builder = null; + private boolean viaProto = false; + + public SubClusterDeregisterResponsePBImpl() { + builder = SubClusterDeregisterResponseProto.newBuilder(); + } + + public SubClusterDeregisterResponsePBImpl( + SubClusterDeregisterResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public SubClusterDeregisterResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterHeartbeatRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterHeartbeatRequestPBImpl.java new file mode 100644 index 00000000000..ca6b154bdc4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterHeartbeatRequestPBImpl.java @@ -0,0 +1,192 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterHeartbeatRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterHeartbeatRequestProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterStateProto; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link SubClusterHeartbeatRequest}. + */ +@Private +@Unstable +public class SubClusterHeartbeatRequestPBImpl + extends SubClusterHeartbeatRequest { + + private SubClusterHeartbeatRequestProto proto = + SubClusterHeartbeatRequestProto.getDefaultInstance(); + private SubClusterHeartbeatRequestProto.Builder builder = null; + private boolean viaProto = false; + + private SubClusterId subClusterId = null; + + public SubClusterHeartbeatRequestPBImpl() { + builder = SubClusterHeartbeatRequestProto.newBuilder(); + } + + public SubClusterHeartbeatRequestPBImpl( + SubClusterHeartbeatRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public SubClusterHeartbeatRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = SubClusterHeartbeatRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.subClusterId != null) { + builder.setSubClusterId(convertToProtoFormat(this.subClusterId)); + } + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public SubClusterId getSubClusterId() { + SubClusterHeartbeatRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.subClusterId != null) { + return this.subClusterId; + } + if (!p.hasSubClusterId()) { + return null; + } + this.subClusterId = convertFromProtoFormat(p.getSubClusterId()); + return this.subClusterId; + } + + @Override + public void setSubClusterId(SubClusterId subClusterId) { + maybeInitBuilder(); + if (subClusterId == null) { + builder.clearSubClusterId(); + } + this.subClusterId = subClusterId; + } + + @Override + public long getLastHeartBeat() { + SubClusterHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder; + return p.getLastHeartBeat(); + } + + @Override + public void setLastHeartBeat(long time) { + maybeInitBuilder(); + builder.setLastHeartBeat(time); + } + + @Override + public SubClusterState getState() { + SubClusterHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasState()) { + return null; + } + return convertFromProtoFormat(p.getState()); + } + + @Override + public void setState(SubClusterState state) { + maybeInitBuilder(); + if (state == null) { + builder.clearState(); + return; + } + builder.setState(convertToProtoFormat(state)); + } + + @Override + public String getCapability() { + SubClusterHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasCapability()) ? p.getCapability() : null; + } + + @Override + public void setCapability(String capability) { + maybeInitBuilder(); + if (capability == null) { + builder.clearCapability(); + return; + } + builder.setCapability(capability); + } + + private SubClusterId convertFromProtoFormat(SubClusterIdProto clusterId) { + return new SubClusterIdPBImpl(clusterId); + } + + private SubClusterIdProto convertToProtoFormat(SubClusterId clusterId) { + return ((SubClusterIdPBImpl) clusterId).getProto(); + } + + private SubClusterState convertFromProtoFormat(SubClusterStateProto state) { + return SubClusterState.valueOf(state.name()); + } + + private SubClusterStateProto convertToProtoFormat(SubClusterState state) { + return SubClusterStateProto.valueOf(state.name()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterHeartbeatResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterHeartbeatResponsePBImpl.java new file mode 100644 index 00000000000..2020c1ad749 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterHeartbeatResponsePBImpl.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterHeartbeatResponseProto; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link SubClusterHeartbeatResponse}. + */ +@Private +@Unstable +public class SubClusterHeartbeatResponsePBImpl + extends SubClusterHeartbeatResponse { + + private SubClusterHeartbeatResponseProto proto = + SubClusterHeartbeatResponseProto.getDefaultInstance(); + private SubClusterHeartbeatResponseProto.Builder builder = null; + private boolean viaProto = false; + + public SubClusterHeartbeatResponsePBImpl() { + builder = SubClusterHeartbeatResponseProto.newBuilder(); + } + + public SubClusterHeartbeatResponsePBImpl( + SubClusterHeartbeatResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public SubClusterHeartbeatResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterIdPBImpl.java new file mode 100644 index 00000000000..1bf96bfc503 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterIdPBImpl.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProtoOrBuilder; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +/** + * Protocol buffer based implementation of {@link SubClusterId}. + */ +@Private +@Unstable +public class SubClusterIdPBImpl extends SubClusterId { + + private SubClusterIdProto proto = SubClusterIdProto.getDefaultInstance(); + private SubClusterIdProto.Builder builder = null; + private boolean viaProto = false; + + public SubClusterIdPBImpl() { + builder = SubClusterIdProto.newBuilder(); + } + + public SubClusterIdPBImpl(SubClusterIdProto proto) { + this.proto = proto; + viaProto = true; + } + + public SubClusterIdProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = SubClusterIdProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public String getId() { + SubClusterIdProtoOrBuilder p = viaProto ? proto : builder; + return p.getId(); + } + + @Override + protected void setId(String subClusterId) { + maybeInitBuilder(); + if (subClusterId == null) { + builder.clearId(); + return; + } + builder.setId(subClusterId); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java new file mode 100644 index 00000000000..cfdd0385cdf --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java @@ -0,0 +1,251 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterInfoProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterInfoProtoOrBuilder; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterStateProto; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; + +import com.google.common.base.Preconditions; +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link SubClusterInfo}. + */ +@Private +@Unstable +public class SubClusterInfoPBImpl extends SubClusterInfo { + + private SubClusterInfoProto proto = SubClusterInfoProto.getDefaultInstance(); + private SubClusterInfoProto.Builder builder = null; + private boolean viaProto = false; + + private SubClusterId subClusterId = null; + + public SubClusterInfoPBImpl() { + builder = SubClusterInfoProto.newBuilder(); + } + + public SubClusterInfoPBImpl(SubClusterInfoProto proto) { + this.proto = proto; + viaProto = true; + } + + public SubClusterInfoProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = SubClusterInfoProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.subClusterId != null) { + builder.setSubClusterId(convertToProtoFormat(this.subClusterId)); + } + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public SubClusterId getSubClusterId() { + SubClusterInfoProtoOrBuilder p = viaProto ? proto : builder; + if (this.subClusterId != null) { + return this.subClusterId; + } + if (!p.hasSubClusterId()) { + return null; + } + this.subClusterId = convertFromProtoFormat(p.getSubClusterId()); + return this.subClusterId; + } + + @Override + public void setSubClusterId(SubClusterId subClusterId) { + maybeInitBuilder(); + if (subClusterId == null) { + builder.clearSubClusterId(); + } + this.subClusterId = subClusterId; + } + + @Override + public String getAMRMServiceAddress() { + SubClusterInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasAMRMServiceAddress()) ? 
p.getAMRMServiceAddress() : null; + } + + @Override + public void setAMRMServiceAddress(String amRMServiceAddress) { + maybeInitBuilder(); + if (amRMServiceAddress == null) { + builder.clearAMRMServiceAddress(); + return; + } + builder.setAMRMServiceAddress(amRMServiceAddress); + } + + @Override + public String getClientRMServiceAddress() { + SubClusterInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasClientRMServiceAddress()) ? p.getClientRMServiceAddress() + : null; + } + + @Override + public void setClientRMServiceAddress(String clientRMServiceAddress) { + maybeInitBuilder(); + if (clientRMServiceAddress == null) { + builder.clearClientRMServiceAddress(); + return; + } + builder.setClientRMServiceAddress(clientRMServiceAddress); + } + + @Override + public String getRMAdminServiceAddress() { + SubClusterInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasRMAdminServiceAddress()) ? p.getRMAdminServiceAddress() : null; + } + + @Override + public void setRMAdminServiceAddress(String rmAdminServiceAddress) { + maybeInitBuilder(); + if (rmAdminServiceAddress == null) { + builder.clearRMAdminServiceAddress(); + return; + } + builder.setRMAdminServiceAddress(rmAdminServiceAddress); + } + + @Override + public String getRMWebServiceAddress() { + SubClusterInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasRMWebServiceAddress()) ? p.getRMWebServiceAddress() : null; + } + + @Override + public void setRMWebServiceAddress(String rmWebServiceAddress) { + maybeInitBuilder(); + if (rmWebServiceAddress == null) { + builder.clearRMWebServiceAddress(); + return; + } + builder.setRMWebServiceAddress(rmWebServiceAddress); + } + + @Override + public long getLastHeartBeat() { + SubClusterInfoProtoOrBuilder p = viaProto ? proto : builder; + return p.getLastHeartBeat(); + } + + @Override + public void setLastHeartBeat(long time) { + maybeInitBuilder(); + builder.setLastHeartBeat(time); + } + + @Override + public SubClusterState getState() { + SubClusterInfoProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasState()) { + return null; + } + return convertFromProtoFormat(p.getState()); + } + + @Override + public void setState(SubClusterState state) { + maybeInitBuilder(); + if (state == null) { + builder.clearState(); + return; + } + builder.setState(convertToProtoFormat(state)); + } + + @Override + public long getLastStartTime() { + SubClusterInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasLastStartTime()) ? p.getLastStartTime() : 0; + } + + @Override + public void setLastStartTime(long lastStartTime) { + Preconditions.checkNotNull(builder); + builder.setLastStartTime(lastStartTime); + } + + @Override + public String getCapability() { + SubClusterInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasCapability()) ? 
p.getCapability() : null; + } + + @Override + public void setCapability(String capability) { + maybeInitBuilder(); + if (capability == null) { + builder.clearCapability(); + return; + } + builder.setCapability(capability); + } + + private SubClusterId convertFromProtoFormat(SubClusterIdProto clusterId) { + return new SubClusterIdPBImpl(clusterId); + } + + private SubClusterIdProto convertToProtoFormat(SubClusterId clusterId) { + return ((SubClusterIdPBImpl) clusterId).getProto(); + } + + private SubClusterState convertFromProtoFormat(SubClusterStateProto state) { + return SubClusterState.valueOf(state.name()); + } + + private SubClusterStateProto convertToProtoFormat(SubClusterState state) { + return SubClusterStateProto.valueOf(state.name()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterPolicyConfigurationPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterPolicyConfigurationPBImpl.java new file mode 100644 index 00000000000..305a8d32232 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterPolicyConfigurationPBImpl.java @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
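All of the PB record implementations above share the proto/builder/viaProto idiom: setters write through a mutable Builder, and getProto() merges any locally cached sub-records and flips the instance back to an immutable proto view. A minimal sketch of that round trip, using only SubClusterInfoPBImpl methods defined in this patch (the wrapper class name and field values are illustrative):

    import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterInfoProto;
    import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterInfoPBImpl;

    public class SubClusterInfoRoundTrip {
      public static void main(String[] args) {
        // Setters write through the mutable builder.
        SubClusterInfoPBImpl info = new SubClusterInfoPBImpl();
        info.setAMRMServiceAddress("rm.example.com:8030"); // illustrative endpoint
        info.setCapability("{\"memory\": 1024}");          // illustrative capability blob
        info.setLastHeartBeat(System.currentTimeMillis());

        // getProto() merges local state and returns an immutable proto snapshot.
        SubClusterInfoProto proto = info.getProto();

        // A new PBImpl wrapped around that proto reads the same values back.
        SubClusterInfoPBImpl copy = new SubClusterInfoPBImpl(proto);
        System.out.println(copy.getAMRMServiceAddress()); // rm.example.com:8030
      }
    }

Once getProto() has been called, any further setter first copies the current proto back into a fresh builder via maybeInitBuilder() before mutating it.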

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.records.impl.pb;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterPolicyConfigurationProto;
+import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterPolicyConfigurationProtoOrBuilder;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+
+import com.google.protobuf.TextFormat;
+
+/**
+ * Protobuf based implementation of {@link SubClusterPolicyConfiguration}.
+ *
+ */
+@Private
+@Unstable
+public class SubClusterPolicyConfigurationPBImpl
+    extends SubClusterPolicyConfiguration {
+
+  private SubClusterPolicyConfigurationProto proto =
+      SubClusterPolicyConfigurationProto.getDefaultInstance();
+  private SubClusterPolicyConfigurationProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  public SubClusterPolicyConfigurationPBImpl() {
+    builder = SubClusterPolicyConfigurationProto.newBuilder();
+  }
+
+  public SubClusterPolicyConfigurationPBImpl(
+      SubClusterPolicyConfigurationProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public SubClusterPolicyConfigurationProto getProto() {
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = SubClusterPolicyConfigurationProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  @Override
+  public String getQueue() {
+    SubClusterPolicyConfigurationProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getQueue();
+  }
+
+  @Override
+  public void setQueue(String queueName) {
+    maybeInitBuilder();
+    if (queueName == null) {
+      builder.clearQueue();
+      return;
+    }
+    builder.setQueue(queueName);
+  }
+
+  @Override
+  public String getType() {
+    SubClusterPolicyConfigurationProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getType();
+  }
+
+  @Override
+  public void setType(String policyType) {
+    maybeInitBuilder();
+    if (policyType == null) {
+      builder.clearType();
+      return;
+    }
+    builder.setType(policyType);
+  }
+
+  @Override
+  public ByteBuffer getParams() {
+    SubClusterPolicyConfigurationProtoOrBuilder p = viaProto ?
proto : builder; + return ProtoUtils.convertFromProtoFormat(p.getParams()); + } + + @Override + public void setParams(ByteBuffer policyParams) { + maybeInitBuilder(); + if (policyParams == null) { + builder.clearParams(); + return; + } + builder.setParams(ProtoUtils.convertToProtoFormat(policyParams)); + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterRegisterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterRegisterRequestPBImpl.java new file mode 100644 index 00000000000..3429cc91712 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterRegisterRequestPBImpl.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
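SubClusterPolicyConfiguration carries its policy-specific parameters as an opaque ByteBuffer that is converted to and from the proto bytes field through ProtoUtils. A small sketch of writing and reading such a blob; the queue name, policy type, and payload are illustrative:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterPolicyConfigurationProto;
    import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterPolicyConfigurationPBImpl;

    public class PolicyConfigRoundTrip {
      public static void main(String[] args) {
        SubClusterPolicyConfigurationPBImpl config =
            new SubClusterPolicyConfigurationPBImpl();
        config.setQueue("root.default");                      // illustrative queue
        config.setType("org.example.UniformBroadcastPolicy"); // illustrative policy type
        config.setParams(ByteBuffer.wrap(
            "weight=1.0".getBytes(StandardCharsets.UTF_8)));  // illustrative opaque payload

        SubClusterPolicyConfigurationProto proto = config.getProto();

        // Read the payload back through a PBImpl wrapped around the proto.
        ByteBuffer params = new SubClusterPolicyConfigurationPBImpl(proto).getParams();
        byte[] raw = new byte[params.remaining()];
        params.get(raw);
        System.out.println(new String(raw, StandardCharsets.UTF_8)); // weight=1.0
      }
    }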

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterInfoProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterRegisterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterRegisterRequestProtoOrBuilder; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link SubClusterRegisterRequest}. + */ +@Private +@Unstable +public class SubClusterRegisterRequestPBImpl extends SubClusterRegisterRequest { + + private SubClusterRegisterRequestProto proto = + SubClusterRegisterRequestProto.getDefaultInstance(); + private SubClusterRegisterRequestProto.Builder builder = null; + private boolean viaProto = false; + + private SubClusterInfo subClusterInfo = null; + + public SubClusterRegisterRequestPBImpl() { + builder = SubClusterRegisterRequestProto.newBuilder(); + } + + public SubClusterRegisterRequestPBImpl(SubClusterRegisterRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public SubClusterRegisterRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = SubClusterRegisterRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.subClusterInfo != null) { + builder.setSubClusterInfo(convertToProtoFormat(this.subClusterInfo)); + } + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public SubClusterInfo getSubClusterInfo() { + SubClusterRegisterRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.subClusterInfo != null) { + return this.subClusterInfo; + } + if (!p.hasSubClusterInfo()) { + return null; + } + this.subClusterInfo = convertFromProtoFormat(p.getSubClusterInfo()); + return this.subClusterInfo; + } + + @Override + public void setSubClusterInfo(SubClusterInfo subClusterInfo) { + maybeInitBuilder(); + if (subClusterInfo == null) { + builder.clearSubClusterInfo(); + } + this.subClusterInfo = subClusterInfo; + } + + private SubClusterInfo convertFromProtoFormat( + SubClusterInfoProto clusterInfo) { + return new SubClusterInfoPBImpl(clusterInfo); + } + + private SubClusterInfoProto convertToProtoFormat(SubClusterInfo clusterInfo) { + return ((SubClusterInfoPBImpl) clusterInfo).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterRegisterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterRegisterResponsePBImpl.java new file mode 100644 index 00000000000..68930e33b10 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterRegisterResponsePBImpl.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterRegisterResponseProto; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of {@link SubClusterRegisterResponse}. + */ +@Private +@Unstable +public class SubClusterRegisterResponsePBImpl + extends SubClusterRegisterResponse { + + private SubClusterRegisterResponseProto proto = + SubClusterRegisterResponseProto.getDefaultInstance(); + private SubClusterRegisterResponseProto.Builder builder = null; + private boolean viaProto = false; + + public SubClusterRegisterResponsePBImpl() { + builder = SubClusterRegisterResponseProto.newBuilder(); + } + + public SubClusterRegisterResponsePBImpl( + SubClusterRegisterResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public SubClusterRegisterResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/UpdateApplicationHomeSubClusterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/UpdateApplicationHomeSubClusterRequestPBImpl.java new file mode 100644 index 00000000000..e42eb00aa3a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/UpdateApplicationHomeSubClusterRequestPBImpl.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
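The response records carry no fields of their own, so equals() and hashCode() delegate entirely to the underlying proto; two independently constructed instances compare equal because both wrap the default proto. For example:

    import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterRegisterResponsePBImpl;

    public class ResponseEqualityDemo {
      public static void main(String[] args) {
        SubClusterRegisterResponsePBImpl a = new SubClusterRegisterResponsePBImpl();
        SubClusterRegisterResponsePBImpl b = new SubClusterRegisterResponsePBImpl();

        // Both calls delegate to getProto(), and both instances build the
        // (empty) default SubClusterRegisterResponseProto.
        System.out.println(a.equals(b));                  // true
        System.out.println(a.hashCode() == b.hashCode()); // true
      }
    }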

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.UpdateApplicationHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.UpdateApplicationHomeSubClusterRequestProtoOrBuilder; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link UpdateApplicationHomeSubClusterRequest} . + */ +@Private +@Unstable +public class UpdateApplicationHomeSubClusterRequestPBImpl + extends UpdateApplicationHomeSubClusterRequest { + + private UpdateApplicationHomeSubClusterRequestProto proto = + UpdateApplicationHomeSubClusterRequestProto.getDefaultInstance(); + private UpdateApplicationHomeSubClusterRequestProto.Builder builder = null; + private boolean viaProto = false; + + public UpdateApplicationHomeSubClusterRequestPBImpl() { + builder = UpdateApplicationHomeSubClusterRequestProto.newBuilder(); + } + + public UpdateApplicationHomeSubClusterRequestPBImpl( + UpdateApplicationHomeSubClusterRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public UpdateApplicationHomeSubClusterRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = UpdateApplicationHomeSubClusterRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + @Override + public ApplicationHomeSubCluster getApplicationHomeSubCluster() { + UpdateApplicationHomeSubClusterRequestProtoOrBuilder p = + viaProto ? 
proto : builder; + if (!p.hasAppSubclusterMap()) { + return null; + } + return convertFromProtoFormat(p.getAppSubclusterMap()); + } + + @Override + public void setApplicationHomeSubCluster( + ApplicationHomeSubCluster applicationInfo) { + maybeInitBuilder(); + if (applicationInfo == null) { + builder.clearAppSubclusterMap(); + return; + } + builder.setAppSubclusterMap(convertToProtoFormat(applicationInfo)); + } + + private ApplicationHomeSubCluster convertFromProtoFormat( + ApplicationHomeSubClusterProto sc) { + return new ApplicationHomeSubClusterPBImpl(sc); + } + + private ApplicationHomeSubClusterProto convertToProtoFormat( + ApplicationHomeSubCluster sc) { + return ((ApplicationHomeSubClusterPBImpl) sc).getProto(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/UpdateApplicationHomeSubClusterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/UpdateApplicationHomeSubClusterResponsePBImpl.java new file mode 100644 index 00000000000..ec31f0b496b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/UpdateApplicationHomeSubClusterResponsePBImpl.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.UpdateApplicationHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse; + +import com.google.protobuf.TextFormat; + +/** + * Protocol buffer based implementation of + * {@link UpdateApplicationHomeSubClusterResponse}. + */ +@Private +@Unstable +public class UpdateApplicationHomeSubClusterResponsePBImpl + extends UpdateApplicationHomeSubClusterResponse { + + private UpdateApplicationHomeSubClusterResponseProto proto = + UpdateApplicationHomeSubClusterResponseProto.getDefaultInstance(); + private UpdateApplicationHomeSubClusterResponseProto.Builder builder = null; + private boolean viaProto = false; + + public UpdateApplicationHomeSubClusterResponsePBImpl() { + builder = UpdateApplicationHomeSubClusterResponseProto.newBuilder(); + } + + public UpdateApplicationHomeSubClusterResponsePBImpl( + UpdateApplicationHomeSubClusterResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public UpdateApplicationHomeSubClusterResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/package-info.java new file mode 100644 index 00000000000..2f85c487366 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.store.records.impl.pb; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/package-info.java new file mode 100644 index 00000000000..9a9b28255ba --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.store.records; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationApplicationHomeSubClusterStoreInputValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationApplicationHomeSubClusterStoreInputValidator.java new file mode 100644 index 00000000000..0184c9fad66 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationApplicationHomeSubClusterStoreInputValidator.java @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.utils; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Utility class to validate the inputs to + * {@code FederationApplicationHomeSubClusterStore}, allows a fail fast + * mechanism for invalid user inputs. + * + */ +public final class FederationApplicationHomeSubClusterStoreInputValidator { + + private static final Logger LOG = LoggerFactory + .getLogger(FederationApplicationHomeSubClusterStoreInputValidator.class); + + private FederationApplicationHomeSubClusterStoreInputValidator() { + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided {@link AddApplicationHomeSubClusterRequest} + * for adding a new application is valid or not. 
+ * + * @param request the {@link AddApplicationHomeSubClusterRequest} to validate + * against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(AddApplicationHomeSubClusterRequest request) + throws FederationStateStoreInvalidInputException { + if (request == null) { + String message = "Missing AddApplicationHomeSubCluster Request." + + " Please try again by specifying" + + " an AddApplicationHomeSubCluster information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate ApplicationHomeSubCluster info + checkApplicationHomeSubCluster(request.getApplicationHomeSubCluster()); + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided {@link UpdateApplicationHomeSubClusterRequest} + * for updating an application is valid or not. + * + * @param request the {@link UpdateApplicationHomeSubClusterRequest} to + * validate against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(UpdateApplicationHomeSubClusterRequest request) + throws FederationStateStoreInvalidInputException { + if (request == null) { + String message = "Missing UpdateApplicationHomeSubCluster Request." + + " Please try again by specifying" + + " an ApplicationHomeSubCluster information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate ApplicationHomeSubCluster info + checkApplicationHomeSubCluster(request.getApplicationHomeSubCluster()); + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided {@link GetApplicationHomeSubClusterRequest} + * for querying application's information is valid or not. + * + * @param request the {@link GetApplicationHomeSubClusterRequest} to validate + * against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(GetApplicationHomeSubClusterRequest request) + throws FederationStateStoreInvalidInputException { + if (request == null) { + String message = "Missing GetApplicationHomeSubCluster Request." + + " Please try again by specifying an Application Id information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate application Id + checkApplicationId(request.getApplicationId()); + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided {@link DeleteApplicationHomeSubClusterRequest} + * for deleting an application is valid or not. + * + * @param request the {@link DeleteApplicationHomeSubClusterRequest} to + * validate against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(DeleteApplicationHomeSubClusterRequest request) + throws FederationStateStoreInvalidInputException { + if (request == null) { + String message = "Missing DeleteApplicationHomeSubCluster Request." + + " Please try again by specifying" + + " an ApplicationHomeSubCluster information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate application Id + checkApplicationId(request.getApplicationId()); + } + + /** + * Validate if the ApplicationHomeSubCluster info are present or not. 
+ * + * @param applicationHomeSubCluster the information of the application to be + * verified + * @throws FederationStateStoreInvalidInputException if the SubCluster Info + * are invalid + */ + private static void checkApplicationHomeSubCluster( + ApplicationHomeSubCluster applicationHomeSubCluster) + + throws FederationStateStoreInvalidInputException { + if (applicationHomeSubCluster == null) { + String message = "Missing ApplicationHomeSubCluster Info." + + " Please try again by specifying" + + " an ApplicationHomeSubCluster information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + // validate application Id + checkApplicationId(applicationHomeSubCluster.getApplicationId()); + + // validate subcluster Id + FederationMembershipStateStoreInputValidator + .checkSubClusterId(applicationHomeSubCluster.getHomeSubCluster()); + + } + + /** + * Validate if the application id is present or not. + * + * @param appId the id of the application to be verified + * @throws FederationStateStoreInvalidInputException if the application Id is + * invalid + */ + private static void checkApplicationId(ApplicationId appId) + throws FederationStateStoreInvalidInputException { + if (appId == null) { + String message = "Missing Application Id." + + " Please try again by specifying an Application Id."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationMembershipStateStoreInputValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationMembershipStateStoreInputValidator.java new file mode 100644 index 00000000000..0ec8e5de6f3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationMembershipStateStoreInputValidator.java @@ -0,0 +1,315 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
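The validator is meant to run at the top of every FederationApplicationHomeSubClusterStore operation so that malformed input fails fast before any state is modified. A hedged sketch of that usage follows; the enclosing store method is hypothetical, while the validator, the record types, and the fact that FederationStateStoreInvalidInputException is a YarnException come from the code in this patch:

    // Hypothetical fragment of a state-store implementation.
    public void addApplicationHomeSubCluster(
        AddApplicationHomeSubClusterRequest request) throws YarnException {
      // Rejects a null request, a missing ApplicationHomeSubCluster record,
      // a missing ApplicationId, or a missing home SubClusterId before any
      // state is touched.
      FederationApplicationHomeSubClusterStoreInputValidator.validate(request);

      ApplicationHomeSubCluster info = request.getApplicationHomeSubCluster();
      // ... persist the info.getApplicationId() -> info.getHomeSubCluster() mapping ...
    }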
+ */ + +package org.apache.hadoop.yarn.server.federation.store.utils; + +import java.net.URI; + +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Utility class to validate the inputs to + * {@code FederationMembershipStateStore}, allows a fail fast mechanism for + * invalid user inputs. + * + */ +public final class FederationMembershipStateStoreInputValidator { + + private static final Logger LOG = LoggerFactory + .getLogger(FederationMembershipStateStoreInputValidator.class); + + private FederationMembershipStateStoreInputValidator() { + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided {@link SubClusterRegisterRequest} for + * registration a new subcluster is valid or not. + * + * @param request the {@link SubClusterRegisterRequest} to validate against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(SubClusterRegisterRequest request) + throws FederationStateStoreInvalidInputException { + + // check if the request is present + if (request == null) { + String message = "Missing SubClusterRegister Request." + + " Please try again by specifying a" + + " SubCluster Register Information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + + } + + // validate subcluster info + checkSubClusterInfo(request.getSubClusterInfo()); + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided {@link SubClusterDeregisterRequest} for + * deregistration a subcluster is valid or not. + * + * @param request the {@link SubClusterDeregisterRequest} to validate against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(SubClusterDeregisterRequest request) + throws FederationStateStoreInvalidInputException { + + // check if the request is present + if (request == null) { + String message = "Missing SubClusterDeregister Request." + + " Please try again by specifying a" + + " SubCluster Deregister Information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate subcluster id + checkSubClusterId(request.getSubClusterId()); + // validate subcluster state + checkSubClusterState(request.getState()); + if (!request.getState().isFinal()) { + String message = "Invalid non-final state: " + request.getState(); + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided {@link SubClusterHeartbeatRequest} for + * heartbeating a subcluster is valid or not. 
+ * + * @param request the {@link SubClusterHeartbeatRequest} to validate against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(SubClusterHeartbeatRequest request) + throws FederationStateStoreInvalidInputException { + + // check if the request is present + if (request == null) { + String message = "Missing SubClusterHeartbeat Request." + + " Please try again by specifying a" + + " SubCluster Heartbeat Information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate subcluster id + checkSubClusterId(request.getSubClusterId()); + // validate last heartbeat timestamp + checkTimestamp(request.getLastHeartBeat()); + // validate subcluster capability + checkCapability(request.getCapability()); + // validate subcluster state + checkSubClusterState(request.getState()); + + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided {@link GetSubClusterInfoRequest} for querying + * subcluster's information is valid or not. + * + * @param request the {@link GetSubClusterInfoRequest} to validate against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(GetSubClusterInfoRequest request) + throws FederationStateStoreInvalidInputException { + + // check if the request is present + if (request == null) { + String message = "Missing GetSubClusterInfo Request." + + " Please try again by specifying a Get SubCluster information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate subcluster id + checkSubClusterId(request.getSubClusterId()); + } + + /** + * Validate if all the required fields on {@link SubClusterInfo} are present + * or not. {@code Capability} will be empty as the corresponding + * {@code ResourceManager} is in the process of initialization during + * registration. + * + * @param subClusterInfo the information of the subcluster to be verified + * @throws FederationStateStoreInvalidInputException if the SubCluster Info + * are invalid + */ + public static void checkSubClusterInfo(SubClusterInfo subClusterInfo) + throws FederationStateStoreInvalidInputException { + if (subClusterInfo == null) { + String message = "Missing SubCluster Information." + + " Please try again by specifying SubCluster Information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate subcluster id + checkSubClusterId(subClusterInfo.getSubClusterId()); + + // validate AMRM Service address + checkAddress(subClusterInfo.getAMRMServiceAddress()); + // validate ClientRM Service address + checkAddress(subClusterInfo.getClientRMServiceAddress()); + // validate RMClient Service address + checkAddress(subClusterInfo.getRMAdminServiceAddress()); + // validate RMWeb Service address + checkAddress(subClusterInfo.getRMWebServiceAddress()); + + // validate last heartbeat timestamp + checkTimestamp(subClusterInfo.getLastHeartBeat()); + // validate last start timestamp + checkTimestamp(subClusterInfo.getLastStartTime()); + + // validate subcluster state + checkSubClusterState(subClusterInfo.getState()); + + } + + /** + * Validate if the timestamp is positive or not. 
+ * + * @param timestamp the timestamp to be verified + * @throws FederationStateStoreInvalidInputException if the timestamp is + * invalid + */ + private static void checkTimestamp(long timestamp) + throws FederationStateStoreInvalidInputException { + if (timestamp < 0) { + String message = "Invalid timestamp information." + + " Please try again by specifying valid Timestamp Information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + } + + /** + * Validate if the Capability is present or not. + * + * @param capability the capability of the subcluster to be verified + * @throws FederationStateStoreInvalidInputException if the capability is + * invalid + */ + private static void checkCapability(String capability) + throws FederationStateStoreInvalidInputException { + if (capability == null || capability.isEmpty()) { + String message = "Invalid capability information." + + " Please try again by specifying valid Capability Information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + } + + /** + * Validate if the SubCluster Id is present or not. + * + * @param subClusterId the identifier of the subcluster to be verified + * @throws FederationStateStoreInvalidInputException if the SubCluster Id is + * invalid + */ + protected static void checkSubClusterId(SubClusterId subClusterId) + throws FederationStateStoreInvalidInputException { + // check if cluster id is present + if (subClusterId == null) { + String message = "Missing SubCluster Id information." + + " Please try again by specifying Subcluster Id information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + // check if cluster id is valid + if (subClusterId.getId().isEmpty()) { + String message = "Invalid SubCluster Id information." + + " Please try again by specifying valid Subcluster Id."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + } + + /** + * Validate if the SubCluster Address is a valid URL or not. + * + * @param address the endpoint of the subcluster to be verified + * @throws FederationStateStoreInvalidInputException if the address is invalid + */ + private static void checkAddress(String address) + throws FederationStateStoreInvalidInputException { + // Ensure url is not null + if (address == null || address.isEmpty()) { + String message = "Missing SubCluster Endpoint information." + + " Please try again by specifying SubCluster Endpoint information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + // Validate url is well formed + boolean hasScheme = address.contains("://"); + URI uri = null; + try { + uri = hasScheme ? URI.create(address) + : URI.create("dummyscheme://" + address); + } catch (IllegalArgumentException e) { + String message = "The provided SubCluster Endpoint does not contain a" + + " valid host:port authority: " + address; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + String host = uri.getHost(); + int port = uri.getPort(); + String path = uri.getPath(); + if ((host == null) || (port < 0) + || (!hasScheme && path != null && !path.isEmpty())) { + String message = "The provided SubCluster Endpoint does not contain a" + + " valid host:port authority: " + address; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + } + + /** + * Validate if the SubCluster State is present or not. 
+ * + * @param state the state of the subcluster to be verified + * @throws FederationStateStoreInvalidInputException if the SubCluster State + * is invalid + */ + private static void checkSubClusterState(SubClusterState state) + throws FederationStateStoreInvalidInputException { + // check sub-cluster state is not empty + if (state == null) { + String message = "Missing SubCluster State information." + + " Please try again by specifying SubCluster State information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationPolicyStoreInputValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationPolicyStoreInputValidator.java new file mode 100644 index 00000000000..3c68bfdace7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationPolicyStoreInputValidator.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.utils; + +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Utility class to validate the inputs to {@code FederationPolicyStore}, allows + * a fail fast mechanism for invalid user inputs. + * + */ +public final class FederationPolicyStoreInputValidator { + + private static final Logger LOG = + LoggerFactory.getLogger(FederationPolicyStoreInputValidator.class); + + private FederationPolicyStoreInputValidator() { + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided + * {@link GetSubClusterPolicyConfigurationRequest} for querying policy's + * information is valid or not. 
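Membership operations funnel through the validate() overloads above, and registration in particular runs checkSubClusterInfo over the four RM endpoints using the checkAddress rules. A hedged sketch of the guard; the enclosing method is hypothetical, and the comments summarize which endpoint forms the address check accepts:

    // Hypothetical fragment of a membership store implementation.
    public void registerSubCluster(SubClusterRegisterRequest request)
        throws YarnException {
      // Accepted forms for the four RM addresses inside SubClusterInfo:
      //   "rm1.example.com:8032"        host:port, no scheme          -> valid
      //   "http://rm1.example.com:8088" full URI with scheme and port -> valid
      //   "rm1.example.com"             missing port                  -> rejected
      //   "rm1.example.com:8032/path"   scheme-less value with a path -> rejected
      // Heartbeat and start timestamps must be >= 0 and the state must be set.
      FederationMembershipStateStoreInputValidator.validate(request);

      // ... record request.getSubClusterInfo() in the membership table ...
    }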
+ * + * @param request the {@link GetSubClusterPolicyConfigurationRequest} to + * validate against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(GetSubClusterPolicyConfigurationRequest request) + throws FederationStateStoreInvalidInputException { + if (request == null) { + String message = "Missing GetSubClusterPolicyConfiguration Request." + + " Please try again by specifying a policy selection information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate queue id + checkQueue(request.getQueue()); + } + + /** + * Quick validation on the input to check some obvious fail conditions (fail + * fast). Check if the provided + * {@link SetSubClusterPolicyConfigurationRequest} for adding a new policy is + * valid or not. + * + * @param request the {@link SetSubClusterPolicyConfigurationRequest} to + * validate against + * @throws FederationStateStoreInvalidInputException if the request is invalid + */ + public static void validate(SetSubClusterPolicyConfigurationRequest request) + throws FederationStateStoreInvalidInputException { + if (request == null) { + String message = "Missing SetSubClusterPolicyConfiguration Request." + + " Please try again by specifying an policy insertion information."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate subcluster policy configuration + checkSubClusterPolicyConfiguration(request.getPolicyConfiguration()); + } + + /** + * Validate if the SubClusterPolicyConfiguration is valid or not. + * + * @param policyConfiguration the policy information to be verified + * @throws FederationStateStoreInvalidInputException if the policy information + * are invalid + */ + private static void checkSubClusterPolicyConfiguration( + SubClusterPolicyConfiguration policyConfiguration) + throws FederationStateStoreInvalidInputException { + if (policyConfiguration == null) { + String message = "Missing SubClusterPolicyConfiguration." + + " Please try again by specifying a SubClusterPolicyConfiguration."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + + // validate queue id + checkQueue(policyConfiguration.getQueue()); + // validate policy type + checkType(policyConfiguration.getType()); + + } + + /** + * Validate if the queue id is a valid or not. + * + * @param queue the queue id of the policy to be verified + * @throws FederationStateStoreInvalidInputException if the queue id is + * invalid + */ + private static void checkQueue(String queue) + throws FederationStateStoreInvalidInputException { + if (queue == null || queue.isEmpty()) { + String message = "Missing Queue. Please try again by specifying a Queue."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + } + + /** + * Validate if the policy type is a valid or not. + * + * @param type the type of the policy to be verified + * @throws FederationStateStoreInvalidInputException if the policy is invalid + */ + private static void checkType(String type) + throws FederationStateStoreInvalidInputException { + if (type == null || type.isEmpty()) { + String message = "Missing Policy Type." 
+ + " Please try again by specifying a Policy Type."; + LOG.warn(message); + throw new FederationStateStoreInvalidInputException(message); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java new file mode 100644 index 00000000000..3b870debefc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/FederationStateStoreUtils.java @@ -0,0 +1,214 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.utils; + +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreException; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreRetriableException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.zaxxer.hikari.HikariDataSource; + +/** + * Common utility methods used by the store implementations. + * + */ +public final class FederationStateStoreUtils { + + public static final Logger LOG = + LoggerFactory.getLogger(FederationStateStoreUtils.class); + + public final static String FEDERATION_STORE_URL = "url"; + + private FederationStateStoreUtils() { + } + + /** + * Returns the SQL FederationStateStore connections to the pool. 
+ * + * @param log the logger interface + * @param cstmt the interface used to execute SQL stored procedures + * @param conn the SQL connection + * @param rs the ResultSet interface used to execute SQL stored procedures + * @throws YarnException on failure + */ + public static void returnToPool(Logger log, CallableStatement cstmt, + Connection conn, ResultSet rs) throws YarnException { + if (cstmt != null) { + try { + cstmt.close(); + } catch (SQLException e) { + logAndThrowException(log, "Exception while trying to close Statement", + e); + } + } + + if (conn != null) { + try { + conn.close(); + } catch (SQLException e) { + logAndThrowException(log, "Exception while trying to close Connection", + e); + } + } + + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + logAndThrowException(log, "Exception while trying to close ResultSet", + e); + } + } + } + + /** + * Returns the SQL FederationStateStore connections to the pool. + * + * @param log the logger interface + * @param cstmt the interface used to execute SQL stored procedures + * @param conn the SQL connection + * @throws YarnException on failure + */ + public static void returnToPool(Logger log, CallableStatement cstmt, + Connection conn) throws YarnException { + returnToPool(log, cstmt, conn, null); + } + + /** + * Throws an exception due to an error in FederationStateStore. + * + * @param log the logger interface + * @param errMsg the error message + * @param t the throwable raised in the called class. + * @throws YarnException on failure + */ + public static void logAndThrowException(Logger log, String errMsg, + Throwable t) throws YarnException { + if (t != null) { + log.error(errMsg, t); + throw new YarnException(errMsg, t); + } else { + log.error(errMsg); + throw new YarnException(errMsg); + } + } + + /** + * Throws an FederationStateStoreException due to an error in + * FederationStateStore. + * + * @param log the logger interface + * @param errMsg the error message + * @throws YarnException on failure + */ + public static void logAndThrowStoreException(Logger log, String errMsg) + throws YarnException { + log.error(errMsg); + throw new FederationStateStoreException(errMsg); + } + + /** + * Throws an FederationStateStoreInvalidInputException due to an + * error in FederationStateStore. + * + * @param log the logger interface + * @param errMsg the error message + * @throws YarnException on failure + */ + public static void logAndThrowInvalidInputException(Logger log, String errMsg) + throws YarnException { + log.error(errMsg); + throw new FederationStateStoreInvalidInputException(errMsg); + } + + /** + * Throws an FederationStateStoreRetriableException due to an + * error in FederationStateStore. + * + * @param log the logger interface + * @param errMsg the error message + * @param t the throwable raised in the called class. + * @throws YarnException on failure + */ + public static void logAndThrowRetriableException(Logger log, String errMsg, + Throwable t) throws YarnException { + if (t != null) { + log.error(errMsg, t); + throw new FederationStateStoreRetriableException(errMsg, t); + } else { + log.error(errMsg); + throw new FederationStateStoreRetriableException(errMsg); + } + } + + /** + * Sets a specific value for a specific property of + * HikariDataSource SQL connections. 
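These helpers are intended to bracket every JDBC call in a SQL-backed state store: SQLExceptions are rethrown as retriable store errors, and the statement and connection are handed back in a finally block. A hedged sketch follows; the stored-procedure name, the getConnection() accessor, and the LOG field are hypothetical, while the FederationStateStoreUtils calls are the ones defined above:

    // Hypothetical fragment of a SQL-backed store method.
    public void deregisterSubCluster(String subClusterId) throws YarnException {
      CallableStatement cstmt = null;
      Connection conn = null;
      try {
        conn = getConnection();                                        // hypothetical pool accessor
        cstmt = conn.prepareCall("{call sp_deregisterSubCluster(?)}"); // hypothetical procedure
        cstmt.setString(1, subClusterId);
        cstmt.executeUpdate();
      } catch (SQLException e) {
        FederationStateStoreUtils.logAndThrowRetriableException(LOG,
            "Unable to deregister sub-cluster " + subClusterId, e);
      } finally {
        // Always hand the statement and connection back, even on failure.
        FederationStateStoreUtils.returnToPool(LOG, cstmt, conn);
      }
    }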
+ * + * @param dataSource the HikariDataSource connections + * @param property the property to set + * @param value the value to set + */ + public static void setProperty(HikariDataSource dataSource, String property, + String value) { + LOG.debug("Setting property {} with value {}", property, value); + if (property != null && !property.isEmpty() && value != null) { + dataSource.addDataSourceProperty(property, value); + } + } + + /** + * Sets a specific username for HikariDataSource SQL connections. + * + * @param dataSource the HikariDataSource connections + * @param userNameDB the value to set + */ + public static void setUsername(HikariDataSource dataSource, + String userNameDB) { + if (userNameDB != null) { + dataSource.setUsername(userNameDB); + LOG.debug("Setting non NULL Username for Store connection"); + } else { + LOG.debug("NULL Username specified for Store connection, so ignoring"); + } + } + + /** + * Sets a specific password for HikariDataSource SQL connections. + * + * @param dataSource the HikariDataSource connections + * @param password the value to set + */ + public static void setPassword(HikariDataSource dataSource, String password) { + if (password != null) { + dataSource.setPassword(password); + LOG.debug("Setting non NULL Credentials for Store connection"); + } else { + LOG.debug("NULL Credentials specified for Store connection, so ignoring"); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/package-info.java new file mode 100644 index 00000000000..f4a9c7eff79 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/utils/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.store.utils; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java new file mode 100644 index 00000000000..682eb1457d9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java @@ -0,0 +1,602 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.utils; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import javax.cache.Cache; +import javax.cache.CacheManager; +import javax.cache.Caching; +import javax.cache.configuration.CompleteConfiguration; +import javax.cache.configuration.FactoryBuilder; +import javax.cache.configuration.MutableConfiguration; +import javax.cache.expiry.CreatedExpiryPolicy; +import javax.cache.expiry.Duration; +import javax.cache.expiry.ExpiryPolicy; +import javax.cache.integration.CacheLoader; +import javax.cache.integration.CacheLoaderException; +import javax.cache.spi.CachingProvider; + +import org.apache.commons.lang.NotImplementedException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.io.retry.RetryProxy; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreRetriableException; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException; + +/** + * + * The FederationStateStoreFacade is an utility wrapper that provides singleton + * access to the Federation state store. It abstracts out retries and in + * addition, it also implements the caching for various objects. 
+ * + */ +public final class FederationStateStoreFacade { + private static final Logger LOG = + LoggerFactory.getLogger(FederationStateStoreFacade.class); + + private static final String GET_SUBCLUSTERS_CACHEID = "getSubClusters"; + private static final String GET_POLICIES_CONFIGURATIONS_CACHEID = + "getPoliciesConfigurations"; + + private static final FederationStateStoreFacade FACADE = + new FederationStateStoreFacade(); + + private FederationStateStore stateStore; + private int cacheTimeToLive; + private Configuration conf; + private Cache cache; + private SubClusterResolver subclusterResolver; + + private FederationStateStoreFacade() { + initializeFacadeInternal(new Configuration()); + } + + private void initializeFacadeInternal(Configuration config) { + this.conf = config; + try { + this.stateStore = (FederationStateStore) createRetryInstance(this.conf, + YarnConfiguration.FEDERATION_STATESTORE_CLIENT_CLASS, + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS, + FederationStateStore.class, createRetryPolicy(conf)); + this.stateStore.init(conf); + + this.subclusterResolver = createInstance(conf, + YarnConfiguration.FEDERATION_CLUSTER_RESOLVER_CLASS, + YarnConfiguration.DEFAULT_FEDERATION_CLUSTER_RESOLVER_CLASS, + SubClusterResolver.class); + this.subclusterResolver.load(); + + initCache(); + + } catch (YarnException ex) { + LOG.error("Failed to initialize the FederationStateStoreFacade object", + ex); + throw new RuntimeException(ex); + } + } + + /** + * Delete and re-initialize the cache, to force it to use the given + * configuration. + * + * @param store the {@link FederationStateStore} instance to reinitialize with + * @param config the updated configuration to reinitialize with + */ + @VisibleForTesting + public synchronized void reinitialize(FederationStateStore store, + Configuration config) { + this.conf = config; + this.stateStore = store; + clearCache(); + initCache(); + } + + /** + * Create a RetryPolicy for {@code FederationStateStoreFacade}. In case of + * failure, it retries for: + *
+ * <ul>
+ * <li>{@code FederationStateStoreRetriableException}</li>
+ * <li>{@code CacheLoaderException}</li>
+ * </ul>
+ * + * @param conf the updated configuration + * @return the RetryPolicy for FederationStateStoreFacade + */ + public static RetryPolicy createRetryPolicy(Configuration conf) { + // Retry settings for StateStore + RetryPolicy basePolicy = RetryPolicies.exponentialBackoffRetry( + conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, Integer.SIZE), + conf.getLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, + YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS), + TimeUnit.MILLISECONDS); + Map, RetryPolicy> exceptionToPolicyMap = + new HashMap, RetryPolicy>(); + exceptionToPolicyMap.put(FederationStateStoreRetriableException.class, + basePolicy); + exceptionToPolicyMap.put(CacheLoaderException.class, basePolicy); + exceptionToPolicyMap.put(PoolInitializationException.class, basePolicy); + + RetryPolicy retryPolicy = RetryPolicies.retryByException( + RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap); + return retryPolicy; + } + + private boolean isCachingEnabled() { + return (cacheTimeToLive > 0); + } + + private void initCache() { + // Picking the JCache provider from classpath, need to make sure there's + // no conflict or pick up a specific one in the future + cacheTimeToLive = + conf.getInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, + YarnConfiguration.DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS); + if (isCachingEnabled()) { + CachingProvider jcacheProvider = Caching.getCachingProvider(); + CacheManager jcacheManager = jcacheProvider.getCacheManager(); + this.cache = jcacheManager.getCache(this.getClass().getSimpleName()); + if (this.cache == null) { + LOG.info("Creating a JCache Manager with name " + + this.getClass().getSimpleName()); + Duration cacheExpiry = new Duration(TimeUnit.SECONDS, cacheTimeToLive); + CompleteConfiguration configuration = + new MutableConfiguration().setStoreByValue(false) + .setReadThrough(true) + .setExpiryPolicyFactory( + new FactoryBuilder.SingletonFactory( + new CreatedExpiryPolicy(cacheExpiry))) + .setCacheLoaderFactory( + new FactoryBuilder.SingletonFactory>( + new CacheLoaderImpl())); + this.cache = jcacheManager.createCache(this.getClass().getSimpleName(), + configuration); + } + } + } + + private void clearCache() { + CachingProvider jcacheProvider = Caching.getCachingProvider(); + CacheManager jcacheManager = jcacheProvider.getCacheManager(); + + jcacheManager.destroyCache(this.getClass().getSimpleName()); + this.cache = null; + } + + /** + * Returns the singleton instance of the FederationStateStoreFacade object. + * + * @return the singleton {@link FederationStateStoreFacade} instance + */ + public static FederationStateStoreFacade getInstance() { + return FACADE; + } + + /** + * Returns the {@link SubClusterInfo} for the specified {@link SubClusterId}. 
+ * + * @param subClusterId the identifier of the sub-cluster + * @return the sub cluster information, or + * {@code null} if there is no mapping for the subClusterId + * @throws YarnException if the call to the state store is unsuccessful + */ + public SubClusterInfo getSubCluster(final SubClusterId subClusterId) + throws YarnException { + if (isCachingEnabled()) { + return getSubClusters(false).get(subClusterId); + } else { + GetSubClusterInfoResponse response = stateStore + .getSubCluster(GetSubClusterInfoRequest.newInstance(subClusterId)); + if (response == null) { + return null; + } else { + return response.getSubClusterInfo(); + } + } + } + + /** + * Updates the cache with the central {@link FederationStateStore} and returns + * the {@link SubClusterInfo} for the specified {@link SubClusterId}. + * + * @param subClusterId the identifier of the sub-cluster + * @param flushCache flag to indicate if the cache should be flushed or not + * @return the sub cluster information + * @throws YarnException if the call to the state store is unsuccessful + */ + public SubClusterInfo getSubCluster(final SubClusterId subClusterId, + final boolean flushCache) throws YarnException { + if (flushCache && isCachingEnabled()) { + LOG.info("Flushing subClusters from cache and rehydrating from store," + + " most likely on account of RM failover."); + cache.remove(buildGetSubClustersCacheRequest(false)); + } + return getSubCluster(subClusterId); + } + + /** + * Returns the {@link SubClusterInfo} of all active sub cluster(s). + * + * @param filterInactiveSubClusters whether to filter out inactive + * sub-clusters + * @return the information of all active sub cluster(s) + * @throws YarnException if the call to the state store is unsuccessful + */ + @SuppressWarnings("unchecked") + public Map getSubClusters( + final boolean filterInactiveSubClusters) throws YarnException { + try { + if (isCachingEnabled()) { + return (Map) cache + .get(buildGetSubClustersCacheRequest(filterInactiveSubClusters)); + } else { + return buildSubClusterInfoMap(stateStore.getSubClusters( + GetSubClustersInfoRequest.newInstance(filterInactiveSubClusters))); + } + } catch (Throwable ex) { + throw new YarnException(ex); + } + } + + /** + * Returns the {@link SubClusterPolicyConfiguration} for the specified queue. + * + * @param queue the queue whose policy is required + * @return the corresponding configured policy, or {@code null} if there is no + * mapping for the queue + * @throws YarnException if the call to the state store is unsuccessful + */ + public SubClusterPolicyConfiguration getPolicyConfiguration( + final String queue) throws YarnException { + if (isCachingEnabled()) { + return getPoliciesConfigurations().get(queue); + } else { + + GetSubClusterPolicyConfigurationResponse response = + stateStore.getPolicyConfiguration( + GetSubClusterPolicyConfigurationRequest.newInstance(queue)); + if (response == null) { + return null; + } else { + return response.getPolicyConfiguration(); + } + } + } + + /** + * Get the policies that is represented as + * {@link SubClusterPolicyConfiguration} for all currently active queues in + * the system. 
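For orientation, a hedged read-side usage sketch (illustrative only; it assumes the facade is backed by a working state store, runs inside a method that may throw YarnException, and the queue name is a placeholder):

    FederationStateStoreFacade facade = FederationStateStoreFacade.getInstance();
    // Served from the JCache-backed cache when caching is enabled
    Map<SubClusterId, SubClusterInfo> active = facade.getSubClusters(true);
    // Returns null if no policy is configured for the queue
    SubClusterPolicyConfiguration policy = facade.getPolicyConfiguration("root.default");
    // Bypass and rebuild the cache, e.g. after an RM failover
    SubClusterInfo info = facade.getSubCluster(active.keySet().iterator().next(), true);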
+ * + * @return the policies for all currently active queues in the system + * @throws YarnException if the call to the state store is unsuccessful + */ + @SuppressWarnings("unchecked") + public Map getPoliciesConfigurations() + throws YarnException { + try { + if (isCachingEnabled()) { + return (Map) cache + .get(buildGetPoliciesConfigurationsCacheRequest()); + } else { + return buildPolicyConfigMap(stateStore.getPoliciesConfigurations( + GetSubClusterPoliciesConfigurationsRequest.newInstance())); + } + } catch (Throwable ex) { + throw new YarnException(ex); + } + } + + /** + * Adds the home {@link SubClusterId} for the specified {@link ApplicationId}. + * + * @param appHomeSubCluster the mapping of the application to it's home + * sub-cluster + * @return the stored Subcluster from StateStore + * @throws YarnException if the call to the state store is unsuccessful + */ + public SubClusterId addApplicationHomeSubCluster( + ApplicationHomeSubCluster appHomeSubCluster) throws YarnException { + AddApplicationHomeSubClusterResponse response = + stateStore.addApplicationHomeSubCluster( + AddApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster)); + return response.getHomeSubCluster(); + } + + /** + * Updates the home {@link SubClusterId} for the specified + * {@link ApplicationId}. + * + * @param appHomeSubCluster the mapping of the application to it's home + * sub-cluster + * @throws YarnException if the call to the state store is unsuccessful + */ + public void updateApplicationHomeSubCluster( + ApplicationHomeSubCluster appHomeSubCluster) throws YarnException { + stateStore.updateApplicationHomeSubCluster( + UpdateApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster)); + return; + } + + /** + * Returns the home {@link SubClusterId} for the specified + * {@link ApplicationId}. + * + * @param appId the identifier of the application + * @return the home sub cluster identifier + * @throws YarnException if the call to the state store is unsuccessful + */ + public SubClusterId getApplicationHomeSubCluster(ApplicationId appId) + throws YarnException { + GetApplicationHomeSubClusterResponse response = + stateStore.getApplicationHomeSubCluster( + GetApplicationHomeSubClusterRequest.newInstance(appId)); + return response.getApplicationHomeSubCluster().getHomeSubCluster(); + } + + /** + * Get the singleton instance of SubClusterResolver. + * + * @return SubClusterResolver instance + */ + public SubClusterResolver getSubClusterResolver() { + return this.subclusterResolver; + } + + /** + * Helper method to create instances of Object using the class name defined in + * the configuration object. The instances creates {@link RetryProxy} using + * the specific {@link RetryPolicy}. + * + * @param conf the yarn configuration + * @param configuredClassName the configuration provider key + * @param defaultValue the default implementation for fallback + * @param type the class for which a retry proxy is required + * @param retryPolicy the policy for retrying method call failures + * @return a retry proxy for the specified interface + */ + public static Object createRetryInstance(Configuration conf, + String configuredClassName, String defaultValue, Class type, + RetryPolicy retryPolicy) { + + return RetryProxy.create(type, + createInstance(conf, configuredClassName, defaultValue, type), + retryPolicy); + } + + /** + * Helper method to create instances of Object using the class name specified + * in the configuration object. 
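The retry wiring can also be used directly; the sketch below is illustrative and simply mirrors what initializeFacadeInternal does with the same YarnConfiguration keys:

    // Build a retrying proxy around the configured FederationStateStore.
    Configuration conf = new Configuration();
    RetryPolicy retryPolicy = FederationStateStoreFacade.createRetryPolicy(conf);
    FederationStateStore store = (FederationStateStore)
        FederationStateStoreFacade.createRetryInstance(conf,
            YarnConfiguration.FEDERATION_STATESTORE_CLIENT_CLASS,
            YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS,
            FederationStateStore.class, retryPolicy);
    // Calls now pass through RetryProxy and back off on retriable store failures.
    store.init(conf);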
+ * + * @param conf the yarn configuration + * @param configuredClassName the configuration provider key + * @param defaultValue the default implementation class + * @param type the required interface/base class + * @param The type of the instance to create + * @return the instances created + */ + @SuppressWarnings("unchecked") + public static T createInstance(Configuration conf, + String configuredClassName, String defaultValue, Class type) { + + String className = conf.get(configuredClassName, defaultValue); + try { + Class clusterResolverClass = conf.getClassByName(className); + if (type.isAssignableFrom(clusterResolverClass)) { + return (T) ReflectionUtils.newInstance(clusterResolverClass, conf); + } else { + throw new YarnRuntimeException("Class: " + className + + " not instance of " + type.getCanonicalName()); + } + } catch (ClassNotFoundException e) { + throw new YarnRuntimeException("Could not instantiate : " + className, e); + } + } + + private Map buildSubClusterInfoMap( + final GetSubClustersInfoResponse response) { + List subClusters = response.getSubClusters(); + Map subClustersMap = + new HashMap<>(subClusters.size()); + for (SubClusterInfo subCluster : subClusters) { + subClustersMap.put(subCluster.getSubClusterId(), subCluster); + } + return subClustersMap; + } + + private Object buildGetSubClustersCacheRequest( + final boolean filterInactiveSubClusters) { + final String cacheKey = buildCacheKey(getClass().getSimpleName(), + GET_SUBCLUSTERS_CACHEID, null); + CacheRequest> cacheRequest = + new CacheRequest>(cacheKey, + new Func>() { + @Override + public Map invoke(String key) + throws Exception { + GetSubClustersInfoResponse subClusters = + stateStore.getSubClusters(GetSubClustersInfoRequest + .newInstance(filterInactiveSubClusters)); + return buildSubClusterInfoMap(subClusters); + } + }); + return cacheRequest; + } + + private Map buildPolicyConfigMap( + GetSubClusterPoliciesConfigurationsResponse response) { + List policyConfigs = + response.getPoliciesConfigs(); + Map queuePolicyConfigs = + new HashMap<>(); + for (SubClusterPolicyConfiguration policyConfig : policyConfigs) { + queuePolicyConfigs.put(policyConfig.getQueue(), policyConfig); + } + return queuePolicyConfigs; + } + + private Object buildGetPoliciesConfigurationsCacheRequest() { + final String cacheKey = buildCacheKey(getClass().getSimpleName(), + GET_POLICIES_CONFIGURATIONS_CACHEID, null); + CacheRequest> cacheRequest = + new CacheRequest>( + cacheKey, + new Func>() { + @Override + public Map invoke( + String key) throws Exception { + GetSubClusterPoliciesConfigurationsResponse policyConfigs = + stateStore.getPoliciesConfigurations( + GetSubClusterPoliciesConfigurationsRequest + .newInstance()); + return buildPolicyConfigMap(policyConfigs); + } + }); + return cacheRequest; + } + + protected String buildCacheKey(String typeName, String methodName, + String argName) { + StringBuilder buffer = new StringBuilder(); + buffer.append(typeName).append("."); + buffer.append(methodName); + if (argName != null) { + buffer.append("::"); + buffer.append(argName); + } + return buffer.toString(); + } + + /** + * Internal class that implements the CacheLoader interface that can be + * plugged into the CacheManager to load objects into the cache for specified + * keys. 
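The non-retrying createInstance helper is what the facade uses for the SubClusterResolver; a minimal sketch, again mirroring initializeFacadeInternal (conf is an existing Configuration):

    SubClusterResolver resolver = FederationStateStoreFacade.createInstance(conf,
        YarnConfiguration.FEDERATION_CLUSTER_RESOLVER_CLASS,
        YarnConfiguration.DEFAULT_FEDERATION_CLUSTER_RESOLVER_CLASS,
        SubClusterResolver.class);
    // Load the resolver's mapping before first use
    resolver.load();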
+ */ + private static class CacheLoaderImpl implements CacheLoader { + @SuppressWarnings("unchecked") + @Override + public V load(K key) throws CacheLoaderException { + try { + CacheRequest query = (CacheRequest) key; + assert query != null; + return query.getValue(); + } catch (Throwable ex) { + throw new CacheLoaderException(ex); + } + } + + @Override + public Map loadAll(Iterable keys) + throws CacheLoaderException { + // The FACADE does not use the Cache's getAll API. Hence this is not + // required to be implemented + throw new NotImplementedException(); + } + } + + /** + * Internal class that encapsulates the cache key and a function that returns + * the value for the specified key. + */ + private static class CacheRequest { + private K key; + private Func func; + + public CacheRequest(K key, Func func) { + this.key = key; + this.func = func; + } + + public V getValue() throws Exception { + return func.invoke(key); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((key == null) ? 0 : key.hashCode()); + return result; + } + + @SuppressWarnings("unchecked") + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + CacheRequest other = (CacheRequest) obj; + if (key == null) { + if (other.key != null) { + return false; + } + } else if (!key.equals(other.key)) { + return false; + } + + return true; + } + } + + /** + * Encapsulates a method that has one parameter and returns a value of the + * type specified by the TResult parameter. + */ + protected interface Func { + TResult invoke(T input) throws Exception; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/package-info.java new file mode 100644 index 00000000000..39a46ec3e4c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.utils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java new file mode 100644 index 00000000000..08aee77fe6d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java @@ -0,0 +1,311 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.uam; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.utils.AMRMClientUtils; +import org.apache.hadoop.yarn.util.AsyncCallback; +import 
org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * A service that manages a pool of UAM managers in + * {@link UnmanagedApplicationManager}. + */ +@Public +@Unstable +public class UnmanagedAMPoolManager extends AbstractService { + public static final Logger LOG = + LoggerFactory.getLogger(UnmanagedAMPoolManager.class); + + // Map from uamId to UAM instances + private Map unmanagedAppMasterMap; + + private Map attemptIdMap; + + private ExecutorService threadpool; + + public UnmanagedAMPoolManager(ExecutorService threadpool) { + super(UnmanagedAMPoolManager.class.getName()); + this.threadpool = threadpool; + } + + @Override + protected void serviceStart() throws Exception { + if (this.threadpool == null) { + this.threadpool = Executors.newCachedThreadPool(); + } + this.unmanagedAppMasterMap = new ConcurrentHashMap<>(); + this.attemptIdMap = new ConcurrentHashMap<>(); + super.serviceStart(); + } + + /** + * Normally we should finish all applications before stop. If there are still + * UAMs running, force kill all of them. Do parallel kill because of + * performance reasons. + * + * TODO: move waiting for the kill to finish into a separate thread, without + * blocking the serviceStop. + */ + @Override + protected void serviceStop() throws Exception { + ExecutorCompletionService completionService = + new ExecutorCompletionService<>(this.threadpool); + if (this.unmanagedAppMasterMap.isEmpty()) { + return; + } + + // Save a local copy of the key set so that it won't change with the map + Set addressList = + new HashSet<>(this.unmanagedAppMasterMap.keySet()); + LOG.warn("Abnormal shutdown of UAMPoolManager, still {} UAMs in map", + addressList.size()); + + for (final String uamId : addressList) { + completionService.submit(new Callable() { + @Override + public KillApplicationResponse call() throws Exception { + try { + LOG.info("Force-killing UAM id " + uamId + " for application " + + attemptIdMap.get(uamId)); + return unmanagedAppMasterMap.remove(uamId).forceKillApplication(); + } catch (Exception e) { + LOG.error("Failed to kill unmanaged application master", e); + return null; + } + } + }); + } + + for (int i = 0; i < addressList.size(); ++i) { + try { + Future future = completionService.take(); + future.get(); + } catch (Exception e) { + LOG.error("Failed to kill unmanaged application master", e); + } + } + this.attemptIdMap.clear(); + super.serviceStop(); + } + + /** + * Create a new UAM and register the application, without specifying uamId and + * appId. We will ask for an appId from RM and use it as the uamId. 
+ * + * @param registerRequest RegisterApplicationMasterRequest + * @param conf configuration for this UAM + * @param queueName queue of the application + * @param submitter submitter name of the UAM + * @param appNameSuffix application name suffix for the UAM + * @return uamId for the UAM + * @throws YarnException if registerApplicationMaster fails + * @throws IOException if registerApplicationMaster fails + */ + public String createAndRegisterNewUAM( + RegisterApplicationMasterRequest registerRequest, Configuration conf, + String queueName, String submitter, String appNameSuffix) + throws YarnException, IOException { + ApplicationId appId = null; + ApplicationClientProtocol rmClient; + try { + UserGroupInformation appSubmitter = + UserGroupInformation.createRemoteUser(submitter); + rmClient = AMRMClientUtils.createRMProxy(conf, + ApplicationClientProtocol.class, appSubmitter, null); + + // Get a new appId from RM + GetNewApplicationResponse response = + rmClient.getNewApplication(GetNewApplicationRequest.newInstance()); + if (response == null) { + throw new YarnException("getNewApplication got null response"); + } + appId = response.getApplicationId(); + LOG.info("Received new application ID {} from RM", appId); + } finally { + rmClient = null; + } + + createAndRegisterNewUAM(appId.toString(), registerRequest, conf, appId, + queueName, submitter, appNameSuffix); + return appId.toString(); + } + + /** + * Create a new UAM and register the application, using the provided uamId and + * appId. + * + * @param uamId identifier for the UAM + * @param registerRequest RegisterApplicationMasterRequest + * @param conf configuration for this UAM + * @param appId application id for the UAM + * @param queueName queue of the application + * @param submitter submitter name of the UAM + * @param appNameSuffix application name suffix for the UAM + * @return RegisterApplicationMasterResponse + * @throws YarnException if registerApplicationMaster fails + * @throws IOException if registerApplicationMaster fails + */ + public RegisterApplicationMasterResponse createAndRegisterNewUAM(String uamId, + RegisterApplicationMasterRequest registerRequest, Configuration conf, + ApplicationId appId, String queueName, String submitter, + String appNameSuffix) throws YarnException, IOException { + + if (this.unmanagedAppMasterMap.containsKey(uamId)) { + throw new YarnException("UAM " + uamId + " already exists"); + } + UnmanagedApplicationManager uam = + createUAM(conf, appId, queueName, submitter, appNameSuffix); + // Put the UAM into map first before initializing it to avoid additional UAM + // for the same uamId being created concurrently + this.unmanagedAppMasterMap.put(uamId, uam); + + RegisterApplicationMasterResponse response = null; + try { + LOG.info("Creating and registering UAM id {} for application {}", uamId, + appId); + response = uam.createAndRegisterApplicationMaster(registerRequest); + } catch (Exception e) { + // Add the map earlier and remove here if register failed because we want + // to make sure there is only one uam instance per uamId at any given time + this.unmanagedAppMasterMap.remove(uamId); + throw e; + } + + this.attemptIdMap.put(uamId, uam.getAttemptId()); + return response; + } + + /** + * Creates the UAM instance. Pull out to make unit test easy. 
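A hedged end-to-end sketch of driving the pool (illustrative only; conf is an existing Configuration, and the queue, submitter and suffix strings are placeholders, not values mandated by this patch):

    // Error handling omitted for brevity.
    UnmanagedAMPoolManager uamPool = new UnmanagedAMPoolManager(null);
    uamPool.init(conf);
    uamPool.start();

    // Register a UAM in the target sub-cluster; the new appId doubles as the uamId
    String uamId = uamPool.createAndRegisterNewUAM(
        RegisterApplicationMasterRequest.newInstance("", 0, ""), conf,
        "default", "submitterUser", "-SC1");

    // Heartbeat through the UAM; the response arrives on the UAM's handler thread
    uamPool.allocateAsync(uamId,
        AllocateRequest.newInstance(0, 0f, null, null, null),
        new AsyncCallback<AllocateResponse>() {
          @Override
          public void callback(AllocateResponse response) {
            // merge the sub-cluster response into the application's global view
          }
        });

    // Unregister when the application completes
    uamPool.finishApplicationMaster(uamId,
        FinishApplicationMasterRequest.newInstance(
            FinalApplicationStatus.SUCCEEDED, "", ""));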
+ * + * @param conf Configuration + * @param appId application id + * @param queueName queue of the application + * @param submitter submitter name of the application + * @param appNameSuffix application name suffix + * @return the UAM instance + */ + @VisibleForTesting + protected UnmanagedApplicationManager createUAM(Configuration conf, + ApplicationId appId, String queueName, String submitter, + String appNameSuffix) { + return new UnmanagedApplicationManager(conf, appId, queueName, submitter, + appNameSuffix); + } + + /** + * AllocateAsync to an UAM. + * + * @param uamId identifier for the UAM + * @param request AllocateRequest + * @param callback callback for response + * @throws YarnException if allocate fails + * @throws IOException if allocate fails + */ + public void allocateAsync(String uamId, AllocateRequest request, + AsyncCallback callback) + throws YarnException, IOException { + if (!this.unmanagedAppMasterMap.containsKey(uamId)) { + throw new YarnException("UAM " + uamId + " does not exist"); + } + this.unmanagedAppMasterMap.get(uamId).allocateAsync(request, callback); + } + + /** + * Finish an UAM/application. + * + * @param uamId identifier for the UAM + * @param request FinishApplicationMasterRequest + * @return FinishApplicationMasterResponse + * @throws YarnException if finishApplicationMaster call fails + * @throws IOException if finishApplicationMaster call fails + */ + public FinishApplicationMasterResponse finishApplicationMaster(String uamId, + FinishApplicationMasterRequest request) + throws YarnException, IOException { + if (!this.unmanagedAppMasterMap.containsKey(uamId)) { + throw new YarnException("UAM " + uamId + " does not exist"); + } + LOG.info("Finishing application for UAM id {} ", uamId); + FinishApplicationMasterResponse response = + this.unmanagedAppMasterMap.get(uamId).finishApplicationMaster(request); + + if (response.getIsUnregistered()) { + // Only remove the UAM when the unregister finished + this.unmanagedAppMasterMap.remove(uamId); + this.attemptIdMap.remove(uamId); + LOG.info("UAM id {} is unregistered", uamId); + } + return response; + } + + /** + * Get the id of all running UAMs. + * + * @return uamId set + */ + public Set getAllUAMIds() { + // Return a clone of the current id set for concurrency reasons, so that the + // returned map won't change with the actual map + return new HashSet(this.unmanagedAppMasterMap.keySet()); + } + + /** + * Return whether an UAM exists. + * + * @param uamId identifier for the UAM + * @return UAM exists or not + */ + public boolean hasUAMId(String uamId) { + return this.unmanagedAppMasterMap.containsKey(uamId); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java new file mode 100644 index 00000000000..60a9a277eac --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java @@ -0,0 +1,607 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.uam; + +import java.io.IOException; +import java.lang.Thread.UncaughtExceptionHandler; +import java.util.EnumSet; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.factories.RecordFactory; +import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; +import org.apache.hadoop.yarn.server.utils.AMRMClientUtils; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils; +import org.apache.hadoop.yarn.util.AsyncCallback; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; + +/** + * UnmanagedApplicationManager is used to register unmanaged application and + * negotiate for resources from resource managers. An unmanagedAM is an AM that + * is not launched and managed by the RM. Allocate calls are handled + * asynchronously using {@link AsyncCallback}. + */ +@Public +@Unstable +public class UnmanagedApplicationManager { + private static final Logger LOG = + LoggerFactory.getLogger(UnmanagedApplicationManager.class); + private static final long AM_STATE_WAIT_TIMEOUT_MS = 10000; + private static final String APP_NAME = "UnmanagedAM"; + private static final String DEFAULT_QUEUE_CONFIG = "uam.default.queue.name"; + + private BlockingQueue requestQueue; + private AMRequestHandlerThread handlerThread; + private ApplicationMasterProtocol rmProxy; + private ApplicationId applicationId; + private ApplicationAttemptId attemptId; + private String submitter; + private String appNameSuffix; + private Configuration conf; + private String queueName; + private UserGroupInformation userUgi; + private RegisterApplicationMasterRequest registerRequest; + private int lastResponseId; + private ApplicationClientProtocol rmClient; + private long asyncApiPollIntervalMillis; + private RecordFactory recordFactory; + + public UnmanagedApplicationManager(Configuration conf, ApplicationId appId, + String queueName, String submitter, String appNameSuffix) { + Preconditions.checkNotNull(conf, "Configuration cannot be null"); + Preconditions.checkNotNull(appId, "ApplicationId cannot be null"); + Preconditions.checkNotNull(submitter, "App submitter cannot be null"); + + this.conf = conf; + this.applicationId = appId; + this.queueName = queueName; + this.submitter = submitter; + this.appNameSuffix = appNameSuffix; + this.handlerThread = new AMRequestHandlerThread(); + this.requestQueue = new LinkedBlockingQueue<>(); + this.rmProxy = null; + this.registerRequest = null; + this.recordFactory = RecordFactoryProvider.getRecordFactory(conf); + this.asyncApiPollIntervalMillis = conf.getLong( + YarnConfiguration. + YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS, + YarnConfiguration. + DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS); + } + + /** + * Registers this {@link UnmanagedApplicationManager} with the resource + * manager. 
+ * + * @param request the register request + * @return the register response + * @throws YarnException if register fails + * @throws IOException if register fails + */ + public RegisterApplicationMasterResponse createAndRegisterApplicationMaster( + RegisterApplicationMasterRequest request) + throws YarnException, IOException { + // This need to be done first in this method, because it is used as an + // indication that this method is called (and perhaps blocked due to RM + // connection and not finished yet) + this.registerRequest = request; + + // attemptId will be available after this call + UnmanagedAMIdentifier identifier = + initializeUnmanagedAM(this.applicationId); + + try { + this.userUgi = UserGroupInformation.createProxyUser( + identifier.getAttemptId().toString(), + UserGroupInformation.getCurrentUser()); + } catch (IOException e) { + LOG.error("Exception while trying to get current user", e); + throw new YarnRuntimeException(e); + } + + this.rmProxy = createRMProxy(ApplicationMasterProtocol.class, this.conf, + this.userUgi, identifier.getToken()); + + LOG.info("Registering the Unmanaged application master {}", this.attemptId); + RegisterApplicationMasterResponse response = + this.rmProxy.registerApplicationMaster(this.registerRequest); + + // Only when register succeed that we start the heartbeat thread + this.handlerThread.setUncaughtExceptionHandler( + new HeartBeatThreadUncaughtExceptionHandler()); + this.handlerThread.setDaemon(true); + this.handlerThread.start(); + + this.lastResponseId = 0; + return response; + } + + /** + * Unregisters from the resource manager and stops the request handler thread. + * + * @param request the finishApplicationMaster request + * @return the response + * @throws YarnException if finishAM call fails + * @throws IOException if finishAM call fails + */ + public FinishApplicationMasterResponse finishApplicationMaster( + FinishApplicationMasterRequest request) + throws YarnException, IOException { + + this.handlerThread.shutdown(); + + if (this.rmProxy == null) { + if (this.registerRequest != null) { + // This is possible if the async registerApplicationMaster is still + // blocked and retrying. Return a dummy response in this case. + LOG.warn("Unmanaged AM still not successfully launched/registered yet." + + " Stopping the UAM client thread anyways."); + return FinishApplicationMasterResponse.newInstance(false); + } else { + throw new YarnException("finishApplicationMaster should not " + + "be called before createAndRegister"); + } + } + return AMRMClientUtils.finishAMWithReRegister(request, this.rmProxy, + this.registerRequest, this.attemptId); + } + + /** + * Force kill the UAM. + * + * @return kill response + * @throws IOException if fails to create rmProxy + * @throws YarnException if force kill fails + */ + public KillApplicationResponse forceKillApplication() + throws IOException, YarnException { + KillApplicationRequest request = + KillApplicationRequest.newInstance(this.attemptId.getApplicationId()); + + this.handlerThread.shutdown(); + + if (this.rmClient == null) { + this.rmClient = createRMProxy(ApplicationClientProtocol.class, this.conf, + UserGroupInformation.createRemoteUser(this.submitter), null); + } + return this.rmClient.forceKillApplication(request); + } + + /** + * Sends the specified heart beat request to the resource manager and invokes + * the callback asynchronously with the response. 
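For completeness, a single UAM can also be driven without the pool manager; the sketch below is illustrative only (conf, appId and callback are placeholders held by the caller, and the pool manager above remains the more common entry point):

    UnmanagedApplicationManager uam = new UnmanagedApplicationManager(
        conf, appId, "default", "submitterUser", "-SC1");
    // Submits the unmanaged app, waits for LAUNCHED, then registers as the attempt
    uam.createAndRegisterApplicationMaster(
        RegisterApplicationMasterRequest.newInstance("", 0, ""));
    // Requests are queued and forwarded by the heartbeat thread
    uam.allocateAsync(AllocateRequest.newInstance(0, 0f, null, null, null), callback);
    uam.finishApplicationMaster(FinishApplicationMasterRequest.newInstance(
        FinalApplicationStatus.SUCCEEDED, "", ""));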
+ * + * @param request the allocate request + * @param callback the callback method for the request + * @throws YarnException if registerAM is not called yet + */ + public void allocateAsync(AllocateRequest request, + AsyncCallback callback) throws YarnException { + try { + this.requestQueue.put(new AsyncAllocateRequestInfo(request, callback)); + } catch (InterruptedException ex) { + // Should not happen as we have MAX_INT queue length + LOG.debug("Interrupted while waiting to put on response queue", ex); + } + // Two possible cases why the UAM is not successfully registered yet: + // 1. registerApplicationMaster is not called at all. Should throw here. + // 2. registerApplicationMaster is called but hasn't successfully returned. + // + // In case 2, we have already save the allocate request above, so if the + // registration succeed later, no request is lost. + if (this.rmProxy == null) { + if (this.registerRequest != null) { + LOG.info("Unmanaged AM still not successfully launched/registered yet." + + " Saving the allocate request and send later."); + } else { + throw new YarnException( + "AllocateAsync should not be called before createAndRegister"); + } + } + } + + /** + * Returns the application attempt id of the UAM. + * + * @return attempt id of the UAM + */ + public ApplicationAttemptId getAttemptId() { + return this.attemptId; + } + + /** + * Returns RM proxy for the specified protocol type. Unit test cases can + * override this method and return mock proxy instances. + * + * @param protocol protocal of the proxy + * @param config configuration + * @param user ugi for the proxy connection + * @param token token for the connection + * @param type of the proxy + * @return the proxy instance + * @throws IOException if fails to create the proxy + */ + protected T createRMProxy(Class protocol, Configuration config, + UserGroupInformation user, Token token) + throws IOException { + return AMRMClientUtils.createRMProxy(config, protocol, user, token); + } + + /** + * Launch and initialize an unmanaged AM. First, it creates a new application + * on the RM and negotiates a new attempt id. Then it waits for the RM + * application attempt state to reach YarnApplicationAttemptState.LAUNCHED + * after which it returns the AM-RM token and the attemptId. 
+ * + * @param appId application id + * @return the UAM identifier + * @throws IOException if initialize fails + * @throws YarnException if initialize fails + */ + protected UnmanagedAMIdentifier initializeUnmanagedAM(ApplicationId appId) + throws IOException, YarnException { + try { + UserGroupInformation appSubmitter = + UserGroupInformation.createRemoteUser(this.submitter); + this.rmClient = createRMProxy(ApplicationClientProtocol.class, this.conf, + appSubmitter, null); + + // Submit the application + submitUnmanagedApp(appId); + + // Monitor the application attempt to wait for launch state + ApplicationAttemptReport attemptReport = monitorCurrentAppAttempt(appId, + EnumSet.of(YarnApplicationState.ACCEPTED, + YarnApplicationState.RUNNING, YarnApplicationState.KILLED, + YarnApplicationState.FAILED, YarnApplicationState.FINISHED), + YarnApplicationAttemptState.LAUNCHED); + this.attemptId = attemptReport.getApplicationAttemptId(); + return getUAMIdentifier(); + } finally { + this.rmClient = null; + } + } + + private void submitUnmanagedApp(ApplicationId appId) + throws YarnException, IOException { + SubmitApplicationRequest submitRequest = + this.recordFactory.newRecordInstance(SubmitApplicationRequest.class); + + ApplicationSubmissionContext context = this.recordFactory + .newRecordInstance(ApplicationSubmissionContext.class); + + context.setApplicationId(appId); + context.setApplicationName(APP_NAME + "-" + appNameSuffix); + if (StringUtils.isBlank(this.queueName)) { + context.setQueue(this.conf.get(DEFAULT_QUEUE_CONFIG, + YarnConfiguration.DEFAULT_QUEUE_NAME)); + } else { + context.setQueue(this.queueName); + } + + ContainerLaunchContext amContainer = + this.recordFactory.newRecordInstance(ContainerLaunchContext.class); + Resource resource = BuilderUtils.newResource(1024, 1); + context.setResource(resource); + context.setAMContainerSpec(amContainer); + submitRequest.setApplicationSubmissionContext(context); + + context.setUnmanagedAM(true); + + LOG.info("Submitting unmanaged application {}", appId); + this.rmClient.submitApplication(submitRequest); + } + + /** + * Monitor the submitted application and attempt until it reaches certain + * states. + * + * @param appId Application Id of application to be monitored + * @param appStates acceptable application state + * @param attemptState acceptable application attempt state + * @return the application report + * @throws YarnException if getApplicationReport fails + * @throws IOException if getApplicationReport fails + */ + private ApplicationAttemptReport monitorCurrentAppAttempt(ApplicationId appId, + Set appStates, + YarnApplicationAttemptState attemptState) + throws YarnException, IOException { + + long startTime = System.currentTimeMillis(); + ApplicationAttemptId appAttemptId = null; + while (true) { + if (appAttemptId == null) { + // Get application report for the appId we are interested in + ApplicationReport report = getApplicationReport(appId); + YarnApplicationState state = report.getYarnApplicationState(); + if (appStates.contains(state)) { + if (state != YarnApplicationState.ACCEPTED) { + throw new YarnRuntimeException( + "Received non-accepted application state: " + state + + ". 
Application " + appId + " not the first attempt?"); + } + appAttemptId = + getApplicationReport(appId).getCurrentApplicationAttemptId(); + } else { + LOG.info("Current application state of {} is {}, will retry later.", + appId, state); + } + } + + if (appAttemptId != null) { + GetApplicationAttemptReportRequest req = this.recordFactory + .newRecordInstance(GetApplicationAttemptReportRequest.class); + req.setApplicationAttemptId(appAttemptId); + ApplicationAttemptReport attemptReport = this.rmClient + .getApplicationAttemptReport(req).getApplicationAttemptReport(); + if (attemptState + .equals(attemptReport.getYarnApplicationAttemptState())) { + return attemptReport; + } + LOG.info("Current attempt state of " + appAttemptId + " is " + + attemptReport.getYarnApplicationAttemptState() + + ", waiting for current attempt to reach " + attemptState); + } + + try { + Thread.sleep(this.asyncApiPollIntervalMillis); + } catch (InterruptedException e) { + LOG.warn("Interrupted while waiting for current attempt of " + appId + + " to reach " + attemptState); + } + + if (System.currentTimeMillis() - startTime > AM_STATE_WAIT_TIMEOUT_MS) { + throw new RuntimeException("Timeout for waiting current attempt of " + + appId + " to reach " + attemptState); + } + } + } + + /** + * Gets the identifier of the unmanaged AM. + * + * @return the identifier of the unmanaged AM. + * @throws IOException if getApplicationReport fails + * @throws YarnException if getApplicationReport fails + */ + protected UnmanagedAMIdentifier getUAMIdentifier() + throws IOException, YarnException { + Token token = null; + org.apache.hadoop.yarn.api.records.Token amrmToken = + getApplicationReport(this.attemptId.getApplicationId()).getAMRMToken(); + if (amrmToken != null) { + token = ConverterUtils.convertFromYarn(amrmToken, (Text) null); + } else { + LOG.warn( + "AMRMToken not found in the application report for application: {}", + this.attemptId.getApplicationId()); + } + return new UnmanagedAMIdentifier(this.attemptId, token); + } + + private ApplicationReport getApplicationReport(ApplicationId appId) + throws YarnException, IOException { + GetApplicationReportRequest request = + this.recordFactory.newRecordInstance(GetApplicationReportRequest.class); + request.setApplicationId(appId); + return this.rmClient.getApplicationReport(request).getApplicationReport(); + } + + /** + * Data structure that encapsulates the application attempt identifier and the + * AMRMTokenIdentifier. Make it public because clients with HA need it. + */ + public static class UnmanagedAMIdentifier { + private ApplicationAttemptId attemptId; + private Token token; + + public UnmanagedAMIdentifier(ApplicationAttemptId attemptId, + Token token) { + this.attemptId = attemptId; + this.token = token; + } + + public ApplicationAttemptId getAttemptId() { + return this.attemptId; + } + + public Token getToken() { + return this.token; + } + } + + /** + * Data structure that encapsulates AllocateRequest and AsyncCallback + * instance. 
+ */ + public static class AsyncAllocateRequestInfo { + private AllocateRequest request; + private AsyncCallback callback; + + public AsyncAllocateRequestInfo(AllocateRequest request, + AsyncCallback callback) { + Preconditions.checkArgument(request != null, + "AllocateRequest cannot be null"); + Preconditions.checkArgument(callback != null, "Callback cannot be null"); + + this.request = request; + this.callback = callback; + } + + public AsyncCallback getCallback() { + return this.callback; + } + + public AllocateRequest getRequest() { + return this.request; + } + } + + @VisibleForTesting + public int getRequestQueueSize() { + return this.requestQueue.size(); + } + + /** + * Extends Thread and provides an implementation that is used for processing + * the AM heart beat request asynchronously and sending back the response + * using the callback method registered with the system. + */ + public class AMRequestHandlerThread extends Thread { + + // Indication flag for the thread to keep running + private volatile boolean keepRunning; + + public AMRequestHandlerThread() { + super("UnmanagedApplicationManager Heartbeat Handler Thread"); + this.keepRunning = true; + } + + /** + * Shutdown the thread. + */ + public void shutdown() { + this.keepRunning = false; + this.interrupt(); + } + + @Override + public void run() { + while (keepRunning) { + AsyncAllocateRequestInfo requestInfo; + try { + requestInfo = requestQueue.take(); + if (requestInfo == null) { + throw new YarnException( + "Null requestInfo taken from request queue"); + } + if (!keepRunning) { + break; + } + + // change the response id before forwarding the allocate request as we + // could have different values for each UAM + AllocateRequest request = requestInfo.getRequest(); + if (request == null) { + throw new YarnException("Null allocateRequest from requestInfo"); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Sending Heartbeat to Unmanaged AM. AskList:" + + ((request.getAskList() == null) ? " empty" + : request.getAskList().size())); + } + + request.setResponseId(lastResponseId); + AllocateResponse response = AMRMClientUtils.allocateWithReRegister( + request, rmProxy, registerRequest, attemptId); + if (response == null) { + throw new YarnException("Null allocateResponse from allocate"); + } + + lastResponseId = response.getResponseId(); + // update token if RM has reissued/renewed + if (response.getAMRMToken() != null) { + LOG.debug("Received new AMRMToken"); + YarnServerSecurityUtils.updateAMRMToken(response.getAMRMToken(), + userUgi, conf); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Received Heartbeat reply from RM. Allocated Containers:" + + ((response.getAllocatedContainers() == null) ? " empty" + : response.getAllocatedContainers().size())); + } + + if (requestInfo.getCallback() == null) { + throw new YarnException("Null callback from requestInfo"); + } + requestInfo.getCallback().callback(response); + } catch (InterruptedException ex) { + if (LOG.isDebugEnabled()) { + LOG.debug("Interrupted while waiting for queue", ex); + } + } catch (IOException ex) { + LOG.warn( + "IO Error occurred while processing heart beat for " + attemptId, + ex); + } catch (Throwable ex) { + LOG.warn( + "Error occurred while processing heart beat for " + attemptId, + ex); + } + } + + LOG.info("UnmanagedApplicationManager has been stopped for {}. " + + "AMRequestHandlerThread thread is exiting", attemptId); + } + } + + /** + * Uncaught exception handler for the background heartbeat thread. 
+ */ + protected class HeartBeatThreadUncaughtExceptionHandler + implements UncaughtExceptionHandler { + @Override + public void uncaughtException(Thread t, Throwable e) { + LOG.error("Heartbeat thread {} for application attempt {} crashed!", + t.getName(), attemptId, e); + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/package-info.java new file mode 100644 index 00000000000..0e78094d60a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/package-info.java @@ -0,0 +1,18 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.uam; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java new file mode 100644 index 00000000000..7993bd8a5ec --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java @@ -0,0 +1,189 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.utils; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.client.ClientRMProxy; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException; +import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Utility class for AMRMClient. + */ +@Private +public final class AMRMClientUtils { + private static final Logger LOG = + LoggerFactory.getLogger(AMRMClientUtils.class); + + public static final String APP_ALREADY_REGISTERED_MESSAGE = + "Application Master is already registered : "; + + private AMRMClientUtils() { + } + + /** + * Handle ApplicationNotRegistered exception and re-register. + * + * @param attemptId app attemptId + * @param rmProxy RM proxy instance + * @param registerRequest the AM re-register request + * @throws YarnException if re-register fails + */ + public static void handleNotRegisteredExceptionAndReRegister( + ApplicationAttemptId attemptId, ApplicationMasterProtocol rmProxy, + RegisterApplicationMasterRequest registerRequest) throws YarnException { + LOG.info("App attempt {} not registered, most likely due to RM failover. " + + " Trying to re-register.", attemptId); + try { + rmProxy.registerApplicationMaster(registerRequest); + } catch (Exception e) { + if (e instanceof InvalidApplicationMasterRequestException + && e.getMessage().contains(APP_ALREADY_REGISTERED_MESSAGE)) { + LOG.info("Concurrent thread successfully registered, moving on."); + } else { + LOG.error("Error trying to re-register AM", e); + throw new YarnException(e); + } + } + } + + /** + * Helper method for client calling ApplicationMasterProtocol.allocate that + * handles re-register if RM fails over. 
+ * + * @param request allocate request + * @param rmProxy RM proxy + * @param registerRequest the register request for re-register + * @param attemptId application attempt id + * @return allocate response + * @throws YarnException if RM call fails + * @throws IOException if RM call fails + */ + public static AllocateResponse allocateWithReRegister(AllocateRequest request, + ApplicationMasterProtocol rmProxy, + RegisterApplicationMasterRequest registerRequest, + ApplicationAttemptId attemptId) throws YarnException, IOException { + try { + return rmProxy.allocate(request); + } catch (ApplicationMasterNotRegisteredException e) { + handleNotRegisteredExceptionAndReRegister(attemptId, rmProxy, + registerRequest); + // reset responseId after re-register + request.setResponseId(0); + // retry allocate + return allocateWithReRegister(request, rmProxy, registerRequest, + attemptId); + } + } + + /** + * Helper method for client calling + * ApplicationMasterProtocol.finishApplicationMaster that handles re-register + * if RM fails over. + * + * @param request finishApplicationMaster request + * @param rmProxy RM proxy + * @param registerRequest the register request for re-register + * @param attemptId application attempt id + * @return finishApplicationMaster response + * @throws YarnException if RM call fails + * @throws IOException if RM call fails + */ + public static FinishApplicationMasterResponse finishAMWithReRegister( + FinishApplicationMasterRequest request, ApplicationMasterProtocol rmProxy, + RegisterApplicationMasterRequest registerRequest, + ApplicationAttemptId attemptId) throws YarnException, IOException { + try { + return rmProxy.finishApplicationMaster(request); + } catch (ApplicationMasterNotRegisteredException ex) { + handleNotRegisteredExceptionAndReRegister(attemptId, rmProxy, + registerRequest); + // retry finishAM after re-register + return finishAMWithReRegister(request, rmProxy, registerRequest, + attemptId); + } + } + + /** + * Create a proxy for the specified protocol. 
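// Illustrative usage sketch for the helpers above: an AM-side heartbeat that
// survives RM failover by delegating re-registration to AMRMClientUtils. The
// rmProxy, registerRequest, attemptId and lastResponseId variables are assumed
// to be in scope.
AllocateRequest heartbeat =
    AllocateRequest.newInstance(lastResponseId, 0.0f, null, null, null);
AllocateResponse response = AMRMClientUtils.allocateWithReRegister(
    heartbeat, rmProxy, registerRequest, attemptId);
lastResponseId = response.getResponseId();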
+ * + * @param configuration Configuration to generate {@link ClientRMProxy} + * @param protocol Protocol for the proxy + * @param user the user on whose behalf the proxy is being created + * @param token the auth token to use for connection + * @param Type information of the proxy + * @return Proxy to the RM + * @throws IOException on failure + */ + @Public + @Unstable + public static T createRMProxy(final Configuration configuration, + final Class protocol, UserGroupInformation user, + final Token token) throws IOException { + try { + String rmClusterId = configuration.get(YarnConfiguration.RM_CLUSTER_ID, + YarnConfiguration.DEFAULT_RM_CLUSTER_ID); + LOG.info("Creating RMProxy to RM {} for protocol {} for user {}", + rmClusterId, protocol.getSimpleName(), user); + if (token != null) { + token.setService(ClientRMProxy.getAMRMTokenService(configuration)); + user.addToken(token); + setAuthModeInConf(configuration); + } + final T proxyConnection = user.doAs(new PrivilegedExceptionAction() { + @Override + public T run() throws Exception { + return ClientRMProxy.createRMProxy(configuration, protocol); + } + }); + return proxyConnection; + + } catch (InterruptedException e) { + throw new YarnRuntimeException(e); + } + } + + private static void setAuthModeInConf(Configuration conf) { + conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, + SaslRpcServer.AuthMethod.TOKEN.toString()); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java index 9af556e7c8b..e61798d2e38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java @@ -23,13 +23,16 @@ import java.nio.ByteBuffer; import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataInputByteBuffer; +import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; @@ -42,8 +45,8 @@ import org.slf4j.LoggerFactory; */ @Private public final class YarnServerSecurityUtils { - private static final Logger LOG = LoggerFactory - .getLogger(YarnServerSecurityUtils.class); + private static final Logger LOG = + LoggerFactory.getLogger(YarnServerSecurityUtils.class); private YarnServerSecurityUtils() { } @@ -55,8 +58,7 @@ public final class YarnServerSecurityUtils { * @return the AMRMTokenIdentifier instance for the current user * @throws YarnException */ - public static AMRMTokenIdentifier authorizeRequest() - throws YarnException { + public static AMRMTokenIdentifier 
authorizeRequest() throws YarnException { UserGroupInformation remoteUgi; try { @@ -82,9 +84,8 @@ public final class YarnServerSecurityUtils { } } catch (IOException e) { tokenFound = false; - message = - "Got exception while looking for AMRMToken for user " - + remoteUgi.getUserName(); + message = "Got exception while looking for AMRMToken for user " + + remoteUgi.getUserName(); } if (!tokenFound) { @@ -112,9 +113,30 @@ public final class YarnServerSecurityUtils { return result; } + /** + * Update the new AMRMToken into the ugi used for RM proxy. + * + * @param token the new AMRMToken sent by RM + * @param user ugi used for RM proxy + * @param conf configuration + */ + public static void updateAMRMToken( + org.apache.hadoop.yarn.api.records.Token token, UserGroupInformation user, + Configuration conf) { + Token amrmToken = new Token( + token.getIdentifier().array(), token.getPassword().array(), + new Text(token.getKind()), new Text(token.getService())); + // Preserve the token service sent by the RM when adding the token + // to ensure we replace the previous token setup by the RM. + // Afterwards we can update the service address for the RPC layer. + user.addToken(amrmToken); + amrmToken.setService(ClientRMProxy.getAMRMTokenService(conf)); + } + /** * Parses the container launch context and returns a Credential instance that - * contains all the tokens from the launch context. + * contains all the tokens from the launch context. + * * @param launchContext * @return the credential instance * @throws IOException @@ -130,8 +152,7 @@ public final class YarnServerSecurityUtils { buf.reset(tokens); credentials.readTokenStorageStream(buf); if (LOG.isDebugEnabled()) { - for (Token tk : credentials - .getAllTokens()) { + for (Token tk : credentials.getAllTokens()) { LOG.debug(tk.getService() + " = " + tk.toString()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java index 798c3726739..87c554d4bec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java @@ -36,10 +36,9 @@ import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; -import org.apache.hadoop.yarn.util.ConverterUtils; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; import com.google.inject.Inject; @@ -94,7 +93,7 @@ public class AppAttemptBlock extends HtmlBlock { String message = "Failed to read the application attempt " + appAttemptId + "."; LOG.error(message, e); - html.p()._(message)._(); + html.p().__(message).__(); return; 
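// Illustrative sketch of the hamlet2 style used throughout these web blocks:
// the former _() terminator becomes __(). A small table is opened, populated
// and closed like this; the "#example" id and the cell values are placeholders.
TBODY<TABLE<Hamlet>> tbody = html.table("#example")
    .thead().tr().th(".id", "ID").th(".state", "State").__().__()
    .tbody();
tbody.tr().td("application_1").td("RUNNING").__();
tbody.__().__();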
} @@ -140,14 +139,14 @@ public class AppAttemptBlock extends HtmlBlock { if (exceptionWhenGetContainerReports) { html .p() - ._( + .__( "Sorry, Failed to get containers for application attempt" + attemptid - + ".")._(); + + ".").__(); return; } createAttemptHeadRoomTable(html); - html._(InfoBlock.class); + html.__(InfoBlock.class); createTablesForAttemptMetrics(html); @@ -155,7 +154,7 @@ public class AppAttemptBlock extends HtmlBlock { TBODY> tbody = html.table("#containers").thead().tr().th(".id", "Container ID") .th(".node", "Node").th(".exitstatus", "Container Exit Status") - .th(".logs", "Logs")._()._().tbody(); + .th(".logs", "Logs").__().__().tbody(); StringBuilder containersTableData = new StringBuilder("[\n"); for (ContainerReport containerReport : containers) { @@ -186,9 +185,9 @@ public class AppAttemptBlock extends HtmlBlock { } containersTableData.append("]"); html.script().$type("text/javascript") - ._("var containersTableData=" + containersTableData)._(); + .__("var containersTableData=" + containersTableData).__(); - tbody._()._(); + tbody.__().__(); } protected void generateOverview(ApplicationAttemptReport appAttemptReport, @@ -196,18 +195,18 @@ public class AppAttemptBlock extends HtmlBlock { String node) { String amContainerId = appAttempt.getAmContainerId(); info("Application Attempt Overview") - ._( + .__( "Application Attempt State:", appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt .getAppAttemptState()) - ._("AM Container:", + .__("AM Container:", amContainerId == null || containers == null || !hasAMContainer(appAttemptReport.getAMContainerId(), containers) ? null : root_url("container", amContainerId), amContainerId == null ? "N/A" : amContainerId) - ._("Node:", node) - ._( + .__("Node:", node) + .__( "Tracking URL:", appAttempt.getTrackingUrl() == null || appAttempt.getTrackingUrl().equals(UNAVAILABLE) ? null @@ -219,7 +218,7 @@ public class AppAttemptBlock extends HtmlBlock { || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FAILED || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.KILLED ? "History" : "ApplicationMaster") - ._( + .__( "Diagnostics Info:", appAttempt.getDiagnosticsInfo() == null ? 
"" : appAttempt .getDiagnosticsInfo()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java index 11bd9b47185..693aa046df5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java @@ -30,6 +30,7 @@ import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.http.RestCsrfPreventionFilter; @@ -55,9 +56,9 @@ import org.apache.hadoop.yarn.util.Apps; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.ResponseInfo; import org.apache.hadoop.yarn.webapp.YarnWebParams; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -70,6 +71,8 @@ public class AppBlock extends HtmlBlock { protected ApplicationBaseProtocol appBaseProt; protected Configuration conf; protected ApplicationId appID = null; + private boolean unsecuredUI = true; + @Inject protected AppBlock(ApplicationBaseProtocol appBaseProt, ViewContext ctx, @@ -77,6 +80,9 @@ public class AppBlock extends HtmlBlock { super(ctx); this.appBaseProt = appBaseProt; this.conf = conf; + // check if UI is unsecured. + String httpAuth = conf.get(CommonConfigurationKeys.HADOOP_HTTP_AUTHENTICATION_TYPE); + this.unsecuredUI = (httpAuth != null) && httpAuth.equals("simple"); } @Override @@ -116,7 +122,7 @@ public class AppBlock extends HtmlBlock { } catch (Exception e) { String message = "Failed to read the application " + appID + "."; LOG.error(message, e); - html.p()._(message)._(); + html.p().__(message).__(); return; } @@ -129,15 +135,21 @@ public class AppBlock extends HtmlBlock { setTitle(join("Application ", aid)); + // YARN-6890. for secured cluster allow anonymous UI access, application kill + // shouldn't be there. 
+ boolean unsecuredUIForSecuredCluster = UserGroupInformation.isSecurityEnabled() + && this.unsecuredUI; + if (webUiType != null && webUiType.equals(YarnWebParams.RM_WEB_UI) && conf.getBoolean(YarnConfiguration.RM_WEBAPP_UI_ACTIONS_ENABLED, - YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED)) { + YarnConfiguration.DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED) + && !unsecuredUIForSecuredCluster) { // Application Kill html.div() .button() - .$onclick("confirmAction()").b("Kill Application")._() - ._(); + .$onclick("confirmAction()").b("Kill Application").__() + .__(); StringBuilder script = new StringBuilder(); script.append("function confirmAction() {") @@ -160,7 +172,7 @@ public class AppBlock extends HtmlBlock { .append(" }") .append("}"); - html.script().$type("text/javascript")._(script.toString())._(); + html.script().$type("text/javascript").__(script.toString()).__(); } String schedulerPath = WebAppUtils.getResolvedRMWebAppURLWithScheme(conf) + @@ -185,13 +197,13 @@ public class AppBlock extends HtmlBlock { String message = "Failed to read the attempts of the application " + appID + "."; LOG.error(message, e); - html.p()._(message)._(); + html.p().__(message).__(); return; } createApplicationMetricsTable(html); - html._(InfoBlock.class); + html.__(InfoBlock.class); generateApplicationTable(html, callerUGI, attempts); @@ -207,25 +219,25 @@ public class AppBlock extends HtmlBlock { private void generateOverviewTable(AppInfo app, String schedulerPath, String webUiType, ApplicationReport appReport) { ResponseInfo overviewTable = info("Application Overview") - ._("User:", schedulerPath, app.getUser()) - ._("Name:", app.getName()) - ._("Application Type:", app.getType()) - ._("Application Tags:", + .__("User:", schedulerPath, app.getUser()) + .__("Name:", app.getName()) + .__("Application Type:", app.getType()) + .__("Application Tags:", app.getApplicationTags() == null ? "" : app.getApplicationTags()) - ._("Application Priority:", clarifyAppPriority(app.getPriority())) - ._( + .__("Application Priority:", clarifyAppPriority(app.getPriority())) + .__( "YarnApplicationState:", app.getAppState() == null ? UNAVAILABLE : clarifyAppState(app .getAppState())) - ._("Queue:", schedulerPath, app.getQueue()) - ._("FinalStatus Reported by AM:", + .__("Queue:", schedulerPath, app.getQueue()) + .__("FinalStatus Reported by AM:", clairfyAppFinalStatus(app.getFinalAppStatus())) - ._("Started:", Times.format(app.getStartedTime())) - ._( + .__("Started:", Times.format(app.getStartedTime())) + .__( "Elapsed:", StringUtils.formatTime(Times.elapsed(app.getStartedTime(), app.getFinishedTime()))) - ._( + .__( "Tracking URL:", app.getTrackingUrl() == null || app.getTrackingUrl().equals(UNAVAILABLE) ? 
null : root_url(app @@ -240,31 +252,31 @@ public class AppBlock extends HtmlBlock { && webUiType.equals(YarnWebParams.RM_WEB_UI)) { LogAggregationStatus status = getLogAggregationStatus(); if (status == null) { - overviewTable._("Log Aggregation Status:", "N/A"); + overviewTable.__("Log Aggregation Status:", "N/A"); } else if (status == LogAggregationStatus.DISABLED || status == LogAggregationStatus.NOT_START || status == LogAggregationStatus.SUCCEEDED) { - overviewTable._("Log Aggregation Status:", status.name()); + overviewTable.__("Log Aggregation Status:", status.name()); } else { - overviewTable._("Log Aggregation Status:", + overviewTable.__("Log Aggregation Status:", root_url("logaggregationstatus", app.getAppId()), status.name()); } long timeout = appReport.getApplicationTimeouts() .get(ApplicationTimeoutType.LIFETIME).getRemainingTime(); if (timeout < 0) { - overviewTable._("Application Timeout (Remaining Time):", "Unlimited"); + overviewTable.__("Application Timeout (Remaining Time):", "Unlimited"); } else { - overviewTable._("Application Timeout (Remaining Time):", + overviewTable.__("Application Timeout (Remaining Time):", String.format("%d seconds", timeout)); } } - overviewTable._("Diagnostics:", + overviewTable.__("Diagnostics:", app.getDiagnosticsInfo() == null ? "" : app.getDiagnosticsInfo()); - overviewTable._("Unmanaged Application:", app.isUnmanagedApp()); - overviewTable._("Application Node Label expression:", + overviewTable.__("Unmanaged Application:", app.isUnmanagedApp()); + overviewTable.__("Application Node Label expression:", app.getAppNodeLabelExpression() == null ? "" : app.getAppNodeLabelExpression()); - overviewTable._("AM container Node Label expression:", + overviewTable.__("AM container Node Label expression:", app.getAmNodeLabelExpression() == null ? 
"" : app.getAmNodeLabelExpression()); } @@ -276,7 +288,7 @@ public class AppBlock extends HtmlBlock { TBODY> tbody = html.table("#attempts").thead().tr().th(".id", "Attempt ID") .th(".started", "Started").th(".node", "Node").th(".logs", "Logs") - ._()._().tbody(); + .__().__().tbody(); StringBuilder attemptsTableData = new StringBuilder("[\n"); for (final ApplicationAttemptReport appAttemptReport : attempts) { @@ -312,7 +324,7 @@ public class AppBlock extends HtmlBlock { "Failed to read the AM container of the application attempt " + appAttemptReport.getApplicationAttemptId() + "."; LOG.error(message, e); - html.p()._(message)._(); + html.p().__(message).__(); return; } long startTime = 0L; @@ -346,9 +358,9 @@ public class AppBlock extends HtmlBlock { } attemptsTableData.append("]"); html.script().$type("text/javascript") - ._("var attemptsTableData=" + attemptsTableData)._(); + .__("var attemptsTableData=" + attemptsTableData).__(); - tbody._()._(); + tbody.__().__(); } private String clarifyAppState(YarnApplicationState state) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java index 7f42343658d..d836e641177 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java @@ -44,9 +44,9 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.webapp.dao.AppInfo; import org.apache.hadoop.yarn.webapp.BadRequestException; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -134,7 +134,7 @@ public class AppsBlock extends HtmlBlock { catch( Exception e) { String message = "Failed to read the applications."; LOG.error(message, e); - html.p()._(message)._(); + html.p().__(message).__(); return; } renderData(html); @@ -147,7 +147,7 @@ public class AppsBlock extends HtmlBlock { .th(".queue", "Queue").th(".priority", "Application Priority") .th(".starttime", "StartTime").th(".finishtime", "FinishTime") .th(".state", "State").th(".finalstatus", "FinalStatus") - .th(".progress", "Progress").th(".ui", "Tracking UI")._()._().tbody(); + .th(".progress", "Progress").th(".ui", "Tracking UI").__().__().tbody(); StringBuilder appsTableData = new StringBuilder("[\n"); for (ApplicationReport appReport : appReports) { @@ -218,8 +218,8 @@ public class AppsBlock extends HtmlBlock { } appsTableData.append("]"); html.script().$type("text/javascript") - ._("var appsTableData=" + appsTableData)._(); + .__("var appsTableData=" + appsTableData).__(); - tbody._()._(); + tbody.__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java index 893e82384f2..fa35a3d5273 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ContainerBlock.java @@ -31,7 +31,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -86,7 +85,7 @@ public class ContainerBlock extends HtmlBlock { } catch (Exception e) { String message = "Failed to read the container " + containerid + "."; LOG.error(message, e); - html.p()._(message)._(); + html.p().__(message).__(); return; } @@ -99,32 +98,32 @@ public class ContainerBlock extends HtmlBlock { setTitle(join("Container ", containerid)); info("Container Overview") - ._( + .__( "Container State:", container.getContainerState() == null ? UNAVAILABLE : container .getContainerState()) - ._("Exit Status:", container.getContainerExitStatus()) - ._( + .__("Exit Status:", container.getContainerExitStatus()) + .__( "Node:", container.getNodeHttpAddress() == null ? "#" : container .getNodeHttpAddress(), container.getNodeHttpAddress() == null ? "N/A" : container .getNodeHttpAddress()) - ._("Priority:", container.getPriority()) - ._("Started:", Times.format(container.getStartedTime())) - ._( + .__("Priority:", container.getPriority()) + .__("Started:", Times.format(container.getStartedTime())) + .__( "Elapsed:", StringUtils.formatTime(Times.elapsed(container.getStartedTime(), container.getFinishedTime()))) - ._( + .__( "Resource:", container.getAllocatedMB() + " Memory, " + container.getAllocatedVCores() + " VCores") - ._("Logs:", container.getLogUrl() == null ? "#" : container.getLogUrl(), + .__("Logs:", container.getLogUrl() == null ? "#" : container.getLogUrl(), container.getLogUrl() == null ? "N/A" : "Logs") - ._("Diagnostics:", container.getDiagnosticsInfo() == null ? + .__("Diagnostics:", container.getDiagnosticsInfo() == null ? 
"" : container.getDiagnosticsInfo()); - html._(InfoBlock.class); + html.__(InfoBlock.class); } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java index 56db66c3605..75f6fcd254a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java @@ -28,7 +28,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.security.AdminACLsManager; import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender; import org.apache.hadoop.yarn.util.Times; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import java.util.ArrayList; @@ -74,19 +74,19 @@ public class ErrorsAndWarningsBlock extends HtmlBlock { } if (!isAdmin) { - html.div().p()._("This page is for admins only.")._()._(); + html.div().p().__("This page is for admins only.").__().__(); return; } if (log instanceof Log4JLogger) { - html._(ErrorMetrics.class); - html._(WarningMetrics.class); + html.__(ErrorMetrics.class); + html.__(WarningMetrics.class); html.div().button().$onclick("reloadPage()").b("View data for the last ") - ._().select().$id("cutoff").option().$value("60")._("1 min")._() - .option().$value("300")._("5 min")._().option().$value("900") - ._("15 min")._().option().$value("3600")._("1 hour")._().option() - .$value("21600")._("6 hours")._().option().$value("43200") - ._("12 hours")._().option().$value("86400")._("24 hours")._()._()._(); + .__().select().$id("cutoff").option().$value("60").__("1 min").__() + .option().$value("300").__("5 min").__().option().$value("900") + .__("15 min").__().option().$value("3600").__("1 hour").__().option() + .$value("21600").__("6 hours").__().option().$value("43200") + .__("12 hours").__().option().$value("86400").__("24 hours").__().__().__(); String script = "function reloadPage() {" + " var timePeriod = $(\"#cutoff\").val();" @@ -97,7 +97,7 @@ public class ErrorsAndWarningsBlock extends HtmlBlock { + " $(element).parent().siblings('.toggle-content').fadeToggle();" + "}"; - html.script().$type("text/javascript")._(script)._(); + html.script().$type("text/javascript").__(script).__(); html.style(".toggle-content { display: none; }"); @@ -110,7 +110,7 @@ public class ErrorsAndWarningsBlock extends HtmlBlock { Hamlet.TBODY> errorsTable = html.table("#messages").thead().tr().th(".message", "Message") .th(".type", "Type").th(".count", "Count") - .th(".lasttime", "Latest Message Time")._()._().tbody(); + .th(".lasttime", "Latest Message Time").__().__().tbody(); // cutoff has to be in seconds cutoff.add((Time.now() - cutoffPeriodSeconds * 1000) / 1000); @@ -145,18 +145,18 @@ public class ErrorsAndWarningsBlock extends HtmlBlock { } cell.pre().a().$href("#").$onclick("toggleContent(this);") - .$style("white-space: pre")._(displayMessage)._()._().div() - .$class("toggle-content").pre()._(message)._()._()._(); + .$style("white-space: pre").__(displayMessage).__().__().div() + 
.$class("toggle-content").pre().__(message).__().__().__(); } else { - cell.pre()._(message)._()._(); + cell.pre().__(message).__().__(); } Log4jWarningErrorMetricsAppender.Element ele = entry.getValue(); row.td(type).td(String.valueOf(ele.count)) - .td(Times.format(ele.timestampSeconds * 1000))._(); + .td(Times.format(ele.timestampSeconds * 1000)).__(); } } } - errorsTable._()._(); + errorsTable.__().__(); } } @@ -199,19 +199,19 @@ public class ErrorsAndWarningsBlock extends HtmlBlock { html.div().$class("metrics").$style("padding-bottom: 20px"); div.h3(tableHeading).table("#metricsoverview").thead() .$class("ui-widget-header").tr().th().$class("ui-state-default") - ._("Last 1 minute")._().th().$class("ui-state-default") - ._("Last 5 minutes")._().th().$class("ui-state-default") - ._("Last 15 minutes")._().th().$class("ui-state-default") - ._("Last 1 hour")._().th().$class("ui-state-default") - ._("Last 6 hours")._().th().$class("ui-state-default") - ._("Last 12 hours")._().th().$class("ui-state-default") - ._("Last 24 hours")._()._()._().tbody().$class("ui-widget-content") + .__("Last 1 minute").__().th().$class("ui-state-default") + .__("Last 5 minutes").__().th().$class("ui-state-default") + .__("Last 15 minutes").__().th().$class("ui-state-default") + .__("Last 1 hour").__().th().$class("ui-state-default") + .__("Last 6 hours").__().th().$class("ui-state-default") + .__("Last 12 hours").__().th().$class("ui-state-default") + .__("Last 24 hours").__().__().__().tbody().$class("ui-widget-content") .tr().td(String.valueOf(values.get(0))) .td(String.valueOf(values.get(1))).td(String.valueOf(values.get(2))) .td(String.valueOf(values.get(3))).td(String.valueOf(values.get(4))) .td(String.valueOf(values.get(5))).td(String.valueOf(values.get(6))) - ._()._()._(); - div._(); + .__().__().__(); + div.__(); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto index edb2d9ccfba..4e05fbad787 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto @@ -111,11 +111,14 @@ message NodeHeartbeatResponseProto { repeated ContainerIdProto containers_to_be_removed_from_nm = 9; repeated SystemCredentialsForAppsProto system_credentials_for_apps = 10; optional bool areNodeLabelsAcceptedByRM = 11 [default = false]; + // to be deprecated in favour of containers_to_update repeated ContainerProto containers_to_decrease = 12; repeated SignalContainerRequestProto containers_to_signal = 13; optional ResourceProto resource = 14; optional ContainerQueuingLimitProto container_queuing_limit = 15; repeated AppCollectorsMapProto app_collectors_map = 16; + // to be used in place of containers_to_decrease + repeated ContainerProto containers_to_update = 17; } message ContainerQueuingLimitProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto new file mode 100644 index 00000000000..cedf4826b65 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_federation_protos.proto @@ -0,0 +1,165 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option java_package = "org.apache.hadoop.yarn.federation.proto"; +option java_outer_classname = "YarnServerFederationProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +package hadoop.yarn; + +import "yarn_protos.proto"; +import "yarn_server_common_protos.proto"; + +message SubClusterIdProto { + optional string id = 1; +} + +enum SubClusterStateProto { + SC_NEW = 1; + SC_RUNNING = 2; + SC_UNHEALTHY = 3; + SC_DECOMMISSIONING = 4; + SC_LOST = 5; + SC_UNREGISTERED = 6; + SC_DECOMMISSIONED = 7; +} + +message SubClusterInfoProto { + optional SubClusterIdProto sub_cluster_id = 1; + optional string aMRM_service_address = 2; + optional string client_rM_service_address = 3; + optional string rM_admin_service_address = 4; + optional string rM_web_service_address = 5; + optional int64 lastHeartBeat = 6; + optional SubClusterStateProto state = 7; + optional int64 lastStartTime = 8; + optional string capability = 9; +} + +message SubClusterRegisterRequestProto { + optional SubClusterInfoProto sub_cluster_info = 1; +} + +message SubClusterRegisterResponseProto { +} + +message SubClusterHeartbeatRequestProto { + optional SubClusterIdProto sub_cluster_id = 1; + optional int64 lastHeartBeat = 2; + optional SubClusterStateProto state = 3; + optional string capability = 4; +} + +message SubClusterHeartbeatResponseProto { +} + +message SubClusterDeregisterRequestProto { + optional SubClusterIdProto sub_cluster_id = 1; + optional SubClusterStateProto state = 2; +} + +message SubClusterDeregisterResponseProto { +} + +message GetSubClusterInfoRequestProto { + optional SubClusterIdProto sub_cluster_id = 1; +} + +message GetSubClusterInfoResponseProto { + optional SubClusterInfoProto sub_cluster_info = 1; +} + +message GetSubClustersInfoRequestProto { + optional bool filter_inactive_subclusters = 1 [default = true]; +} + +message GetSubClustersInfoResponseProto { + repeated SubClusterInfoProto sub_cluster_infos = 1; +} + +message ApplicationHomeSubClusterProto { + optional ApplicationIdProto application_id = 1; + optional SubClusterIdProto home_sub_cluster = 2; +} + +message AddApplicationHomeSubClusterRequestProto { + optional ApplicationHomeSubClusterProto app_subcluster_map = 1; +} + +message AddApplicationHomeSubClusterResponseProto { + optional SubClusterIdProto home_sub_cluster = 1; +} + +message UpdateApplicationHomeSubClusterRequestProto { + optional ApplicationHomeSubClusterProto app_subcluster_map = 1; +} + +message UpdateApplicationHomeSubClusterResponseProto { +} + +message 
GetApplicationHomeSubClusterRequestProto { + optional ApplicationIdProto application_id = 1; +} + +message GetApplicationHomeSubClusterResponseProto { + optional ApplicationHomeSubClusterProto app_subcluster_map = 1; +} + +message GetApplicationsHomeSubClusterRequestProto { + +} + +message GetApplicationsHomeSubClusterResponseProto { + repeated ApplicationHomeSubClusterProto app_subcluster_map = 1; +} + + +message DeleteApplicationHomeSubClusterRequestProto { + optional ApplicationIdProto application_id = 1; +} + +message DeleteApplicationHomeSubClusterResponseProto { +} + +message SubClusterPolicyConfigurationProto { + optional string queue = 1; + optional string type = 2; + optional bytes params = 3; +} + +message GetSubClusterPolicyConfigurationRequestProto { + optional string queue = 1; +} + +message GetSubClusterPolicyConfigurationResponseProto { + optional SubClusterPolicyConfigurationProto policy_configuration = 1; +} + +message SetSubClusterPolicyConfigurationRequestProto { + optional SubClusterPolicyConfigurationProto policy_configuration = 1; +} + +message SetSubClusterPolicyConfigurationResponseProto { +} + +message GetSubClusterPoliciesConfigurationsRequestProto { +} + +message GetSubClusterPoliciesConfigurationsResponseProto { + repeated SubClusterPolicyConfigurationProto policies_configurations = 1; + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index e5d159b2ad6..9775f5c15a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -34,6 +34,8 @@ import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; @@ -341,6 +343,7 @@ public class TestRPC { } @Override + @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( IncreaseContainersResourceRequest request) throws YarnException, IOException { @@ -385,6 +388,12 @@ public class TestRPC { throws YarnException, IOException { return null; } + + @Override + public ContainerUpdateResponse updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { + return null; + } } public static ContainerTokenIdentifier newContainerTokenIdentifier( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java index b670c364e5f..8c0c73afd80 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java @@ -180,14 +180,14 @@ public class TestYarnServerApiClasses { @Test public void testNodeHeartbeatResponsePBImplWithDecreasedContainers() { NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl(); - original.addAllContainersToDecrease( + original.addAllContainersToUpdate( Arrays.asList(getDecreasedContainer(1, 2, 2048, 2), getDecreasedContainer(2, 3, 1024, 1))); NodeHeartbeatResponsePBImpl copy = new NodeHeartbeatResponsePBImpl(original.getProto()); - assertEquals(1, copy.getContainersToDecrease().get(0) + assertEquals(1, copy.getContainersToUpdate().get(0) .getId().getContainerId()); - assertEquals(1024, copy.getContainersToDecrease().get(1) + assertEquals(1024, copy.getContainersToUpdate().get(1) .getResource().getMemorySize()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockResourceManagerFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java similarity index 52% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockResourceManagerFacade.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java index f584c94f7fb..e33d7e19774 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockResourceManagerFacade.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java @@ -16,19 +16,23 @@ * limitations under the License. 
*/ -package org.apache.hadoop.yarn.server.nodemanager.amrmproxy; +package org.apache.hadoop.yarn.server; import java.io.IOException; +import java.net.ConnectException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import com.google.common.base.Strings; -import org.apache.commons.lang.NotImplementedException; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; @@ -93,8 +97,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityReque import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; -import org.apache.hadoop.yarn.api.ApplicationClientProtocol; -import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerResponsePBImpl; import org.apache.hadoop.yarn.api.records.AMCommand; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; @@ -106,38 +109,110 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.ReservationAllocationState; +import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.UpdatedContainer; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException; +import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; +import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; +import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; +import org.apache.hadoop.yarn.server.utils.AMRMClientUtils; import org.apache.hadoop.yarn.util.Records; import org.junit.Assert; -import org.eclipse.jetty.util.log.Log; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Strings; /** * Mock Resource Manager facade implementation that exposes all the methods * implemented by the YARN RM. The behavior and the values returned by this mock - * implementation is expected by the unit test cases. So please change the - * implementation with care. + * implementation is expected by the Router/AMRMProxy unit test cases. So please + * change the implementation with care. */ -public class MockResourceManagerFacade implements - ApplicationMasterProtocol, ApplicationClientProtocol { +public class MockResourceManagerFacade implements ApplicationClientProtocol, + ApplicationMasterProtocol, ResourceManagerAdministrationProtocol { + private static final Logger LOG = + LoggerFactory.getLogger(MockResourceManagerFacade.class); + + private HashSet applicationMap = new HashSet<>(); private HashMap> applicationContainerIdMap = new HashMap>(); private HashMap allocatedContainerMap = new HashMap(); private AtomicInteger containerIndex = new AtomicInteger(0); private Configuration conf; + private int subClusterId; + final private AtomicInteger applicationCounter = new AtomicInteger(0); + + // True if the Mock RM is running, false otherwise. + // This property allows us to write tests for specific scenario as Yarn RM + // down e.g. network issue, failover. 
+ private boolean isRunning; + + private boolean shouldReRegisterNext = false; + + // For unit test synchronization + private static Object syncObj = new Object(); + + public static Object getSyncObj() { + return syncObj; + } public MockResourceManagerFacade(Configuration conf, int startContainerIndex) { + this(conf, startContainerIndex, 0, true); + } + + public MockResourceManagerFacade(Configuration conf, int startContainerIndex, + int subClusterId, boolean isRunning) { this.conf = conf; this.containerIndex.set(startContainerIndex); + this.subClusterId = subClusterId; + this.isRunning = isRunning; + } + + public void setShouldReRegisterNext() { + shouldReRegisterNext = true; + } + + public void setRunningMode(boolean mode) { + this.isRunning = mode; } private static String getAppIdentifier() throws IOException { @@ -150,49 +225,85 @@ public class MockResourceManagerFacade implements break; } } - return result != null ? result.getApplicationAttemptId().toString() - : ""; + return result != null ? result.getApplicationAttemptId().toString() : ""; + } + + private void validateRunning() throws ConnectException { + if (!isRunning) { + throw new ConnectException("RM is stopped"); + } } @Override public RegisterApplicationMasterResponse registerApplicationMaster( - RegisterApplicationMasterRequest request) throws YarnException, - IOException { + RegisterApplicationMasterRequest request) + throws YarnException, IOException { + + validateRunning(); + String amrmToken = getAppIdentifier(); - Log.getLog().info("Registering application attempt: " + amrmToken); + LOG.info("Registering application attempt: " + amrmToken); + + shouldReRegisterNext = false; synchronized (applicationContainerIdMap) { - Assert.assertFalse("The application id is already registered: " - + amrmToken, applicationContainerIdMap.containsKey(amrmToken)); + if (applicationContainerIdMap.containsKey(amrmToken)) { + throw new InvalidApplicationMasterRequestException( + AMRMClientUtils.APP_ALREADY_REGISTERED_MESSAGE); + } // Keep track of the containers that are returned to this application - applicationContainerIdMap.put(amrmToken, - new ArrayList()); + applicationContainerIdMap.put(amrmToken, new ArrayList()); } - return RegisterApplicationMasterResponse.newInstance(null, null, null, - null, null, request.getHost(), null); + // Make sure we wait for certain test cases last in the method + synchronized (syncObj) { + syncObj.notifyAll(); + // We reuse the port number to indicate whether the unit test want us to + // wait here + if (request.getRpcPort() > 1000) { + LOG.info("Register call in RM start waiting"); + try { + syncObj.wait(); + LOG.info("Register call in RM wait finished"); + } catch (InterruptedException e) { + LOG.info("Register call in RM wait interrupted", e); + } + } + } + + return RegisterApplicationMasterResponse.newInstance(null, null, null, null, + null, request.getHost(), null); } @Override public FinishApplicationMasterResponse finishApplicationMaster( - FinishApplicationMasterRequest request) throws YarnException, - IOException { + FinishApplicationMasterRequest request) + throws YarnException, IOException { + + validateRunning(); + String amrmToken = getAppIdentifier(); - Log.getLog().info("Finishing application attempt: " + amrmToken); + LOG.info("Finishing application attempt: " + amrmToken); + + if (shouldReRegisterNext) { + String message = "AM is not registered, should re-register."; + LOG.warn(message); + throw new ApplicationMasterNotRegisteredException(message); + } synchronized 
(applicationContainerIdMap) { // Remove the containers that were being tracked for this application - Assert.assertTrue("The application id is NOT registered: " - + amrmToken, applicationContainerIdMap.containsKey(amrmToken)); + Assert.assertTrue("The application id is NOT registered: " + amrmToken, + applicationContainerIdMap.containsKey(amrmToken)); List ids = applicationContainerIdMap.remove(amrmToken); for (ContainerId c : ids) { allocatedContainerMap.remove(c); } } - return FinishApplicationMasterResponse - .newInstance(request.getFinalApplicationStatus() == FinalApplicationStatus.SUCCEEDED ? true - : false); + return FinishApplicationMasterResponse.newInstance( + request.getFinalApplicationStatus() == FinalApplicationStatus.SUCCEEDED + ? true : false); } protected ApplicationId getApplicationId(int id) { @@ -207,6 +318,9 @@ public class MockResourceManagerFacade implements @Override public AllocateResponse allocate(AllocateRequest request) throws YarnException, IOException { + + validateRunning(); + if (request.getAskList() != null && request.getAskList().size() > 0 && request.getReleaseList() != null && request.getReleaseList().size() > 0) { @@ -215,14 +329,20 @@ public class MockResourceManagerFacade implements } String amrmToken = getAppIdentifier(); + LOG.info("Allocate from application attempt: " + amrmToken); + + if (shouldReRegisterNext) { + String message = "AM is not registered, should re-register."; + LOG.warn(message); + throw new ApplicationMasterNotRegisteredException(message); + } ArrayList containerList = new ArrayList(); if (request.getAskList() != null) { for (ResourceRequest rr : request.getAskList()) { for (int i = 0; i < rr.getNumContainers(); i++) { - ContainerId containerId = - ContainerId.newInstance(getApplicationAttemptId(1), - containerIndex.incrementAndGet()); + ContainerId containerId = ContainerId.newInstance( + getApplicationAttemptId(1), containerIndex.incrementAndGet()); Container container = Records.newRecord(Container.class); container.setId(containerId); container.setPriority(rr.getPriority()); @@ -230,9 +350,8 @@ public class MockResourceManagerFacade implements // We don't use the node for running containers in the test cases. So // it is OK to hard code it to some dummy value NodeId nodeId = - NodeId.newInstance( - !Strings.isNullOrEmpty(rr.getResourceName()) ? rr - .getResourceName() : "dummy", 1000); + NodeId.newInstance(!Strings.isNullOrEmpty(rr.getResourceName()) + ? 
rr.getResourceName() : "dummy", 1000); container.setNodeId(nodeId); container.setResource(rr.getCapability()); containerList.add(container); @@ -244,8 +363,7 @@ public class MockResourceManagerFacade implements "The application id is Not registered before allocate(): " + amrmToken, applicationContainerIdMap.containsKey(amrmToken)); - List ids = - applicationContainerIdMap.get(amrmToken); + List ids = applicationContainerIdMap.get(amrmToken); ids.add(containerId); this.allocatedContainerMap.put(containerId, container); } @@ -255,13 +373,13 @@ public class MockResourceManagerFacade implements if (request.getReleaseList() != null && request.getReleaseList().size() > 0) { - Log.getLog().info("Releasing containers: " - + request.getReleaseList().size()); + LOG.info("Releasing containers: " + request.getReleaseList().size()); synchronized (applicationContainerIdMap) { - Assert.assertTrue( - "The application id is not registered before allocate(): " - + amrmToken, - applicationContainerIdMap.containsKey(amrmToken)); + Assert + .assertTrue( + "The application id is not registered before allocate(): " + + amrmToken, + applicationContainerIdMap.containsKey(amrmToken)); List ids = applicationContainerIdMap.get(amrmToken); for (ContainerId id : request.getReleaseList()) { @@ -273,10 +391,9 @@ public class MockResourceManagerFacade implements } } - Assert.assertTrue( - "ContainerId " + id - + " being released is not valid for application: " - + conf.get("AMRMTOKEN"), found); + Assert.assertTrue("ContainerId " + id + + " being released is not valid for application: " + + conf.get("AMRMTOKEN"), found); ids.remove(id); @@ -286,9 +403,8 @@ public class MockResourceManagerFacade implements // returning of fake containers is ONLY done for testing purpose - for // the test code to get confirmation that the sub-cluster resource // managers received the release request - ContainerId fakeContainerId = - ContainerId.newInstance(getApplicationAttemptId(1), - containerIndex.incrementAndGet()); + ContainerId fakeContainerId = ContainerId.newInstance( + getApplicationAttemptId(1), containerIndex.incrementAndGet()); Container fakeContainer = allocatedContainerMap.get(id); fakeContainer.setId(fakeContainerId); containerList.add(fakeContainer); @@ -296,46 +412,48 @@ public class MockResourceManagerFacade implements } } - Log.getLog().info("Allocating containers: " + containerList.size() + LOG.info("Allocating containers: " + containerList.size() + " for application attempt: " + conf.get("AMRMTOKEN")); // Always issue a new AMRMToken as if RM rolled master key Token newAMRMToken = Token.newInstance(new byte[0], "", new byte[0], ""); - return AllocateResponse.newInstance(0, - new ArrayList(), containerList, - new ArrayList(), null, AMCommand.AM_RESYNC, 1, null, - new ArrayList(), newAMRMToken, + return AllocateResponse.newInstance(0, new ArrayList(), + containerList, new ArrayList(), null, AMCommand.AM_RESYNC, + 1, null, new ArrayList(), newAMRMToken, new ArrayList()); } @Override public GetApplicationReportResponse getApplicationReport( - GetApplicationReportRequest request) throws YarnException, - IOException { + GetApplicationReportRequest request) throws YarnException, IOException { + + validateRunning(); GetApplicationReportResponse response = Records.newRecord(GetApplicationReportResponse.class); ApplicationReport report = Records.newRecord(ApplicationReport.class); report.setYarnApplicationState(YarnApplicationState.ACCEPTED); report.setApplicationId(request.getApplicationId()); - 
report.setCurrentApplicationAttemptId(ApplicationAttemptId - .newInstance(request.getApplicationId(), 1)); + report.setCurrentApplicationAttemptId( + ApplicationAttemptId.newInstance(request.getApplicationId(), 1)); response.setApplicationReport(report); return response; } @Override public GetApplicationAttemptReportResponse getApplicationAttemptReport( - GetApplicationAttemptReportRequest request) throws YarnException, - IOException { + GetApplicationAttemptReportRequest request) + throws YarnException, IOException { + + validateRunning(); + GetApplicationAttemptReportResponse response = Records.newRecord(GetApplicationAttemptReportResponse.class); ApplicationAttemptReport report = Records.newRecord(ApplicationAttemptReport.class); report.setApplicationAttemptId(request.getApplicationAttemptId()); - report - .setYarnApplicationAttemptState(YarnApplicationAttemptState.LAUNCHED); + report.setYarnApplicationAttemptState(YarnApplicationAttemptState.LAUNCHED); response.setApplicationAttemptReport(report); return response; } @@ -343,172 +461,395 @@ public class MockResourceManagerFacade implements @Override public GetNewApplicationResponse getNewApplication( GetNewApplicationRequest request) throws YarnException, IOException { - return null; + + validateRunning(); + + return GetNewApplicationResponse.newInstance(ApplicationId.newInstance( + subClusterId, applicationCounter.incrementAndGet()), null, null); } @Override public SubmitApplicationResponse submitApplication( SubmitApplicationRequest request) throws YarnException, IOException { - return null; + + validateRunning(); + + ApplicationId appId = null; + if (request.getApplicationSubmissionContext() != null) { + appId = request.getApplicationSubmissionContext().getApplicationId(); + } + LOG.info("Application submitted: " + appId); + applicationMap.add(appId); + return SubmitApplicationResponse.newInstance(); } @Override public KillApplicationResponse forceKillApplication( KillApplicationRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + + validateRunning(); + + ApplicationId appId = null; + if (request.getApplicationId() != null) { + appId = request.getApplicationId(); + if (!applicationMap.remove(appId)) { + throw new ApplicationNotFoundException( + "Trying to kill an absent application: " + appId); + } + } + LOG.info("Force killing application: " + appId); + return KillApplicationResponse.newInstance(true); } @Override public GetClusterMetricsResponse getClusterMetrics( GetClusterMetricsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + + validateRunning(); + + return GetClusterMetricsResponse.newInstance(null); } @Override - public GetApplicationsResponse getApplications( - GetApplicationsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + public GetApplicationsResponse getApplications(GetApplicationsRequest request) + throws YarnException, IOException { + + validateRunning(); + + return GetApplicationsResponse.newInstance(null); } @Override - public GetClusterNodesResponse getClusterNodes( - GetClusterNodesRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request) + throws YarnException, IOException { + + validateRunning(); + + return GetClusterNodesResponse.newInstance(null); } @Override public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) throws YarnException, IOException { - throw 
new NotImplementedException(); + + validateRunning(); + + return GetQueueInfoResponse.newInstance(null); } @Override public GetQueueUserAclsInfoResponse getQueueUserAcls( - GetQueueUserAclsInfoRequest request) throws YarnException, - IOException { - throw new NotImplementedException(); + GetQueueUserAclsInfoRequest request) throws YarnException, IOException { + + validateRunning(); + + return GetQueueUserAclsInfoResponse.newInstance(null); } @Override public GetDelegationTokenResponse getDelegationToken( GetDelegationTokenRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + + validateRunning(); + + return GetDelegationTokenResponse.newInstance(null); } @Override public RenewDelegationTokenResponse renewDelegationToken( - RenewDelegationTokenRequest request) throws YarnException, - IOException { - throw new NotImplementedException(); + RenewDelegationTokenRequest request) throws YarnException, IOException { + + validateRunning(); + + return RenewDelegationTokenResponse.newInstance(0); } @Override public CancelDelegationTokenResponse cancelDelegationToken( - CancelDelegationTokenRequest request) throws YarnException, - IOException { - throw new NotImplementedException(); + CancelDelegationTokenRequest request) throws YarnException, IOException { + + validateRunning(); + + return CancelDelegationTokenResponse.newInstance(); } @Override public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues( - MoveApplicationAcrossQueuesRequest request) throws YarnException, - IOException { - throw new NotImplementedException(); + MoveApplicationAcrossQueuesRequest request) + throws YarnException, IOException { + + validateRunning(); + + return MoveApplicationAcrossQueuesResponse.newInstance(); } @Override public GetApplicationAttemptsResponse getApplicationAttempts( - GetApplicationAttemptsRequest request) throws YarnException, - IOException { - throw new NotImplementedException(); + GetApplicationAttemptsRequest request) throws YarnException, IOException { + + validateRunning(); + + return GetApplicationAttemptsResponse.newInstance(null); } @Override public GetContainerReportResponse getContainerReport( GetContainerReportRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + + validateRunning(); + + return GetContainerReportResponse.newInstance(null); } @Override public GetContainersResponse getContainers(GetContainersRequest request) throws YarnException, IOException { - throw new NotImplementedException(); - } - @Override - public GetNewReservationResponse getNewReservation( - GetNewReservationRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + validateRunning(); + + return GetContainersResponse.newInstance(null); } @Override public ReservationSubmissionResponse submitReservation( - ReservationSubmissionRequest request) throws YarnException, - IOException { - throw new NotImplementedException(); + ReservationSubmissionRequest request) throws YarnException, IOException { + + validateRunning(); + + return ReservationSubmissionResponse.newInstance(); } @Override public ReservationListResponse listReservations( - ReservationListRequest request) throws YarnException, - IOException { - throw new NotImplementedException(); + ReservationListRequest request) throws YarnException, IOException { + + validateRunning(); + + return ReservationListResponse + .newInstance(new ArrayList()); } @Override public ReservationUpdateResponse updateReservation( ReservationUpdateRequest request) 
throws YarnException, IOException { - throw new NotImplementedException(); + + validateRunning(); + + return ReservationUpdateResponse.newInstance(); } @Override public ReservationDeleteResponse deleteReservation( ReservationDeleteRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + + validateRunning(); + + return ReservationDeleteResponse.newInstance(); } @Override public GetNodesToLabelsResponse getNodeToLabels( GetNodesToLabelsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + + validateRunning(); + + return GetNodesToLabelsResponse + .newInstance(new HashMap>()); } @Override public GetClusterNodeLabelsResponse getClusterNodeLabels( - GetClusterNodeLabelsRequest request) throws YarnException, - IOException { - throw new NotImplementedException(); + GetClusterNodeLabelsRequest request) throws YarnException, IOException { + + validateRunning(); + + return GetClusterNodeLabelsResponse.newInstance(new ArrayList()); } @Override public GetLabelsToNodesResponse getLabelsToNodes( GetLabelsToNodesRequest request) throws YarnException, IOException { - return null; + + validateRunning(); + + return GetLabelsToNodesResponse.newInstance(null); } @Override - public UpdateApplicationPriorityResponse updateApplicationPriority( - UpdateApplicationPriorityRequest request) throws YarnException, - IOException { - return null; - } + public GetNewReservationResponse getNewReservation( + GetNewReservationRequest request) throws YarnException, IOException { - @Override - public SignalContainerResponse signalToContainer( - SignalContainerRequest request) throws IOException { -return null; -} + validateRunning(); + + return GetNewReservationResponse + .newInstance(ReservationId.newInstance(0, 0)); + } @Override public FailApplicationAttemptResponse failApplicationAttempt( FailApplicationAttemptRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + + validateRunning(); + + return FailApplicationAttemptResponse.newInstance(); + } + + @Override + public UpdateApplicationPriorityResponse updateApplicationPriority( + UpdateApplicationPriorityRequest request) + throws YarnException, IOException { + + validateRunning(); + + return UpdateApplicationPriorityResponse.newInstance(null); + } + + @Override + public SignalContainerResponse signalToContainer( + SignalContainerRequest request) throws YarnException, IOException { + + validateRunning(); + + return new SignalContainerResponsePBImpl(); } @Override public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( UpdateApplicationTimeoutsRequest request) throws YarnException, IOException { - throw new NotImplementedException(); + + validateRunning(); + + return UpdateApplicationTimeoutsResponse.newInstance(); + } + + @Override + public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) + throws StandbyException, YarnException, IOException { + + validateRunning(); + + return RefreshQueuesResponse.newInstance(); + } + + @Override + public RefreshNodesResponse refreshNodes(RefreshNodesRequest request) + throws StandbyException, YarnException, IOException { + + validateRunning(); + + return RefreshNodesResponse.newInstance(); + } + + @Override + public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration( + RefreshSuperUserGroupsConfigurationRequest request) + throws StandbyException, YarnException, IOException { + + validateRunning(); + + return RefreshSuperUserGroupsConfigurationResponse.newInstance(); + 
} + + @Override + public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( + RefreshUserToGroupsMappingsRequest request) + throws StandbyException, YarnException, IOException { + + validateRunning(); + + return RefreshUserToGroupsMappingsResponse.newInstance(); + } + + @Override + public RefreshAdminAclsResponse refreshAdminAcls( + RefreshAdminAclsRequest request) throws YarnException, IOException { + + validateRunning(); + + return RefreshAdminAclsResponse.newInstance(); + } + + @Override + public RefreshServiceAclsResponse refreshServiceAcls( + RefreshServiceAclsRequest request) throws YarnException, IOException { + + validateRunning(); + + return RefreshServiceAclsResponse.newInstance(); + } + + @Override + public UpdateNodeResourceResponse updateNodeResource( + UpdateNodeResourceRequest request) throws YarnException, IOException { + + validateRunning(); + + return UpdateNodeResourceResponse.newInstance(); + } + + @Override + public RefreshNodesResourcesResponse refreshNodesResources( + RefreshNodesResourcesRequest request) throws YarnException, IOException { + + validateRunning(); + + return RefreshNodesResourcesResponse.newInstance(); + } + + @Override + public AddToClusterNodeLabelsResponse addToClusterNodeLabels( + AddToClusterNodeLabelsRequest request) throws YarnException, IOException { + + validateRunning(); + + return AddToClusterNodeLabelsResponse.newInstance(); + } + + @Override + public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels( + RemoveFromClusterNodeLabelsRequest request) + throws YarnException, IOException { + + validateRunning(); + + return RemoveFromClusterNodeLabelsResponse.newInstance(); + } + + @Override + public ReplaceLabelsOnNodeResponse replaceLabelsOnNode( + ReplaceLabelsOnNodeRequest request) throws YarnException, IOException { + + validateRunning(); + + return ReplaceLabelsOnNodeResponse.newInstance(); + } + + @Override + public CheckForDecommissioningNodesResponse checkForDecommissioningNodes( + CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest) + throws YarnException, IOException { + + validateRunning(); + + return CheckForDecommissioningNodesResponse.newInstance(null); + } + + @Override + public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority( + RefreshClusterMaxPriorityRequest request) + throws YarnException, IOException { + + validateRunning(); + + return RefreshClusterMaxPriorityResponse.newInstance(); + } + + @Override + public String[] getGroupsForUser(String user) throws IOException { + + validateRunning(); + + return new String[0]; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/BaseFederationPoliciesTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/BaseFederationPoliciesTest.java new file mode 100644 index 00000000000..23978ed886e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/BaseFederationPoliciesTest.java @@ -0,0 +1,186 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Test; + +/** + * Base class for policies tests, tests for common reinitialization cases. 
+ */ +public abstract class BaseFederationPoliciesTest { + + private ConfigurableFederationPolicy policy; + private WeightedPolicyInfo policyInfo = mock(WeightedPolicyInfo.class); + private Map activeSubclusters = new HashMap<>(); + private FederationPolicyInitializationContext federationPolicyContext; + private ApplicationSubmissionContext applicationSubmissionContext = + mock(ApplicationSubmissionContext.class); + private Random rand = new Random(); + private SubClusterId homeSubCluster; + + @Test + public void testReinitilialize() throws YarnException { + FederationPolicyInitializationContext fpc = + new FederationPolicyInitializationContext(); + ByteBuffer buf = getPolicyInfo().toByteBuffer(); + fpc.setSubClusterPolicyConfiguration(SubClusterPolicyConfiguration + .newInstance("queue1", getPolicy().getClass().getCanonicalName(), buf)); + fpc.setFederationSubclusterResolver( + FederationPoliciesTestUtil.initResolver()); + fpc.setFederationStateStoreFacade(FederationPoliciesTestUtil.initFacade()); + getPolicy().reinitialize(fpc); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void testReinitilializeBad1() throws YarnException { + getPolicy().reinitialize(null); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void testReinitilializeBad2() throws YarnException { + FederationPolicyInitializationContext fpc = + new FederationPolicyInitializationContext(); + getPolicy().reinitialize(fpc); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void testReinitilializeBad3() throws YarnException { + FederationPolicyInitializationContext fpc = + new FederationPolicyInitializationContext(); + ByteBuffer buf = mock(ByteBuffer.class); + fpc.setSubClusterPolicyConfiguration(SubClusterPolicyConfiguration + .newInstance("queue1", "WrongPolicyName", buf)); + fpc.setFederationSubclusterResolver( + FederationPoliciesTestUtil.initResolver()); + fpc.setFederationStateStoreFacade(FederationPoliciesTestUtil.initFacade()); + getPolicy().reinitialize(fpc); + } + + @Test(expected = FederationPolicyException.class) + public void testNoSubclusters() throws YarnException { + // empty the activeSubclusters map + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + getPolicyInfo(), new HashMap<>()); + + ConfigurableFederationPolicy localPolicy = getPolicy(); + if (localPolicy instanceof FederationRouterPolicy) { + ((FederationRouterPolicy) localPolicy) + .getHomeSubcluster(getApplicationSubmissionContext(), null); + } else { + String[] hosts = new String[] {"host1", "host2"}; + List resourceRequests = FederationPoliciesTestUtil + .createResourceRequests(hosts, 2 * 1024, 2, 1, 3, null, false); + ((FederationAMRMProxyPolicy) localPolicy) + .splitResourceRequests(resourceRequests); + } + } + + public ConfigurableFederationPolicy getPolicy() { + return policy; + } + + public void setPolicy(ConfigurableFederationPolicy policy) { + this.policy = policy; + } + + public WeightedPolicyInfo getPolicyInfo() { + return policyInfo; + } + + public void setPolicyInfo(WeightedPolicyInfo policyInfo) { + this.policyInfo = policyInfo; + } + + public Map getActiveSubclusters() { + return activeSubclusters; + } + + public void setActiveSubclusters( + Map activeSubclusters) { + this.activeSubclusters = activeSubclusters; + } + + public FederationPolicyInitializationContext getFederationPolicyContext() { + return federationPolicyContext; + } + + public void setFederationPolicyContext( + FederationPolicyInitializationContext 
federationPolicyContext) { + this.federationPolicyContext = federationPolicyContext; + } + + public ApplicationSubmissionContext getApplicationSubmissionContext() { + return applicationSubmissionContext; + } + + public void setApplicationSubmissionContext( + ApplicationSubmissionContext applicationSubmissionContext) { + this.applicationSubmissionContext = applicationSubmissionContext; + } + + public Random getRand() { + return rand; + } + + public void setRand(Random rand) { + this.rand = rand; + } + + public SubClusterId getHomeSubCluster() { + return homeSubCluster; + } + + public void setHomeSubCluster(SubClusterId homeSubCluster) { + this.homeSubCluster = homeSubCluster; + } + + public void setMockActiveSubclusters(int numSubclusters) { + for (int i = 1; i <= numSubclusters; i++) { + SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i); + SubClusterInfo sci = mock(SubClusterInfo.class); + when(sci.getState()).thenReturn(SubClusterState.SC_RUNNING); + when(sci.getSubClusterId()).thenReturn(sc.toId()); + getActiveSubclusters().put(sc.toId(), sci); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestFederationPolicyInitializationContextValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestFederationPolicyInitializationContextValidator.java new file mode 100644 index 00000000000..611a48611ea --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestFederationPolicyInitializationContextValidator.java @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager; +import org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy; +import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.junit.Before; +import org.junit.Test; + +/** + * Test class for {@link FederationPolicyInitializationContextValidator}. + */ +public class TestFederationPolicyInitializationContextValidator { + + private SubClusterPolicyConfiguration goodConfig; + private SubClusterResolver goodSR; + private FederationStateStoreFacade goodFacade; + private SubClusterId goodHome; + private FederationPolicyInitializationContext context; + + @Before + public void setUp() throws Exception { + goodFacade = FederationPoliciesTestUtil.initFacade(); + goodConfig = new MockPolicyManager().serializeConf(); + goodSR = FederationPoliciesTestUtil.initResolver(); + goodHome = SubClusterId.newInstance("homesubcluster"); + context = new FederationPolicyInitializationContext(goodConfig, goodSR, + goodFacade, goodHome); + } + + @Test + public void correcInit() throws Exception { + FederationPolicyInitializationContextValidator.validate(context, + MockPolicyManager.class.getCanonicalName()); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void nullContext() throws Exception { + FederationPolicyInitializationContextValidator.validate(null, + MockPolicyManager.class.getCanonicalName()); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void nullType() throws Exception { + FederationPolicyInitializationContextValidator.validate(context, null); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void wrongType() throws Exception { + FederationPolicyInitializationContextValidator.validate(context, + "WrongType"); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void nullConf() throws Exception { + context.setSubClusterPolicyConfiguration(null); + FederationPolicyInitializationContextValidator.validate(context, + MockPolicyManager.class.getCanonicalName()); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void nullResolver() throws Exception { + context.setFederationSubclusterResolver(null); + FederationPolicyInitializationContextValidator.validate(context, + MockPolicyManager.class.getCanonicalName()); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void nullFacade() throws Exception { + context.setFederationStateStoreFacade(null); + FederationPolicyInitializationContextValidator.validate(context, + MockPolicyManager.class.getCanonicalName()); + } + + private class MockPolicyManager implements FederationPolicyManager { + + @Override + public FederationAMRMProxyPolicy getAMRMPolicy( + FederationPolicyInitializationContext policyContext, + 
FederationAMRMProxyPolicy oldInstance) + throws FederationPolicyInitializationException { + return null; + } + + @Override + public FederationRouterPolicy getRouterPolicy( + FederationPolicyInitializationContext policyContext, + FederationRouterPolicy oldInstance) + throws FederationPolicyInitializationException { + return null; + } + + @Override + public SubClusterPolicyConfiguration serializeConf() + throws FederationPolicyInitializationException { + ByteBuffer buf = ByteBuffer.allocate(0); + return SubClusterPolicyConfiguration.newInstance("queue1", + this.getClass().getCanonicalName(), buf); + } + + @Override + public String getQueue() { + return "default"; + } + + @Override + public void setQueue(String queue) { + + } + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java new file mode 100644 index 00000000000..d0e2decb2de --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.manager.PriorityBroadcastPolicyManager; +import org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager; +import org.apache.hadoop.yarn.server.federation.policies.router.PriorityRouterPolicy; +import org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy; +import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreTestUtil; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test of {@link RouterPolicyFacade}. + */ +public class TestRouterPolicyFacade { + + private RouterPolicyFacade routerFacade; + private List subClusterIds; + private FederationStateStore store; + private String queue1 = "queue1"; + private String defQueueKey = YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY; + + @Before + public void setup() throws YarnException { + + // setting up a store and its facade (with caching off) + FederationStateStoreFacade fedFacade = + FederationStateStoreFacade.getInstance(); + YarnConfiguration conf = new YarnConfiguration(); + conf.set(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, "0"); + store = new MemoryFederationStateStore(); + store.init(conf); + fedFacade.reinitialize(store, conf); + + FederationStateStoreTestUtil storeTestUtil = + new FederationStateStoreTestUtil(store); + storeTestUtil.registerSubClusters(10); + + subClusterIds = storeTestUtil.getAllSubClusterIds(true); + store.setPolicyConfiguration(SetSubClusterPolicyConfigurationRequest + .newInstance(getUniformPolicy(queue1))); + + SubClusterResolver resolver = FederationPoliciesTestUtil.initResolver(); + routerFacade = new RouterPolicyFacade(new YarnConfiguration(), fedFacade, + resolver, subClusterIds.get(0)); + } + + @Test + public void testConfigurationUpdate() throws YarnException { + + // in this test we see what happens when the configuration is changed + // between calls. We achieve this by changing what is in the store. 
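For the store change made in this test to be observed on the very next call, setup() above creates the facade with caching disabled; a minimal sketch of that knob (the constant is the one already used in setup(), the surrounding lines are illustrative):

// Illustrative: with a TTL of 0 the state-store facade does not cache policy
// configurations, so a policy written to the store is visible to
// RouterPolicyFacade on the next getHomeSubcluster() call.
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, "0");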
+ + ApplicationSubmissionContext applicationSubmissionContext = + mock(ApplicationSubmissionContext.class); + when(applicationSubmissionContext.getQueue()).thenReturn(queue1); + + // first call runs using standard UniformRandomRouterPolicy + SubClusterId chosen = + routerFacade.getHomeSubcluster(applicationSubmissionContext, null); + Assert.assertTrue(subClusterIds.contains(chosen)); + Assert.assertTrue(routerFacade.globalPolicyMap + .get(queue1) instanceof UniformRandomRouterPolicy); + + // then the operator changes how queue1 is routed setting it to + // PriorityRouterPolicy with weights favoring the first subcluster in + // subClusterIds. + store.setPolicyConfiguration(SetSubClusterPolicyConfigurationRequest + .newInstance(getPriorityPolicy(queue1))); + + // second call is routed by new policy PriorityRouterPolicy + chosen = routerFacade.getHomeSubcluster(applicationSubmissionContext, null); + Assert.assertTrue(chosen.equals(subClusterIds.get(0))); + Assert.assertTrue(routerFacade.globalPolicyMap + .get(queue1) instanceof PriorityRouterPolicy); + } + + @Test + public void testGetHomeSubcluster() throws YarnException { + + ApplicationSubmissionContext applicationSubmissionContext = + mock(ApplicationSubmissionContext.class); + when(applicationSubmissionContext.getQueue()).thenReturn(queue1); + + // the facade only contains the fallback behavior + Assert.assertTrue(routerFacade.globalPolicyMap.containsKey(defQueueKey) + && routerFacade.globalPolicyMap.size() == 1); + + // when invoked it returns the expected SubClusterId. + SubClusterId chosen = + routerFacade.getHomeSubcluster(applicationSubmissionContext, null); + Assert.assertTrue(subClusterIds.contains(chosen)); + + // now the caching of policies must have added an entry for this queue + Assert.assertTrue(routerFacade.globalPolicyMap.size() == 2); + + // after the facade is used the policyMap contains the expected policy type. + Assert.assertTrue(routerFacade.globalPolicyMap + .get(queue1) instanceof UniformRandomRouterPolicy); + + // the facade is again empty after reset + routerFacade.reset(); + // the facade only contains the fallback behavior + Assert.assertTrue(routerFacade.globalPolicyMap.containsKey(defQueueKey) + && routerFacade.globalPolicyMap.size() == 1); + + } + + @Test + public void testFallbacks() throws YarnException { + + // this tests the behavior of the system when the queue requested is + // not configured (or null) and there is no default policy configured + // for DEFAULT_FEDERATION_POLICY_KEY (*). This is our second line of + // defense. 
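A hedged sketch of the "first line of defense": installing an explicit policy for the wildcard key so that unknown queues are routed by it rather than by the facade's built-in fallback. It reuses the store field, the getUniformPolicy helper, and the DEFAULT_FEDERATION_POLICY_KEY constant that appear elsewhere in this test; the scenario itself is illustrative and not part of the patch.

// Illustrative only: register a default ("*") policy in the state store.
store.setPolicyConfiguration(SetSubClusterPolicyConfigurationRequest
    .newInstance(getUniformPolicy(
        YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY)));
// Queues with no policy of their own would then be routed by this default
// configuration instead of the facade's hard-coded fallback behavior.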
+ + ApplicationSubmissionContext applicationSubmissionContext = + mock(ApplicationSubmissionContext.class); + + // The facade answers also for non-initialized policies (using the + // defaultPolicy) + String uninitQueue = "non-initialized-queue"; + when(applicationSubmissionContext.getQueue()).thenReturn(uninitQueue); + SubClusterId chosen = + routerFacade.getHomeSubcluster(applicationSubmissionContext, null); + Assert.assertTrue(subClusterIds.contains(chosen)); + Assert.assertFalse(routerFacade.globalPolicyMap.containsKey(uninitQueue)); + + // empty string + when(applicationSubmissionContext.getQueue()).thenReturn(""); + chosen = routerFacade.getHomeSubcluster(applicationSubmissionContext, null); + Assert.assertTrue(subClusterIds.contains(chosen)); + Assert.assertFalse(routerFacade.globalPolicyMap.containsKey(uninitQueue)); + + // null queue also falls back to default + when(applicationSubmissionContext.getQueue()).thenReturn(null); + chosen = routerFacade.getHomeSubcluster(applicationSubmissionContext, null); + Assert.assertTrue(subClusterIds.contains(chosen)); + Assert.assertFalse(routerFacade.globalPolicyMap.containsKey(uninitQueue)); + + } + + public static SubClusterPolicyConfiguration getUniformPolicy(String queue) + throws FederationPolicyInitializationException { + + // we go through standard lifecycle instantiating a policyManager and + // configuring it and serializing it to a conf. + UniformBroadcastPolicyManager wfp = new UniformBroadcastPolicyManager(); + wfp.setQueue(queue); + + SubClusterPolicyConfiguration fpc = wfp.serializeConf(); + + return fpc; + } + + public SubClusterPolicyConfiguration getPriorityPolicy(String queue) + throws FederationPolicyInitializationException { + + // we go through standard lifecycle instantiating a policyManager and + // configuring it and serializing it to a conf. + PriorityBroadcastPolicyManager wfp = new PriorityBroadcastPolicyManager(); + + // equal weight to all subcluster + Map routerWeights = new HashMap<>(); + for (SubClusterId s : subClusterIds) { + routerWeights.put(new SubClusterIdInfo(s), 0.9f / subClusterIds.size()); + } + + // beside the first one who gets more weight + SubClusterIdInfo favorite = new SubClusterIdInfo((subClusterIds.get(0))); + routerWeights.put(favorite, (0.1f + 0.9f / subClusterIds.size())); + + WeightedPolicyInfo policyInfo = new WeightedPolicyInfo(); + policyInfo.setRouterPolicyWeights(routerWeights); + wfp.setWeightedPolicyInfo(policyInfo); + wfp.setQueue(queue); + + // serializeConf it in a context + SubClusterPolicyConfiguration fpc = wfp.serializeConf(); + + return fpc; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestBroadcastAMRMProxyFederationPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestBroadcastAMRMProxyFederationPolicy.java new file mode 100644 index 00000000000..a21f53dc924 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestBroadcastAMRMProxyFederationPolicy.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.amrmproxy; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.server.federation.policies.BaseFederationPoliciesTest; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test class for the {@link BroadcastAMRMProxyPolicy}. + */ +public class TestBroadcastAMRMProxyFederationPolicy + extends BaseFederationPoliciesTest { + + @Before + public void setUp() throws Exception { + setPolicy(new BroadcastAMRMProxyPolicy()); + // needed for base test to work + setPolicyInfo(mock(WeightedPolicyInfo.class)); + + for (int i = 1; i <= 2; i++) { + SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i); + SubClusterInfo sci = mock(SubClusterInfo.class); + when(sci.getState()).thenReturn(SubClusterState.SC_RUNNING); + when(sci.getSubClusterId()).thenReturn(sc.toId()); + getActiveSubclusters().put(sc.toId(), sci); + } + + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + mock(WeightedPolicyInfo.class), getActiveSubclusters()); + + } + + @Test + public void testSplitAllocateRequest() throws Exception { + // verify the request is broadcasted to all subclusters + String[] hosts = new String[] {"host1", "host2" }; + List resourceRequests = FederationPoliciesTestUtil + .createResourceRequests(hosts, 2 * 1024, 2, 1, 3, null, false); + + Map> response = + ((FederationAMRMProxyPolicy) getPolicy()) + .splitResourceRequests(resourceRequests); + Assert.assertTrue(response.size() == 2); + for (Map.Entry> entry : response + .entrySet()) { + Assert.assertTrue(getActiveSubclusters().get(entry.getKey()) != null); + for (ResourceRequest r : entry.getValue()) { + Assert.assertTrue(resourceRequests.contains(r)); + } + } + for (SubClusterId subClusterId : getActiveSubclusters().keySet()) { + for (ResourceRequest r : response.get(subClusterId)) { + Assert.assertTrue(resourceRequests.contains(r)); + } + } + } + + @Test + public void testNotifyOfResponse() throws Exception { + String[] hosts = new String[] {"host1", "host2" }; + List resourceRequests = FederationPoliciesTestUtil + .createResourceRequests(hosts, 2 * 1024, 2, 1, 3, 
null, false); + Map> response = + ((FederationAMRMProxyPolicy) getPolicy()) + .splitResourceRequests(resourceRequests); + + try { + ((FederationAMRMProxyPolicy) getPolicy()).notifyOfResponse( + SubClusterId.newInstance("sc3"), mock(AllocateResponse.class)); + Assert.fail(); + } catch (FederationPolicyException f) { + System.out.println("Expected: " + f.getMessage()); + } + + ((FederationAMRMProxyPolicy) getPolicy()).notifyOfResponse( + SubClusterId.newInstance("sc1"), mock(AllocateResponse.class)); + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java new file mode 100644 index 00000000000..6e3a2f14efe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java @@ -0,0 +1,602 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.amrmproxy; + +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; + +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.NMToken; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.BaseFederationPoliciesTest; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl; +import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Simple test class for the {@link LocalityMulticastAMRMProxyPolicy}. 
+ */ +public class TestLocalityMulticastAMRMProxyPolicy + extends BaseFederationPoliciesTest { + + public static final Logger LOG = + LoggerFactory.getLogger(TestLocalityMulticastAMRMProxyPolicy.class); + + @Before + public void setUp() throws Exception { + setPolicy(new LocalityMulticastAMRMProxyPolicy()); + setPolicyInfo(new WeightedPolicyInfo()); + Map routerWeights = new HashMap<>(); + Map amrmWeights = new HashMap<>(); + + // simulate 20 subclusters with a 5% chance of being inactive + for (int i = 0; i < 6; i++) { + SubClusterIdInfo sc = new SubClusterIdInfo("subcluster" + i); + // sub-cluster 3 is not active + if (i != 3) { + SubClusterInfo sci = mock(SubClusterInfo.class); + when(sci.getState()).thenReturn(SubClusterState.SC_RUNNING); + when(sci.getSubClusterId()).thenReturn(sc.toId()); + getActiveSubclusters().put(sc.toId(), sci); + } + + float weight = 1 / 10f; + routerWeights.put(sc, weight); + amrmWeights.put(sc, weight); + // sub-cluster 4 is "disabled" in the weights + if (i == 4) { + routerWeights.put(sc, 0f); + amrmWeights.put(sc, 0f); + } + } + + getPolicyInfo().setRouterPolicyWeights(routerWeights); + getPolicyInfo().setAMRMPolicyWeights(amrmWeights); + getPolicyInfo().setHeadroomAlpha(0.5f); + setHomeSubCluster(SubClusterId.newInstance("homesubcluster")); + + } + + @Test + public void testReinitilialize() throws YarnException { + initializePolicy(); + } + + private void initializePolicy() throws YarnException { + setFederationPolicyContext(new FederationPolicyInitializationContext()); + SubClusterResolver resolver = FederationPoliciesTestUtil.initResolver(); + getFederationPolicyContext().setFederationSubclusterResolver(resolver); + ByteBuffer buf = getPolicyInfo().toByteBuffer(); + getFederationPolicyContext().setSubClusterPolicyConfiguration( + SubClusterPolicyConfiguration.newInstance("queue1", + getPolicy().getClass().getCanonicalName(), buf)); + getFederationPolicyContext().setHomeSubcluster(getHomeSubCluster()); + FederationPoliciesTestUtil.initializePolicyContext( + getFederationPolicyContext(), getPolicy(), getPolicyInfo(), + getActiveSubclusters()); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void testNullWeights() throws Exception { + getPolicyInfo().setAMRMPolicyWeights(null); + initializePolicy(); + fail(); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void testEmptyWeights() throws Exception { + getPolicyInfo() + .setAMRMPolicyWeights(new HashMap()); + initializePolicy(); + fail(); + } + + @Test + public void testSplitBasedOnHeadroom() throws Exception { + + // Tests how the headroom info are used to split based on the capacity + // each RM claims to give us. 
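A back-of-the-envelope sketch of the numbers this test asserts further down, assuming (as the comments below state) that createSimpleRequest, which is not shown in this hunk, asks for roughly 100 containers in total, that a subcluster with unknown headroom receives an even 1/N share of the ask, and that the remainder is split in proportion to the advertised headroom; the real policy may differ in rounding details.

// Illustrative arithmetic only; not the policy implementation.
int ask = 100;                          // approximate total containers requested
int eligible = 4;                       // subcluster0, 1, 2 and 5 (active, weight > 0)
int unknownShare = ask / eligible;      // subcluster5 (no headroom info) -> 25
int remaining = ask - unknownShare;     // 75 left to split by advertised headroom
// headrooms advertised via notifyOfResponse: sc0 = 100, sc1 = 0, sc2 = 1
double sc0 = remaining * 100.0 / 101.0; // ~74.3, rounded up -> 75
double sc2 = remaining * 1.0 / 101.0;   // ~0.7,  rounded up -> 1
// which matches checkExpectedAllocation(..., "subcluster0", 1, 75) etc. below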
+ // Configure policy to be 100% headroom based + getPolicyInfo().setHeadroomAlpha(1.0f); + + initializePolicy(); + List resourceRequests = createSimpleRequest(); + + prepPolicyWithHeadroom(); + + Map> response = + ((FederationAMRMProxyPolicy) getPolicy()) + .splitResourceRequests(resourceRequests); + + // pretty print requests + LOG.info("Initial headroom"); + prettyPrintRequests(response); + + validateSplit(response, resourceRequests); + + // based on headroom, we expect 75 containers to got to subcluster0, + // as it advertise lots of headroom (100), no containers for sublcuster1 + // as it advertise zero headroom, 1 to subcluster 2 (as it advertise little + // headroom (1), and 25 to subcluster5 which has unknown headroom, and so + // it gets 1/4th of the load + checkExpectedAllocation(response, "subcluster0", 1, 75); + checkExpectedAllocation(response, "subcluster1", 1, -1); + checkExpectedAllocation(response, "subcluster2", 1, 1); + checkExpectedAllocation(response, "subcluster5", 1, 25); + + // notify a change in headroom and try again + AllocateResponse ar = getAllocateResponseWithTargetHeadroom(100); + ((FederationAMRMProxyPolicy) getPolicy()) + .notifyOfResponse(SubClusterId.newInstance("subcluster2"), ar); + response = ((FederationAMRMProxyPolicy) getPolicy()) + .splitResourceRequests(resourceRequests); + + LOG.info("After headroom update"); + prettyPrintRequests(response); + validateSplit(response, resourceRequests); + + // we simulated a change in headroom for subcluster2, which will now + // have the same headroom of subcluster0 and so it splits the requests + // note that the total is still less or equal to (userAsk + numSubClusters) + checkExpectedAllocation(response, "subcluster0", 1, 38); + checkExpectedAllocation(response, "subcluster1", 1, -1); + checkExpectedAllocation(response, "subcluster2", 1, 38); + checkExpectedAllocation(response, "subcluster5", 1, 25); + + } + + @Test(timeout = 5000) + public void testStressPolicy() throws Exception { + + // Tests how the headroom info are used to split based on the capacity + // each RM claims to give us. + // Configure policy to be 100% headroom based + getPolicyInfo().setHeadroomAlpha(1.0f); + + initializePolicy(); + + int numRR = 1000; + List resourceRequests = createLargeRandomList(numRR); + + prepPolicyWithHeadroom(); + + int numIterations = 1000; + long tstart = System.currentTimeMillis(); + for (int i = 0; i < numIterations; i++) { + Map> response = + ((FederationAMRMProxyPolicy) getPolicy()) + .splitResourceRequests(resourceRequests); + validateSplit(response, resourceRequests); + } + long tend = System.currentTimeMillis(); + + LOG.info("Performed " + numIterations + " policy invocations (and " + + "validations) in " + (tend - tstart) + "ms"); + } + + @Test + public void testFWDAllZeroANY() throws Exception { + + // Tests how the headroom info are used to split based on the capacity + // each RM claims to give us. 
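createZeroSizedANYRequest is not shown in this hunk; a hypothetical sketch of the kind of ask it presumably builds, only to make "zero-sized ANY" concrete (the priority, capability, and structure here are made up):

// Hypothetical example of a zero-sized ANY ResourceRequest; the real helper
// may use different priorities, capabilities or allocation-request ids.
ResourceRequest zeroSizedAny = ResourceRequest.newInstance(
    Priority.newInstance(1),       // arbitrary priority
    ResourceRequest.ANY,           // "*": no locality constraint
    Resource.newInstance(1024, 1), // a capability still has to be set
    0);                            // numContainers == 0: nothing actually requested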
+ // Configure policy to be 100% headroom based + getPolicyInfo().setHeadroomAlpha(0.5f); + + initializePolicy(); + List resourceRequests = createZeroSizedANYRequest(); + + // this receives responses from sc0,sc1,sc2 + prepPolicyWithHeadroom(); + + Map> response = + ((FederationAMRMProxyPolicy) getPolicy()) + .splitResourceRequests(resourceRequests); + + // we expect all three to appear for a zero-sized ANY + + // pretty print requests + prettyPrintRequests(response); + + validateSplit(response, resourceRequests); + + // we expect the zero size request to be sent to the first 3 rm (due to + // the fact that we received responses only from these 3 sublcusters) + checkExpectedAllocation(response, "subcluster0", 1, 0); + checkExpectedAllocation(response, "subcluster1", 1, 0); + checkExpectedAllocation(response, "subcluster2", 1, 0); + checkExpectedAllocation(response, "subcluster3", -1, -1); + checkExpectedAllocation(response, "subcluster4", -1, -1); + checkExpectedAllocation(response, "subcluster5", -1, -1); + } + + @Test + public void testSplitBasedOnHeadroomAndWeights() throws Exception { + + // Tests how the headroom info are used to split based on the capacity + // each RM claims to give us. + + // Configure policy to be 50% headroom based and 50% weight based + getPolicyInfo().setHeadroomAlpha(0.5f); + + initializePolicy(); + List resourceRequests = createSimpleRequest(); + + prepPolicyWithHeadroom(); + + Map> response = + ((FederationAMRMProxyPolicy) getPolicy()) + .splitResourceRequests(resourceRequests); + + // pretty print requests + prettyPrintRequests(response); + + validateSplit(response, resourceRequests); + + // in this case the headroom allocates 50 containers, while weights allocate + // the rest. due to weights we have 12.5 (round to 13) containers for each + // sublcuster, the rest is due to headroom. + checkExpectedAllocation(response, "subcluster0", 1, 50); + checkExpectedAllocation(response, "subcluster1", 1, 13); + checkExpectedAllocation(response, "subcluster2", 1, 13); + checkExpectedAllocation(response, "subcluster3", -1, -1); + checkExpectedAllocation(response, "subcluster4", -1, -1); + checkExpectedAllocation(response, "subcluster5", 1, 25); + + } + + private void prepPolicyWithHeadroom() throws YarnException { + AllocateResponse ar = getAllocateResponseWithTargetHeadroom(100); + ((FederationAMRMProxyPolicy) getPolicy()) + .notifyOfResponse(SubClusterId.newInstance("subcluster0"), ar); + + ar = getAllocateResponseWithTargetHeadroom(0); + ((FederationAMRMProxyPolicy) getPolicy()) + .notifyOfResponse(SubClusterId.newInstance("subcluster1"), ar); + + ar = getAllocateResponseWithTargetHeadroom(1); + ((FederationAMRMProxyPolicy) getPolicy()) + .notifyOfResponse(SubClusterId.newInstance("subcluster2"), ar); + } + + private AllocateResponse getAllocateResponseWithTargetHeadroom( + int numContainers) { + return AllocateResponse.newInstance(0, null, null, + Collections. emptyList(), + Resource.newInstance(numContainers * 1024, numContainers), null, 10, + null, Collections. 
emptyList()); + } + + @Test + public void testSplitAllocateRequest() throws Exception { + + // Test that a complex List of requests is split correctly + initializePolicy(); + + // modify default initialization to include a "homesubcluster" + // which we will use as the default for when nodes or racks are unknown + SubClusterInfo sci = mock(SubClusterInfo.class); + when(sci.getState()).thenReturn(SubClusterState.SC_RUNNING); + when(sci.getSubClusterId()).thenReturn(getHomeSubCluster()); + getActiveSubclusters().put(getHomeSubCluster(), sci); + SubClusterIdInfo sc = new SubClusterIdInfo(getHomeSubCluster().getId()); + + getPolicyInfo().getRouterPolicyWeights().put(sc, 0.1f); + getPolicyInfo().getAMRMPolicyWeights().put(sc, 0.1f); + + FederationPoliciesTestUtil.initializePolicyContext( + getFederationPolicyContext(), getPolicy(), getPolicyInfo(), + getActiveSubclusters()); + + List resourceRequests = createComplexRequest(); + + Map> response = + ((FederationAMRMProxyPolicy) getPolicy()) + .splitResourceRequests(resourceRequests); + + validateSplit(response, resourceRequests); + prettyPrintRequests(response); + + // we expect 7 entries for home subcluster (2 for request-id 4, 3 for + // request-id 5, and a part of the broadcast of request-id 2) + checkExpectedAllocation(response, getHomeSubCluster().getId(), 7, 29); + + // for subcluster0 we expect 10 entries, 3 from request-id 0, and 3 from + // request-id 3, 3 entries from request-id 5, as well as part of the + // request-id 2 broadcast + checkExpectedAllocation(response, "subcluster0", 10, 32); + + // we expect 5 entries for subcluster1 (4 from request-id 1, and part + // of the broadcast of request-id 2) + checkExpectedAllocation(response, "subcluster1", 5, 26); + + // sub-cluster 2 should contain 3 entries from request-id 1 and 1 from the + // broadcast of request-id 2, and no request-id 0 + checkExpectedAllocation(response, "subcluster2", 4, 23); + + // subcluster id 3, 4 should not appear (due to weights or active/inactive) + checkExpectedAllocation(response, "subcluster3", -1, -1); + checkExpectedAllocation(response, "subcluster4", -1, -1); + + // subcluster5 should get only part of the request-id 2 broadcast + checkExpectedAllocation(response, "subcluster5", 1, 20); + + // check that the allocations that show up are as expected + for (ResourceRequest rr : response.get(getHomeSubCluster())) { + Assert.assertTrue( + rr.getAllocationRequestId() == 2L || rr.getAllocationRequestId() == 4L + || rr.getAllocationRequestId() == 5L); + } + + List rrs = + response.get(SubClusterId.newInstance("subcluster0")); + for (ResourceRequest rr : rrs) { + Assert.assertTrue(rr.getAllocationRequestId() != 1L); + Assert.assertTrue(rr.getAllocationRequestId() != 4L); + } + + for (ResourceRequest rr : response + .get(SubClusterId.newInstance("subcluster1"))) { + Assert.assertTrue(rr.getAllocationRequestId() == 1L + || rr.getAllocationRequestId() == 2L); + } + + for (ResourceRequest rr : response + .get(SubClusterId.newInstance("subcluster2"))) { + Assert.assertTrue(rr.getAllocationRequestId() == 1L + || rr.getAllocationRequestId() == 2L); + } + + for (ResourceRequest rr : response + .get(SubClusterId.newInstance("subcluster5"))) { + Assert.assertTrue(rr.getAllocationRequestId() == 2); + Assert.assertTrue(rr.getRelaxLocality()); + } + } + + // check that the number of ResourceRequests and the total number of + // containers in the response for this sub-cluster match expectations. 
-1 indicates the + // response should be null + private void checkExpectedAllocation( + Map> response, String subCluster, + long totResourceRequests, long totContainers) { + if (totContainers == -1) { + Assert.assertNull(response.get(SubClusterId.newInstance(subCluster))); + } else { + SubClusterId sc = SubClusterId.newInstance(subCluster); + Assert.assertEquals(totResourceRequests, response.get(sc).size()); + + long actualContCount = 0; + for (ResourceRequest rr : response.get(sc)) { + actualContCount += rr.getNumContainers(); + } + Assert.assertEquals(totContainers, actualContCount); + } + } + + private void validateSplit(Map> split, + List original) throws YarnException { + + SubClusterResolver resolver = + getFederationPolicyContext().getFederationSubclusterResolver(); + + // Apply general validation rules + int numUsedSubclusters = split.size(); + + Set originalIds = new HashSet<>(); + Set splitIds = new HashSet<>(); + + int originalContainers = 0; + for (ResourceRequest rr : original) { + originalContainers += rr.getNumContainers(); + originalIds.add(rr.getAllocationRequestId()); + } + + int splitContainers = 0; + for (Map.Entry> rrs : split + .entrySet()) { + for (ResourceRequest rr : rrs.getValue()) { + splitContainers += rr.getNumContainers(); + splitIds.add(rr.getAllocationRequestId()); + // check node-local asks are sent to the right RM (only) + SubClusterId fid = null; + try { + fid = resolver.getSubClusterForNode(rr.getResourceName()); + } catch (YarnException e) { + // ignore; the check below handles unresolvable names + } + if (!rrs.getKey().equals(getHomeSubCluster()) && fid != null + && !fid.equals(rrs.getKey())) { + Assert.fail("A node-local (or resolvable rack-local) RR should not " + + "be sent to an RM other than what it resolves to."); + } + } + } + + // check we are not inventing Allocation Ids + Assert.assertEquals(originalIds, splitIds); + + // check we are not excessively replicating the container asks among + // RMs (a little is allowed due to rounding of fractional splits) + Assert.assertTrue( + " Containers requested (" + splitContainers + ") should " + + "not exceed the original count of containers (" + + originalContainers + ") by more than the number of subclusters (" + + numUsedSubclusters + ")", + originalContainers + numUsedSubclusters >= splitContainers); + + // Test target Ids + for (SubClusterId targetId : split.keySet()) { + Assert.assertTrue("Target subclusters should be in the active set", + getActiveSubclusters().containsKey(targetId)); + Assert.assertTrue( + "Target subclusters (" + targetId + ") should have weight >0 in " + + "the policy ", + getPolicyInfo().getRouterPolicyWeights() + .get(new SubClusterIdInfo(targetId)) > 0); + } + } + + private void prettyPrintRequests( + Map> response) { + for (Map.Entry> entry : response + .entrySet()) { + String str = ""; + for (ResourceRequest rr : entry.getValue()) { + str += " [id:" + rr.getAllocationRequestId() + " loc:" + + rr.getResourceName() + " numCont:" + rr.getNumContainers() + + "], "; + } + LOG.info(entry.getKey() + " --> " + str); + } + } + + private List createLargeRandomList(int numRR) + throws Exception { + + List out = new ArrayList<>(); + Random rand = new Random(1); + DefaultSubClusterResolverImpl resolver = + (DefaultSubClusterResolverImpl) getFederationPolicyContext() + .getFederationSubclusterResolver(); + + List nodes = + new ArrayList<>(resolver.getNodeToSubCluster().keySet()); + + for (int i = 0; i < numRR; i++) { + String nodeName = nodes.get(rand.nextInt(nodes.size())); + long allocationId = (long) 
rand.nextInt(20); + + // create a single container request in sc0 + out.add(FederationPoliciesTestUtil.createResourceRequest(allocationId, + nodeName, 1024, 1, 1, rand.nextInt(100), null, rand.nextBoolean())); + } + return out; + } + + private List createSimpleRequest() throws Exception { + + List out = new ArrayList<>(); + + // create a single container request in sc0 + out.add(FederationPoliciesTestUtil.createResourceRequest(0L, + ResourceRequest.ANY, 1024, 1, 1, 100, null, true)); + return out; + } + + private List createZeroSizedANYRequest() throws Exception { + + List out = new ArrayList<>(); + + // create a single container request in sc0 + out.add(FederationPoliciesTestUtil.createResourceRequest(0L, + ResourceRequest.ANY, 1024, 1, 1, 0, null, true)); + return out; + } + + private List createComplexRequest() throws Exception { + + List out = new ArrayList<>(); + + // create a single container request in sc0 + out.add(FederationPoliciesTestUtil.createResourceRequest(0L, + "subcluster0-rack0-host0", 1024, 1, 1, 1, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(0L, + "subcluster0-rack0", 1024, 1, 1, 1, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(0L, + ResourceRequest.ANY, 1024, 1, 1, 1, null, false)); + + // create a single container request with 3 alternative hosts across sc1,sc2 + // where we want 2 containers in sc1 and 1 in sc2 + out.add(FederationPoliciesTestUtil.createResourceRequest(1L, + "subcluster1-rack1-host1", 1024, 1, 1, 1, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(1L, + "subcluster1-rack1-host2", 1024, 1, 1, 1, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(1L, + "subcluster2-rack3-host3", 1024, 1, 1, 1, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(1L, + "subcluster1-rack1", 1024, 1, 1, 2, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(1L, + "subcluster2-rack3", 1024, 1, 1, 1, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(1L, + ResourceRequest.ANY, 1024, 1, 1, 3, null, false)); + + // create a non-local ANY request that can span anything + out.add(FederationPoliciesTestUtil.createResourceRequest(2L, + ResourceRequest.ANY, 1024, 1, 1, 100, null, true)); + + // create a single container request in sc0 with relaxed locality + out.add(FederationPoliciesTestUtil.createResourceRequest(3L, + "subcluster0-rack0-host0", 1024, 1, 1, 1, null, true)); + out.add(FederationPoliciesTestUtil.createResourceRequest(3L, + "subcluster0-rack0", 1024, 1, 1, 1, null, true)); + out.add(FederationPoliciesTestUtil.createResourceRequest(3L, + ResourceRequest.ANY, 1024, 1, 1, 1, null, true)); + + // create a request of an unknown node/rack and expect this to show up + // in homesubcluster + out.add(FederationPoliciesTestUtil.createResourceRequest(4L, "unknownNode", + 1024, 1, 1, 1, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(4L, "unknownRack", + 1024, 1, 1, 1, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(4L, + ResourceRequest.ANY, 1024, 1, 1, 1, null, false)); + + // create a request of two hosts, an unknown node and a known node, both in + // a known rack, and expect the unknown node to show up in homesubcluster + out.add(FederationPoliciesTestUtil.createResourceRequest(5L, + "subcluster0-rack0-host0", 1024, 1, 1, 2, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(5L, + "subcluster0-rack0", 1024, 1, 1, 2, null, false)); 
+ out.add(FederationPoliciesTestUtil.createResourceRequest(5L, "node4", 1024, + 1, 1, 2, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(5L, "rack2", 1024, + 1, 1, 2, null, false)); + out.add(FederationPoliciesTestUtil.createResourceRequest(5L, + ResourceRequest.ANY, 1024, 1, 1, 4, null, false)); + + return out; + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestRejectAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestRejectAMRMProxyPolicy.java new file mode 100644 index 00000000000..41e7fed2194 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestRejectAMRMProxyPolicy.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.amrmproxy; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.server.federation.policies.BaseFederationPoliciesTest; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test class for the {@link RejectAMRMProxyPolicy}. 
+ */ +public class TestRejectAMRMProxyPolicy + extends BaseFederationPoliciesTest { + + @Before + public void setUp() throws Exception { + setPolicy(new RejectAMRMProxyPolicy()); + // needed for base test to work + setPolicyInfo(mock(WeightedPolicyInfo.class)); + + for (int i = 1; i <= 2; i++) { + SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i); + SubClusterInfo sci = mock(SubClusterInfo.class); + when(sci.getState()).thenReturn(SubClusterState.SC_RUNNING); + when(sci.getSubClusterId()).thenReturn(sc.toId()); + getActiveSubclusters().put(sc.toId(), sci); + } + + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + mock(WeightedPolicyInfo.class), getActiveSubclusters()); + + } + + @Test (expected = FederationPolicyException.class) + public void testSplitAllocateRequest() throws Exception { + // verify the request is broadcasted to all subclusters + String[] hosts = new String[] {"host1", "host2" }; + List resourceRequests = FederationPoliciesTestUtil + .createResourceRequests(hosts, 2 * 1024, 2, 1, 3, null, false); + + Map> response = + ((FederationAMRMProxyPolicy) getPolicy()) + .splitResourceRequests(resourceRequests); + } + + + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/BasePolicyManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/BasePolicyManagerTest.java new file mode 100644 index 00000000000..bd99cb52ee0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/BasePolicyManagerTest.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext; +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Assert; +import org.junit.Test; + +/** + * This class provides common test methods for testing {@code + * FederationPolicyManager}s. 
+ */ +public abstract class BasePolicyManagerTest { + + @SuppressWarnings("checkstyle:visibilitymodifier") + protected FederationPolicyManager wfp = null; + @SuppressWarnings("checkstyle:visibilitymodifier") + protected Class expectedPolicyManager; + @SuppressWarnings("checkstyle:visibilitymodifier") + protected Class expectedAMRMProxyPolicy; + @SuppressWarnings("checkstyle:visibilitymodifier") + protected Class expectedRouterPolicy; + + @Test + public void testSerializeAndInstantiate() throws Exception { + serializeAndDeserializePolicyManager(wfp, expectedPolicyManager, + expectedAMRMProxyPolicy, expectedRouterPolicy); + } + + @Test(expected = FederationPolicyInitializationException.class) + public void testSerializeAndInstantiateBad1() throws Exception { + serializeAndDeserializePolicyManager(wfp, String.class, + expectedAMRMProxyPolicy, expectedRouterPolicy); + } + + @Test(expected = AssertionError.class) + public void testSerializeAndInstantiateBad2() throws Exception { + serializeAndDeserializePolicyManager(wfp, expectedPolicyManager, + String.class, expectedRouterPolicy); + } + + @Test(expected = AssertionError.class) + public void testSerializeAndInstantiateBad3() throws Exception { + serializeAndDeserializePolicyManager(wfp, expectedPolicyManager, + expectedAMRMProxyPolicy, String.class); + } + + protected static void serializeAndDeserializePolicyManager( + FederationPolicyManager wfp, Class policyManagerType, + Class expAMRMProxyPolicy, Class expRouterPolicy) throws Exception { + + // serializeConf it in a context + SubClusterPolicyConfiguration fpc = wfp.serializeConf(); + fpc.setType(policyManagerType.getCanonicalName()); + FederationPolicyInitializationContext context = + new FederationPolicyInitializationContext(); + context.setSubClusterPolicyConfiguration(fpc); + context + .setFederationStateStoreFacade(FederationPoliciesTestUtil.initFacade()); + context.setFederationSubclusterResolver( + FederationPoliciesTestUtil.initResolver()); + context.setHomeSubcluster(SubClusterId.newInstance("homesubcluster")); + + // based on the "context" created instantiate new class and use it + Class c = Class.forName(wfp.getClass().getCanonicalName()); + FederationPolicyManager wfp2 = (FederationPolicyManager) c.newInstance(); + + FederationAMRMProxyPolicy federationAMRMProxyPolicy = + wfp2.getAMRMPolicy(context, null); + + FederationRouterPolicy federationRouterPolicy = + wfp2.getRouterPolicy(context, null); + + Assert.assertEquals(federationAMRMProxyPolicy.getClass(), + expAMRMProxyPolicy); + + Assert.assertEquals(federationRouterPolicy.getClass(), expRouterPolicy); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestHashBasedBroadcastPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestHashBasedBroadcastPolicyManager.java new file mode 100644 index 00000000000..5fc4a562f8e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestHashBasedBroadcastPolicyManager.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.router.HashBasedRouterPolicy; +import org.junit.Before; + +/** + * Simple test of {@link HashBroadcastPolicyManager}. + */ +public class TestHashBasedBroadcastPolicyManager extends BasePolicyManagerTest { + + @Before + public void setup() { + // config policy + wfp = new HashBroadcastPolicyManager(); + wfp.setQueue("queue1"); + + // set expected params that the base test class will use for tests + expectedPolicyManager = HashBroadcastPolicyManager.class; + expectedAMRMProxyPolicy = BroadcastAMRMProxyPolicy.class; + expectedRouterPolicy = HashBasedRouterPolicy.class; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestPriorityBroadcastPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestPriorityBroadcastPolicyManager.java new file mode 100644 index 00000000000..21b39e909ed --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestPriorityBroadcastPolicyManager.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.router.PriorityRouterPolicy; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test of {@link PriorityBroadcastPolicyManager}. + */ +public class TestPriorityBroadcastPolicyManager extends BasePolicyManagerTest { + + private WeightedPolicyInfo policyInfo; + + @Before + public void setup() { + // configure a policy + + wfp = new PriorityBroadcastPolicyManager(); + wfp.setQueue("queue1"); + SubClusterId sc1 = SubClusterId.newInstance("sc1"); + SubClusterId sc2 = SubClusterId.newInstance("sc2"); + policyInfo = new WeightedPolicyInfo(); + + Map routerWeights = new HashMap<>(); + routerWeights.put(new SubClusterIdInfo(sc1), 0.2f); + routerWeights.put(new SubClusterIdInfo(sc2), 0.8f); + policyInfo.setRouterPolicyWeights(routerWeights); + + ((PriorityBroadcastPolicyManager) wfp).setWeightedPolicyInfo(policyInfo); + + // set expected params that the base test class will use for tests + expectedPolicyManager = PriorityBroadcastPolicyManager.class; + expectedAMRMProxyPolicy = BroadcastAMRMProxyPolicy.class; + expectedRouterPolicy = PriorityRouterPolicy.class; + } + + @Test + public void testPolicyInfoSetCorrectly() throws Exception { + serializeAndDeserializePolicyManager(wfp, expectedPolicyManager, + expectedAMRMProxyPolicy, expectedRouterPolicy); + + // check the policyInfo propagates through ser/der correctly + Assert.assertEquals( + ((PriorityBroadcastPolicyManager) wfp).getWeightedPolicyInfo(), + policyInfo); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestRejectAllPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestRejectAllPolicyManager.java new file mode 100644 index 00000000000..e4dc7f43e05 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestRejectAllPolicyManager.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.RejectAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.router.RejectRouterPolicy; +import org.junit.Before; + +/** + * Simple test of {@link RejectAllPolicyManager}. + */ +public class TestRejectAllPolicyManager extends BasePolicyManagerTest { + + @Before + public void setup() { + // config policy + wfp = new RejectAllPolicyManager(); + wfp.setQueue("queue1"); + + // set expected params that the base test class will use for tests + expectedPolicyManager = RejectAllPolicyManager.class; + expectedAMRMProxyPolicy = RejectAMRMProxyPolicy.class; + expectedRouterPolicy = RejectRouterPolicy.class; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestUniformBroadcastPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestUniformBroadcastPolicyManager.java new file mode 100644 index 00000000000..57fafdc79bd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestUniformBroadcastPolicyManager.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy; +import org.junit.Before; + +/** + * Simple test of {@link UniformBroadcastPolicyManager}. 
+ */ +public class TestUniformBroadcastPolicyManager extends BasePolicyManagerTest { + + @Before + public void setup() { + //config policy + wfp = new UniformBroadcastPolicyManager(); + wfp.setQueue("queue1"); + + //set expected params that the base test class will use for tests + expectedPolicyManager = UniformBroadcastPolicyManager.class; + expectedAMRMProxyPolicy = BroadcastAMRMProxyPolicy.class; + expectedRouterPolicy = UniformRandomRouterPolicy.class; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestWeightedLocalityPolicyManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestWeightedLocalityPolicyManager.java new file mode 100644 index 00000000000..51661473000 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/TestWeightedLocalityPolicyManager.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.manager; + +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.LocalityMulticastAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.policies.router.WeightedRandomRouterPolicy; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +/** + * Simple test of {@link WeightedLocalityPolicyManager}. 
+ */ +public class TestWeightedLocalityPolicyManager extends + BasePolicyManagerTest { + + private WeightedPolicyInfo policyInfo; + + @Before + public void setup() { + // configure a policy + + wfp = new WeightedLocalityPolicyManager(); + wfp.setQueue("queue1"); + SubClusterId sc1 = SubClusterId.newInstance("sc1"); + SubClusterId sc2 = SubClusterId.newInstance("sc2"); + policyInfo = new WeightedPolicyInfo(); + + Map routerWeights = new HashMap<>(); + routerWeights.put(new SubClusterIdInfo(sc1), 0.2f); + routerWeights.put(new SubClusterIdInfo(sc2), 0.8f); + policyInfo.setRouterPolicyWeights(routerWeights); + + Map amrmWeights = new HashMap<>(); + amrmWeights.put(new SubClusterIdInfo(sc1), 0.2f); + amrmWeights.put(new SubClusterIdInfo(sc2), 0.8f); + policyInfo.setAMRMPolicyWeights(amrmWeights); + + ((WeightedLocalityPolicyManager) wfp).setWeightedPolicyInfo( + policyInfo); + + //set expected params that the base test class will use for tests + expectedPolicyManager = WeightedLocalityPolicyManager.class; + expectedAMRMProxyPolicy = LocalityMulticastAMRMProxyPolicy.class; + expectedRouterPolicy = WeightedRandomRouterPolicy.class; + } + + @Test + public void testPolicyInfoSetCorrectly() throws Exception { + serializeAndDeserializePolicyManager(wfp, expectedPolicyManager, + expectedAMRMProxyPolicy, + expectedRouterPolicy); + + //check the policyInfo propagates through ser/der correctly + Assert.assertEquals(((WeightedLocalityPolicyManager) wfp) + .getWeightedPolicyInfo(), policyInfo); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/BaseRouterPoliciesTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/BaseRouterPoliciesTest.java new file mode 100644 index 00000000000..d09ba754d55 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/BaseRouterPoliciesTest.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Random; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.BaseFederationPoliciesTest; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.Assert; +import org.junit.Test; + +/** + * Base class for router policies tests, tests for null input cases. + */ +public abstract class BaseRouterPoliciesTest + extends BaseFederationPoliciesTest { + + @Test + public void testNullQueueRouting() throws YarnException { + FederationRouterPolicy localPolicy = (FederationRouterPolicy) getPolicy(); + ApplicationSubmissionContext applicationSubmissionContext = + ApplicationSubmissionContext.newInstance(null, null, null, null, null, + false, false, 0, Resources.none(), null, false, null, null); + SubClusterId chosen = + localPolicy.getHomeSubcluster(applicationSubmissionContext, null); + Assert.assertNotNull(chosen); + } + + @Test(expected = FederationPolicyException.class) + public void testNullAppContext() throws YarnException { + ((FederationRouterPolicy) getPolicy()).getHomeSubcluster(null, null); + } + + @Test + public void testBlacklistSubcluster() throws YarnException { + FederationRouterPolicy localPolicy = (FederationRouterPolicy) getPolicy(); + ApplicationSubmissionContext applicationSubmissionContext = + ApplicationSubmissionContext.newInstance(null, null, null, null, null, + false, false, 0, Resources.none(), null, false, null, null); + Map activeSubClusters = + getActiveSubclusters(); + if (activeSubClusters != null && activeSubClusters.size() > 1 + && !(localPolicy instanceof RejectRouterPolicy)) { + // blacklist all the active subcluster but one. + Random random = new Random(); + List blacklistSubclusters = + new ArrayList(activeSubClusters.keySet()); + SubClusterId removed = blacklistSubclusters + .remove(random.nextInt(blacklistSubclusters.size())); + // bias LoadBasedRouterPolicy + getPolicyInfo().getRouterPolicyWeights() + .put(new SubClusterIdInfo(removed), 1.0f); + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + getPolicyInfo(), getActiveSubclusters()); + + SubClusterId chosen = localPolicy.getHomeSubcluster( + applicationSubmissionContext, blacklistSubclusters); + + // check that the selected sub-cluster is only one not blacklisted + Assert.assertNotNull(chosen); + Assert.assertEquals(removed, chosen); + } + } + + /** + * This test validates the correctness of blacklist logic in case the cluster + * has no active subclusters. 
+ */ + @Test + public void testAllBlacklistSubcluster() throws YarnException { + FederationRouterPolicy localPolicy = (FederationRouterPolicy) getPolicy(); + ApplicationSubmissionContext applicationSubmissionContext = + ApplicationSubmissionContext.newInstance(null, null, null, null, null, + false, false, 0, Resources.none(), null, false, null, null); + Map activeSubClusters = + getActiveSubclusters(); + if (activeSubClusters != null && activeSubClusters.size() > 1 + && !(localPolicy instanceof RejectRouterPolicy)) { + List blacklistSubclusters = + new ArrayList(activeSubClusters.keySet()); + try { + localPolicy.getHomeSubcluster(applicationSubmissionContext, + blacklistSubclusters); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue(e.getMessage() + .equals(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE)); + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestHashBasedRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestHashBasedRouterPolicy.java new file mode 100644 index 00000000000..ee3e09d2b93 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestHashBasedRouterPolicy.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test class for the {@link HashBasedRouterPolicy}. Tests that one of + * the active sub-cluster is chosen. 
+ */ +public class TestHashBasedRouterPolicy extends BaseRouterPoliciesTest { + + private int numSubclusters = 10; + + @Before + public void setUp() throws Exception { + + // set policy in base class + setPolicy(new HashBasedRouterPolicy()); + + // setting up the active sub-clusters for this test + setMockActiveSubclusters(numSubclusters); + + // initialize policy with context + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + getPolicyInfo(), getActiveSubclusters()); + } + + @Test + public void testHashSpreadUniformlyAmongSubclusters() throws YarnException { + SubClusterId chosen; + + Map counter = new HashMap<>(); + for (SubClusterId id : getActiveSubclusters().keySet()) { + counter.put(id, new AtomicLong(0)); + } + + long jobPerSub = 100; + + ApplicationSubmissionContext applicationSubmissionContext = + mock(ApplicationSubmissionContext.class); + for (int i = 0; i < jobPerSub * numSubclusters; i++) { + when(applicationSubmissionContext.getQueue()).thenReturn("queue" + i); + chosen = ((FederationRouterPolicy) getPolicy()) + .getHomeSubcluster(applicationSubmissionContext, null); + counter.get(chosen).addAndGet(1); + } + + // hash spread the jobs equally among the subclusters + for (AtomicLong a : counter.values()) { + Assert.assertEquals(a.get(), jobPerSub); + } + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestLoadBasedRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestLoadBasedRouterPolicy.java new file mode 100644 index 00000000000..dc8f99bfce3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestLoadBasedRouterPolicy.java @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test class for the {@link LoadBasedRouterPolicy}. Test that the load + * is properly considered for allocation. + */ +public class TestLoadBasedRouterPolicy extends BaseRouterPoliciesTest { + + @Before + public void setUp() throws Exception { + setPolicy(new LoadBasedRouterPolicy()); + setPolicyInfo(new WeightedPolicyInfo()); + Map routerWeights = new HashMap<>(); + Map amrmWeights = new HashMap<>(); + + // simulate 20 active subclusters + for (int i = 0; i < 20; i++) { + SubClusterIdInfo sc = new SubClusterIdInfo(String.format("sc%02d", i)); + SubClusterInfo federationSubClusterInfo = + SubClusterInfo.newInstance(sc.toId(), null, null, null, null, -1, + SubClusterState.SC_RUNNING, -1, generateClusterMetricsInfo(i)); + getActiveSubclusters().put(sc.toId(), federationSubClusterInfo); + float weight = getRand().nextInt(2); + if (i == 5) { + weight = 1.0f; + } + + // 5% chance we omit one of the weights + if (i <= 5 || getRand().nextFloat() > 0.05f) { + routerWeights.put(sc, weight); + amrmWeights.put(sc, weight); + } + } + getPolicyInfo().setRouterPolicyWeights(routerWeights); + getPolicyInfo().setAMRMPolicyWeights(amrmWeights); + + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + getPolicyInfo(), getActiveSubclusters()); + + } + + private String generateClusterMetricsInfo(int id) { + + long mem = 1024 * getRand().nextInt(277 * 100 - 1); + // plant a best cluster + if (id == 5) { + mem = 1024 * 277 * 100; + } + String clusterMetrics = + "{\"clusterMetrics\":{\"appsSubmitted\":65," + "\"appsCompleted\":64," + + "\"appsPending\":0,\"appsRunning\":0,\"appsFailed\":0," + + "\"appsKilled\":1,\"reservedMB\":0,\"availableMB\":" + mem + "," + + "\"allocatedMB\":0,\"reservedVirtualCores\":0," + + "\"availableVirtualCores\":2216,\"allocatedVirtualCores\":0," + + "\"containersAllocated\":0,\"containersReserved\":0," + + "\"containersPending\":0,\"totalMB\":28364800," + + "\"totalVirtualCores\":2216,\"totalNodes\":278,\"lostNodes\":1," + + "\"unhealthyNodes\":0,\"decommissionedNodes\":0," + + "\"rebootedNodes\":0,\"activeNodes\":277}}\n"; + + return clusterMetrics; + + } + + @Test + public void testLoadIsRespected() throws YarnException { + + SubClusterId chosen = ((FederationRouterPolicy) getPolicy()) + .getHomeSubcluster(getApplicationSubmissionContext(), null); + + // check the "planted" best cluster is chosen + Assert.assertEquals("sc05", chosen.getId()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestPriorityRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestPriorityRouterPolicy.java new file 
mode 100644 index 00000000000..3c036c18124 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestPriorityRouterPolicy.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.policies.router; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test class for the {@link PriorityRouterPolicy}. Tests that the + * weights are correctly used for ordering the choice of sub-clusters. + */ +public class TestPriorityRouterPolicy extends BaseRouterPoliciesTest { + + @Before + public void setUp() throws Exception { + setPolicy(new PriorityRouterPolicy()); + setPolicyInfo(new WeightedPolicyInfo()); + Map routerWeights = new HashMap<>(); + Map amrmWeights = new HashMap<>(); + + // simulate 20 subclusters with a 5% chance of being inactive + for (int i = 0; i < 20; i++) { + SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i); + + // with 5% omit a subcluster + if (getRand().nextFloat() < 0.95f || i == 5) { + SubClusterInfo sci = mock(SubClusterInfo.class); + when(sci.getState()).thenReturn(SubClusterState.SC_RUNNING); + when(sci.getSubClusterId()).thenReturn(sc.toId()); + getActiveSubclusters().put(sc.toId(), sci); + } + float weight = getRand().nextFloat(); + if (i == 5) { + weight = 1.1f; // guaranteed to be the largest. 
+ } + + // 5% chance we omit one of the weights + if (i <= 5 || getRand().nextFloat() > 0.05f) { + routerWeights.put(sc, weight); + amrmWeights.put(sc, weight); + } + } + getPolicyInfo().setRouterPolicyWeights(routerWeights); + getPolicyInfo().setAMRMPolicyWeights(amrmWeights); + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + getPolicyInfo(), getActiveSubclusters()); + + } + + @Test + public void testPickLowestWeight() throws YarnException { + SubClusterId chosen = ((FederationRouterPolicy) getPolicy()) + .getHomeSubcluster(getApplicationSubmissionContext(), null); + Assert.assertEquals("sc5", chosen.getId()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestRejectRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestRejectRouterPolicy.java new file mode 100644 index 00000000000..1747f73715c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestRejectRouterPolicy.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test class for the {@link RejectRouterPolicy}. Tests that one of the + * active subcluster is chosen. 
+ */ +public class TestRejectRouterPolicy extends BaseRouterPoliciesTest { + + @Before + public void setUp() throws Exception { + setPolicy(new RejectRouterPolicy()); + + // setting up the active sub-clusters for this test + setMockActiveSubclusters(2); + + // initialize policy with context + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + getPolicyInfo(), getActiveSubclusters()); + + } + + @Test(expected = FederationPolicyException.class) + public void testNoClusterIsChosen() throws YarnException { + ((FederationRouterPolicy) getPolicy()) + .getHomeSubcluster(getApplicationSubmissionContext(), null); + } + + @Override + @Test(expected = FederationPolicyException.class) + public void testNullQueueRouting() throws YarnException { + FederationRouterPolicy localPolicy = (FederationRouterPolicy) getPolicy(); + ApplicationSubmissionContext applicationSubmissionContext = + ApplicationSubmissionContext.newInstance(null, null, null, null, null, + false, false, 0, Resources.none(), null, false, null, null); + localPolicy.getHomeSubcluster(applicationSubmissionContext, null); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestUniformRandomRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestUniformRandomRouterPolicy.java new file mode 100644 index 00000000000..05490aba672 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestUniformRandomRouterPolicy.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test class for the {@link UniformRandomRouterPolicy}. Tests that one + * of the active subcluster is chosen. 
+ */ +public class TestUniformRandomRouterPolicy extends BaseRouterPoliciesTest { + + @Before + public void setUp() throws Exception { + setPolicy(new UniformRandomRouterPolicy()); + // needed for base test to work + setPolicyInfo(mock(WeightedPolicyInfo.class)); + for (int i = 1; i <= 2; i++) { + SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i); + SubClusterInfo sci = mock(SubClusterInfo.class); + when(sci.getState()).thenReturn(SubClusterState.SC_RUNNING); + when(sci.getSubClusterId()).thenReturn(sc.toId()); + getActiveSubclusters().put(sc.toId(), sci); + } + + FederationPoliciesTestUtil.initializePolicyContext(getPolicy(), + mock(WeightedPolicyInfo.class), getActiveSubclusters()); + } + + @Test + public void testOneSubclusterIsChosen() throws YarnException { + SubClusterId chosen = ((FederationRouterPolicy) getPolicy()) + .getHomeSubcluster(getApplicationSubmissionContext(), null); + Assert.assertTrue(getActiveSubclusters().keySet().contains(chosen)); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestWeightedRandomRouterPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestWeightedRandomRouterPolicy.java new file mode 100644 index 00000000000..c969a30e65f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestWeightedRandomRouterPolicy.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.policies.router; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.utils.FederationPoliciesTestUtil; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Simple test class for the {@link WeightedRandomRouterPolicy}. 
Generates a large
+ * number of randomized tests to check that we are weighting correctly even if
+ * clusters go inactive.
+ */
+public class TestWeightedRandomRouterPolicy extends BaseRouterPoliciesTest {
+
+  @Before
+  public void setUp() throws Exception {
+    setPolicy(new WeightedRandomRouterPolicy());
+    setPolicyInfo(new WeightedPolicyInfo());
+    Map<SubClusterIdInfo, Float> routerWeights = new HashMap<>();
+    Map<SubClusterIdInfo, Float> amrmWeights = new HashMap<>();
+
+    float numSubClusters = 20;
+    // simulate N subclusters, each with a 5% chance of being inactive
+    for (int i = 0; i < numSubClusters; i++) {
+      SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i);
+      // with 5% probability, omit a subcluster
+      if (getRand().nextFloat() < 0.95f) {
+        SubClusterInfo sci = mock(SubClusterInfo.class);
+        when(sci.getState()).thenReturn(SubClusterState.SC_RUNNING);
+        when(sci.getSubClusterId()).thenReturn(sc.toId());
+        getActiveSubclusters().put(sc.toId(), sci);
+      }
+
+      // 80% of the weight is evenly spread, 20% is randomly generated
+      float weight =
+          (0.8f * 1f / numSubClusters) + (0.2f * getRand().nextFloat());
+
+      // also with 5% probability, omit one of the weights
+      if (i <= 5 || getRand().nextFloat() > 0.05f) {
+        routerWeights.put(sc, weight);
+        amrmWeights.put(sc, weight);
+      }
+    }
+    getPolicyInfo().setRouterPolicyWeights(routerWeights);
+    getPolicyInfo().setAMRMPolicyWeights(amrmWeights);
+
+    FederationPoliciesTestUtil.initializePolicyContext(getPolicy(),
+        getPolicyInfo(), getActiveSubclusters());
+
+  }
+
+  @Test
+  public void testClusterChosenWithRightProbability() throws YarnException {
+
+    ApplicationSubmissionContext context =
+        mock(ApplicationSubmissionContext.class);
+    when(context.getQueue()).thenReturn("queue1");
+    setApplicationSubmissionContext(context);
+
+    Map<SubClusterId, AtomicLong> counter = new HashMap<>();
+    for (SubClusterIdInfo id : getPolicyInfo().getRouterPolicyWeights()
+        .keySet()) {
+      counter.put(id.toId(), new AtomicLong(0));
+    }
+
+    float numberOfDraws = 10000;
+
+    for (float i = 0; i < numberOfDraws; i++) {
+      SubClusterId chosenId = ((FederationRouterPolicy) getPolicy())
+          .getHomeSubcluster(getApplicationSubmissionContext(), null);
+      counter.get(chosenId).incrementAndGet();
+    }
+
+    float totalActiveWeight = 0;
+    for (SubClusterId id : getActiveSubclusters().keySet()) {
+      SubClusterIdInfo idInfo = new SubClusterIdInfo(id);
+      if (getPolicyInfo().getRouterPolicyWeights().containsKey(idInfo)) {
+        totalActiveWeight +=
+            getPolicyInfo().getRouterPolicyWeights().get(idInfo);
+      }
+    }
+
+    for (Map.Entry<SubClusterId, AtomicLong> counterEntry : counter
+        .entrySet()) {
+      float expectedWeight = getPolicyInfo().getRouterPolicyWeights()
+          .get(new SubClusterIdInfo(counterEntry.getKey())) / totalActiveWeight;
+      float actualWeight = counterEntry.getValue().floatValue() / numberOfDraws;
+
+      // make sure that the weights are respected among active subclusters
+      // and no jobs are routed to inactive subclusters.
+ if (getActiveSubclusters().containsKey(counterEntry.getKey())) { + Assert.assertTrue( + "Id " + counterEntry.getKey() + " Actual weight: " + actualWeight + + " expected weight: " + expectedWeight, + Math.abs(actualWeight - expectedWeight) < 0.01); + } else { + Assert + .assertTrue( + "Id " + counterEntry.getKey() + " Actual weight: " + + actualWeight + " expected weight: " + expectedWeight, + actualWeight == 0); + + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/resolver/TestDefaultSubClusterResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/resolver/TestDefaultSubClusterResolver.java new file mode 100644 index 00000000000..25d246e371e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/resolver/TestDefaultSubClusterResolver.java @@ -0,0 +1,189 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.federation.resolver; + +import java.io.File; +import java.net.URL; +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test {@link SubClusterResolver} against correct and malformed Federation + * machine lists. 
+ */ +public class TestDefaultSubClusterResolver { + private static YarnConfiguration conf; + private static SubClusterResolver resolver; + + public static void setUpGoodFile() { + conf = new YarnConfiguration(); + resolver = new DefaultSubClusterResolverImpl(); + + URL url = + Thread.currentThread().getContextClassLoader().getResource("nodes"); + if (url == null) { + throw new RuntimeException( + "Could not find 'nodes' dummy file in classpath"); + } + // This will get rid of the beginning '/' in the url in Windows env + File file = new File(url.getPath()); + + conf.set(YarnConfiguration.FEDERATION_MACHINE_LIST, file.getPath()); + resolver.setConf(conf); + resolver.load(); + } + + private void setUpMalformedFile() { + conf = new YarnConfiguration(); + resolver = new DefaultSubClusterResolverImpl(); + + URL url = Thread.currentThread().getContextClassLoader() + .getResource("nodes-malformed"); + if (url == null) { + throw new RuntimeException( + "Could not find 'nodes-malformed' dummy file in classpath"); + } + // This will get rid of the beginning '/' in the url in Windows env + File file = new File(url.getPath()); + + conf.set(YarnConfiguration.FEDERATION_MACHINE_LIST, file.getPath()); + resolver.setConf(conf); + resolver.load(); + } + + private void setUpNonExistentFile() { + conf = new YarnConfiguration(); + resolver = new DefaultSubClusterResolverImpl(); + + conf.set(YarnConfiguration.FEDERATION_MACHINE_LIST, "fileDoesNotExist"); + resolver.setConf(conf); + resolver.load(); + } + + @Test + public void testGetSubClusterForNode() throws YarnException { + setUpGoodFile(); + + // All lowercase, no whitespace in machine list file + Assert.assertEquals(SubClusterId.newInstance("subcluster1"), + resolver.getSubClusterForNode("node1")); + // Leading and trailing whitespace in machine list file + Assert.assertEquals(SubClusterId.newInstance("subcluster2"), + resolver.getSubClusterForNode("node2")); + // Node name capitalization in machine list file + Assert.assertEquals(SubClusterId.newInstance("subcluster3"), + resolver.getSubClusterForNode("node3")); + + try { + resolver.getSubClusterForNode("nodeDoesNotExist"); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Cannot find subClusterId for node")); + } + } + + @Test + public void testGetSubClusterForNodeMalformedFile() throws YarnException { + setUpMalformedFile(); + + try { + resolver.getSubClusterForNode("node1"); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Cannot find subClusterId for node")); + } + + try { + resolver.getSubClusterForNode("node2"); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Cannot find subClusterId for node")); + } + + Assert.assertEquals(SubClusterId.newInstance("subcluster3"), + resolver.getSubClusterForNode("node3")); + + try { + resolver.getSubClusterForNode("nodeDoesNotExist"); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Cannot find subClusterId for node")); + } + } + + @Test + public void testGetSubClusterForNodeNoFile() throws YarnException { + setUpNonExistentFile(); + + try { + resolver.getSubClusterForNode("node1"); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Cannot find subClusterId for node")); + } + } + + @Test + public void testGetSubClustersForRack() throws YarnException { + setUpGoodFile(); + + Set rack1Expected = new HashSet(); + 
rack1Expected.add(SubClusterId.newInstance("subcluster1")); + rack1Expected.add(SubClusterId.newInstance("subcluster2")); + + Set rack2Expected = new HashSet(); + rack2Expected.add(SubClusterId.newInstance("subcluster3")); + + // Two subclusters have nodes in rack1 + Assert.assertEquals(rack1Expected, resolver.getSubClustersForRack("rack1")); + + // Two nodes are in rack2, but both belong to subcluster3 + Assert.assertEquals(rack2Expected, resolver.getSubClustersForRack("rack2")); + + try { + resolver.getSubClustersForRack("rackDoesNotExist"); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue(e.getMessage().startsWith("Cannot resolve rack")); + } + } + + @Test + public void testGetSubClustersForRackNoFile() throws YarnException { + setUpNonExistentFile(); + + try { + resolver.getSubClustersForRack("rack1"); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue(e.getMessage().startsWith("Cannot resolve rack")); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java new file mode 100644 index 00000000000..15cc0f0a009 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java @@ -0,0 +1,578 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.impl; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Calendar; +import java.util.List; +import java.util.TimeZone; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreException; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; +import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.util.MonotonicClock; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Base class for FederationMembershipStateStore implementations. + */ +public abstract class FederationStateStoreBaseTest { + + private static final MonotonicClock CLOCK = new MonotonicClock(); + private FederationStateStore stateStore = createStateStore(); + + protected abstract FederationStateStore createStateStore(); + + private Configuration conf; + + @Before + public void before() throws IOException, YarnException { + stateStore.init(conf); + } + + @After + public void after() throws Exception { + stateStore.close(); + } + + // Test FederationMembershipStateStore + + @Test + public void testRegisterSubCluster() throws Exception { + SubClusterId subClusterId = SubClusterId.newInstance("SC"); + + SubClusterInfo subClusterInfo = createSubClusterInfo(subClusterId); + + long previousTimeStamp = + Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis(); + + SubClusterRegisterResponse result = stateStore.registerSubCluster( + SubClusterRegisterRequest.newInstance(subClusterInfo)); + + long currentTimeStamp = + Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis(); + + Assert.assertNotNull(result); + Assert.assertEquals(subClusterInfo, querySubClusterInfo(subClusterId)); + + // The saved heartbeat is between the old one and the current timestamp + Assert.assertTrue(querySubClusterInfo(subClusterId) + .getLastHeartBeat() <= currentTimeStamp); + Assert.assertTrue(querySubClusterInfo(subClusterId) + .getLastHeartBeat() >= previousTimeStamp); + } + + @Test + public void testDeregisterSubCluster() throws Exception { + SubClusterId subClusterId = SubClusterId.newInstance("SC"); + registerSubCluster(createSubClusterInfo(subClusterId)); + + SubClusterDeregisterRequest deregisterRequest = SubClusterDeregisterRequest + .newInstance(subClusterId, SubClusterState.SC_UNREGISTERED); + + stateStore.deregisterSubCluster(deregisterRequest); + + Assert.assertEquals(SubClusterState.SC_UNREGISTERED, + querySubClusterInfo(subClusterId).getState()); + } + + @Test + public void testDeregisterSubClusterUnknownSubCluster() throws Exception { + SubClusterId subClusterId = SubClusterId.newInstance("SC"); + + SubClusterDeregisterRequest deregisterRequest = SubClusterDeregisterRequest + .newInstance(subClusterId, SubClusterState.SC_UNREGISTERED); + try { + stateStore.deregisterSubCluster(deregisterRequest); + Assert.fail(); + } catch (FederationStateStoreException e) { + Assert.assertTrue(e.getMessage().startsWith("SubCluster SC not found")); + } + } + + @Test + public void testGetSubClusterInfo() throws Exception { + + SubClusterId subClusterId = SubClusterId.newInstance("SC"); + SubClusterInfo subClusterInfo = createSubClusterInfo(subClusterId); + registerSubCluster(subClusterInfo); + + GetSubClusterInfoRequest request = + GetSubClusterInfoRequest.newInstance(subClusterId); + Assert.assertEquals(subClusterInfo, + stateStore.getSubCluster(request).getSubClusterInfo()); + } + + @Test + public void testGetSubClusterInfoUnknownSubCluster() throws Exception { + SubClusterId subClusterId = SubClusterId.newInstance("SC"); + GetSubClusterInfoRequest request = + GetSubClusterInfoRequest.newInstance(subClusterId); + + GetSubClusterInfoResponse response = stateStore.getSubCluster(request); + Assert.assertNull(response); + } + + @Test + public void 
testGetAllSubClustersInfo() throws Exception {
+
+    SubClusterId subClusterId1 = SubClusterId.newInstance("SC1");
+    SubClusterInfo subClusterInfo1 = createSubClusterInfo(subClusterId1);
+
+    SubClusterId subClusterId2 = SubClusterId.newInstance("SC2");
+    SubClusterInfo subClusterInfo2 = createSubClusterInfo(subClusterId2);
+
+    stateStore.registerSubCluster(
+        SubClusterRegisterRequest.newInstance(subClusterInfo1));
+    stateStore.registerSubCluster(
+        SubClusterRegisterRequest.newInstance(subClusterInfo2));
+
+    stateStore.subClusterHeartbeat(SubClusterHeartbeatRequest
+        .newInstance(subClusterId1, SubClusterState.SC_RUNNING, "capability"));
+    stateStore.subClusterHeartbeat(SubClusterHeartbeatRequest.newInstance(
+        subClusterId2, SubClusterState.SC_UNHEALTHY, "capability"));
+
+    List<SubClusterInfo> subClustersActive =
+        stateStore.getSubClusters(GetSubClustersInfoRequest.newInstance(true))
+            .getSubClusters();
+    List<SubClusterInfo> subClustersAll =
+        stateStore.getSubClusters(GetSubClustersInfoRequest.newInstance(false))
+            .getSubClusters();
+
+    // SC1 is the only active subcluster
+    Assert.assertEquals(1, subClustersActive.size());
+    SubClusterInfo sc1 = subClustersActive.get(0);
+    Assert.assertEquals(subClusterId1, sc1.getSubClusterId());
+
+    // SC1 and SC2 are the subclusters present in the StateStore
+
+    Assert.assertEquals(2, subClustersAll.size());
+    Assert.assertTrue(subClustersAll.contains(sc1));
+    subClustersAll.remove(sc1);
+    SubClusterInfo sc2 = subClustersAll.get(0);
+    Assert.assertEquals(subClusterId2, sc2.getSubClusterId());
+  }
+
+  @Test
+  public void testSubClusterHeartbeat() throws Exception {
+    SubClusterId subClusterId = SubClusterId.newInstance("SC");
+    registerSubCluster(createSubClusterInfo(subClusterId));
+
+    long previousHeartBeat =
+        querySubClusterInfo(subClusterId).getLastHeartBeat();
+
+    SubClusterHeartbeatRequest heartbeatRequest = SubClusterHeartbeatRequest
+        .newInstance(subClusterId, SubClusterState.SC_RUNNING, "capability");
+    stateStore.subClusterHeartbeat(heartbeatRequest);
+
+    long currentTimeStamp =
+        Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
+
+    Assert.assertEquals(SubClusterState.SC_RUNNING,
+        querySubClusterInfo(subClusterId).getState());
+
+    // The saved heartbeat is between the old one and the current timestamp
+    Assert.assertTrue(querySubClusterInfo(subClusterId)
+        .getLastHeartBeat() <= currentTimeStamp);
+    Assert.assertTrue(querySubClusterInfo(subClusterId)
+        .getLastHeartBeat() >= previousHeartBeat);
+  }
+
+  @Test
+  public void testSubClusterHeartbeatUnknownSubCluster() throws Exception {
+    SubClusterId subClusterId = SubClusterId.newInstance("SC");
+    SubClusterHeartbeatRequest heartbeatRequest = SubClusterHeartbeatRequest
+        .newInstance(subClusterId, SubClusterState.SC_RUNNING, "capability");
+
+    try {
+      stateStore.subClusterHeartbeat(heartbeatRequest);
+      Assert.fail();
+    } catch (FederationStateStoreException e) {
+      Assert.assertTrue(e.getMessage()
+          .startsWith("SubCluster SC does not exist; cannot heartbeat"));
+    }
+  }
+
+  // Test FederationApplicationHomeSubClusterStore
+
+  @Test
+  public void testAddApplicationHomeSubCluster() throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(1, 1);
+    SubClusterId subClusterId = SubClusterId.newInstance("SC");
+    ApplicationHomeSubCluster ahsc =
+        ApplicationHomeSubCluster.newInstance(appId, subClusterId);
+
+    AddApplicationHomeSubClusterRequest request =
+        AddApplicationHomeSubClusterRequest.newInstance(ahsc);
+    AddApplicationHomeSubClusterResponse response =
stateStore.addApplicationHomeSubCluster(request); + + Assert.assertEquals(subClusterId, response.getHomeSubCluster()); + Assert.assertEquals(subClusterId, queryApplicationHomeSC(appId)); + + } + + @Test + public void testAddApplicationHomeSubClusterAppAlreadyExists() + throws Exception { + ApplicationId appId = ApplicationId.newInstance(1, 1); + SubClusterId subClusterId1 = SubClusterId.newInstance("SC1"); + addApplicationHomeSC(appId, subClusterId1); + + SubClusterId subClusterId2 = SubClusterId.newInstance("SC2"); + ApplicationHomeSubCluster ahsc2 = + ApplicationHomeSubCluster.newInstance(appId, subClusterId2); + + AddApplicationHomeSubClusterResponse response = + stateStore.addApplicationHomeSubCluster( + AddApplicationHomeSubClusterRequest.newInstance(ahsc2)); + + Assert.assertEquals(subClusterId1, response.getHomeSubCluster()); + Assert.assertEquals(subClusterId1, queryApplicationHomeSC(appId)); + + } + + @Test + public void testDeleteApplicationHomeSubCluster() throws Exception { + ApplicationId appId = ApplicationId.newInstance(1, 1); + SubClusterId subClusterId = SubClusterId.newInstance("SC"); + addApplicationHomeSC(appId, subClusterId); + + DeleteApplicationHomeSubClusterRequest delRequest = + DeleteApplicationHomeSubClusterRequest.newInstance(appId); + + DeleteApplicationHomeSubClusterResponse response = + stateStore.deleteApplicationHomeSubCluster(delRequest); + + Assert.assertNotNull(response); + try { + queryApplicationHomeSC(appId); + Assert.fail(); + } catch (FederationStateStoreException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Application " + appId + " does not exist")); + } + + } + + @Test + public void testDeleteApplicationHomeSubClusterUnknownApp() throws Exception { + ApplicationId appId = ApplicationId.newInstance(1, 1); + DeleteApplicationHomeSubClusterRequest delRequest = + DeleteApplicationHomeSubClusterRequest.newInstance(appId); + + try { + stateStore.deleteApplicationHomeSubCluster(delRequest); + Assert.fail(); + } catch (FederationStateStoreException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Application " + appId.toString() + " does not exist")); + } + } + + @Test + public void testGetApplicationHomeSubCluster() throws Exception { + ApplicationId appId = ApplicationId.newInstance(1, 1); + SubClusterId subClusterId = SubClusterId.newInstance("SC"); + addApplicationHomeSC(appId, subClusterId); + + GetApplicationHomeSubClusterRequest getRequest = + GetApplicationHomeSubClusterRequest.newInstance(appId); + + GetApplicationHomeSubClusterResponse result = + stateStore.getApplicationHomeSubCluster(getRequest); + + Assert.assertEquals(appId, + result.getApplicationHomeSubCluster().getApplicationId()); + Assert.assertEquals(subClusterId, + result.getApplicationHomeSubCluster().getHomeSubCluster()); + } + + @Test + public void testGetApplicationHomeSubClusterUnknownApp() throws Exception { + ApplicationId appId = ApplicationId.newInstance(1, 1); + GetApplicationHomeSubClusterRequest request = + GetApplicationHomeSubClusterRequest.newInstance(appId); + + try { + stateStore.getApplicationHomeSubCluster(request); + Assert.fail(); + } catch (FederationStateStoreException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Application " + appId.toString() + " does not exist")); + } + } + + @Test + public void testGetApplicationsHomeSubCluster() throws Exception { + ApplicationId appId1 = ApplicationId.newInstance(1, 1); + SubClusterId subClusterId1 = SubClusterId.newInstance("SC1"); + ApplicationHomeSubCluster ahsc1 = + 
ApplicationHomeSubCluster.newInstance(appId1, subClusterId1); + + ApplicationId appId2 = ApplicationId.newInstance(1, 2); + SubClusterId subClusterId2 = SubClusterId.newInstance("SC2"); + ApplicationHomeSubCluster ahsc2 = + ApplicationHomeSubCluster.newInstance(appId2, subClusterId2); + + addApplicationHomeSC(appId1, subClusterId1); + addApplicationHomeSC(appId2, subClusterId2); + + GetApplicationsHomeSubClusterRequest getRequest = + GetApplicationsHomeSubClusterRequest.newInstance(); + + GetApplicationsHomeSubClusterResponse result = + stateStore.getApplicationsHomeSubCluster(getRequest); + + Assert.assertEquals(2, result.getAppsHomeSubClusters().size()); + Assert.assertTrue(result.getAppsHomeSubClusters().contains(ahsc1)); + Assert.assertTrue(result.getAppsHomeSubClusters().contains(ahsc2)); + } + + @Test + public void testUpdateApplicationHomeSubCluster() throws Exception { + ApplicationId appId = ApplicationId.newInstance(1, 1); + SubClusterId subClusterId1 = SubClusterId.newInstance("SC1"); + addApplicationHomeSC(appId, subClusterId1); + + SubClusterId subClusterId2 = SubClusterId.newInstance("SC2"); + ApplicationHomeSubCluster ahscUpdate = + ApplicationHomeSubCluster.newInstance(appId, subClusterId2); + + UpdateApplicationHomeSubClusterRequest updateRequest = + UpdateApplicationHomeSubClusterRequest.newInstance(ahscUpdate); + + UpdateApplicationHomeSubClusterResponse response = + stateStore.updateApplicationHomeSubCluster(updateRequest); + + Assert.assertNotNull(response); + Assert.assertEquals(subClusterId2, queryApplicationHomeSC(appId)); + } + + @Test + public void testUpdateApplicationHomeSubClusterUnknownApp() throws Exception { + ApplicationId appId = ApplicationId.newInstance(1, 1); + SubClusterId subClusterId1 = SubClusterId.newInstance("SC1"); + ApplicationHomeSubCluster ahsc = + ApplicationHomeSubCluster.newInstance(appId, subClusterId1); + + UpdateApplicationHomeSubClusterRequest updateRequest = + UpdateApplicationHomeSubClusterRequest.newInstance(ahsc); + + try { + stateStore.updateApplicationHomeSubCluster((updateRequest)); + Assert.fail(); + } catch (FederationStateStoreException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Application " + appId.toString() + " does not exist")); + } + } + + // Test FederationPolicyStore + + @Test + public void testSetPolicyConfiguration() throws Exception { + SetSubClusterPolicyConfigurationRequest request = + SetSubClusterPolicyConfigurationRequest + .newInstance(createSCPolicyConf("Queue", "PolicyType")); + + SetSubClusterPolicyConfigurationResponse result = + stateStore.setPolicyConfiguration(request); + + Assert.assertNotNull(result); + Assert.assertEquals(createSCPolicyConf("Queue", "PolicyType"), + queryPolicy("Queue")); + + } + + @Test + public void testSetPolicyConfigurationUpdateExisting() throws Exception { + setPolicyConf("Queue", "PolicyType1"); + + SetSubClusterPolicyConfigurationRequest request2 = + SetSubClusterPolicyConfigurationRequest + .newInstance(createSCPolicyConf("Queue", "PolicyType2")); + SetSubClusterPolicyConfigurationResponse result = + stateStore.setPolicyConfiguration(request2); + + Assert.assertNotNull(result); + Assert.assertEquals(createSCPolicyConf("Queue", "PolicyType2"), + queryPolicy("Queue")); + } + + @Test + public void testGetPolicyConfiguration() throws Exception { + setPolicyConf("Queue", "PolicyType"); + + GetSubClusterPolicyConfigurationRequest getRequest = + GetSubClusterPolicyConfigurationRequest.newInstance("Queue"); + GetSubClusterPolicyConfigurationResponse result = + 
stateStore.getPolicyConfiguration(getRequest); + + Assert.assertNotNull(result); + Assert.assertEquals(createSCPolicyConf("Queue", "PolicyType"), + result.getPolicyConfiguration()); + + } + + @Test + public void testGetPolicyConfigurationUnknownQueue() throws Exception { + + GetSubClusterPolicyConfigurationRequest request = + GetSubClusterPolicyConfigurationRequest.newInstance("Queue"); + + GetSubClusterPolicyConfigurationResponse response = + stateStore.getPolicyConfiguration(request); + Assert.assertNull(response); + } + + @Test + public void testGetPoliciesConfigurations() throws Exception { + setPolicyConf("Queue1", "PolicyType1"); + setPolicyConf("Queue2", "PolicyType2"); + + GetSubClusterPoliciesConfigurationsResponse response = + stateStore.getPoliciesConfigurations( + GetSubClusterPoliciesConfigurationsRequest.newInstance()); + + Assert.assertNotNull(response); + Assert.assertNotNull(response.getPoliciesConfigs()); + + Assert.assertEquals(2, response.getPoliciesConfigs().size()); + + Assert.assertTrue(response.getPoliciesConfigs() + .contains(createSCPolicyConf("Queue1", "PolicyType1"))); + Assert.assertTrue(response.getPoliciesConfigs() + .contains(createSCPolicyConf("Queue2", "PolicyType2"))); + } + + // Convenience methods + + private SubClusterInfo createSubClusterInfo(SubClusterId subClusterId) { + + String amRMAddress = "1.2.3.4:1"; + String clientRMAddress = "1.2.3.4:2"; + String rmAdminAddress = "1.2.3.4:3"; + String webAppAddress = "1.2.3.4:4"; + + return SubClusterInfo.newInstance(subClusterId, amRMAddress, + clientRMAddress, rmAdminAddress, webAppAddress, SubClusterState.SC_NEW, + CLOCK.getTime(), "capability"); + } + + private SubClusterPolicyConfiguration createSCPolicyConf(String queueName, + String policyType) { + ByteBuffer bb = ByteBuffer.allocate(100); + bb.put((byte) 0x02); + return SubClusterPolicyConfiguration.newInstance(queueName, policyType, bb); + } + + private void addApplicationHomeSC(ApplicationId appId, + SubClusterId subClusterId) throws YarnException { + ApplicationHomeSubCluster ahsc = + ApplicationHomeSubCluster.newInstance(appId, subClusterId); + AddApplicationHomeSubClusterRequest request = + AddApplicationHomeSubClusterRequest.newInstance(ahsc); + stateStore.addApplicationHomeSubCluster(request); + } + + private void setPolicyConf(String queue, String policyType) + throws YarnException { + SetSubClusterPolicyConfigurationRequest request = + SetSubClusterPolicyConfigurationRequest + .newInstance(createSCPolicyConf(queue, policyType)); + stateStore.setPolicyConfiguration(request); + } + + private void registerSubCluster(SubClusterInfo subClusterInfo) + throws YarnException { + stateStore.registerSubCluster( + SubClusterRegisterRequest.newInstance(subClusterInfo)); + } + + private SubClusterInfo querySubClusterInfo(SubClusterId subClusterId) + throws YarnException { + GetSubClusterInfoRequest request = + GetSubClusterInfoRequest.newInstance(subClusterId); + return stateStore.getSubCluster(request).getSubClusterInfo(); + } + + private SubClusterId queryApplicationHomeSC(ApplicationId appId) + throws YarnException { + GetApplicationHomeSubClusterRequest request = + GetApplicationHomeSubClusterRequest.newInstance(appId); + + GetApplicationHomeSubClusterResponse response = + stateStore.getApplicationHomeSubCluster(request); + + return response.getApplicationHomeSubCluster().getHomeSubCluster(); + } + + private SubClusterPolicyConfiguration queryPolicy(String queue) + throws YarnException { + GetSubClusterPolicyConfigurationRequest request = + 
GetSubClusterPolicyConfigurationRequest.newInstance(queue); + + GetSubClusterPolicyConfigurationResponse result = + stateStore.getPolicyConfiguration(request); + return result.getPolicyConfiguration(); + } + + protected void setConf(Configuration conf) { + this.conf = conf; + } + + protected Configuration getConf() { + return conf; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java new file mode 100644 index 00000000000..289a3a61126 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/HSQLDBFederationStateStore.java @@ -0,0 +1,252 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + *http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.impl; + +import java.sql.Connection; +import java.sql.SQLException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * HSQLDB implementation of {@link FederationStateStore}. 
+ */ +public class HSQLDBFederationStateStore extends SQLFederationStateStore { + + private static final Logger LOG = + LoggerFactory.getLogger(HSQLDBFederationStateStore.class); + + private Connection conn; + + private static final String TABLE_APPLICATIONSHOMESUBCLUSTER = + " CREATE TABLE applicationsHomeSubCluster (" + + " applicationId varchar(64) NOT NULL," + + " homeSubCluster varchar(256) NOT NULL," + + " CONSTRAINT pk_applicationId PRIMARY KEY (applicationId))"; + + private static final String TABLE_MEMBERSHIP = + "CREATE TABLE membership ( subClusterId varchar(256) NOT NULL," + + " amRMServiceAddress varchar(256) NOT NULL," + + " clientRMServiceAddress varchar(256) NOT NULL," + + " rmAdminServiceAddress varchar(256) NOT NULL," + + " rmWebServiceAddress varchar(256) NOT NULL," + + " lastHeartBeat datetime NOT NULL, state varchar(32) NOT NULL," + + " lastStartTime bigint NULL, capability varchar(6000) NOT NULL," + + " CONSTRAINT pk_subClusterId PRIMARY KEY (subClusterId))"; + + private static final String TABLE_POLICIES = + "CREATE TABLE policies ( queue varchar(256) NOT NULL," + + " policyType varchar(256) NOT NULL, params varbinary(512)," + + " CONSTRAINT pk_queue PRIMARY KEY (queue))"; + + private static final String SP_REGISTERSUBCLUSTER = + "CREATE PROCEDURE sp_registerSubCluster(" + + " IN subClusterId_IN varchar(256)," + + " IN amRMServiceAddress_IN varchar(256)," + + " IN clientRMServiceAddress_IN varchar(256)," + + " IN rmAdminServiceAddress_IN varchar(256)," + + " IN rmWebServiceAddress_IN varchar(256)," + + " IN state_IN varchar(256)," + + " IN lastStartTime_IN bigint, IN capability_IN varchar(6000)," + + " OUT rowCount_OUT int)MODIFIES SQL DATA BEGIN ATOMIC" + + " DELETE FROM membership WHERE (subClusterId = subClusterId_IN);" + + " INSERT INTO membership ( subClusterId," + + " amRMServiceAddress, clientRMServiceAddress," + + " rmAdminServiceAddress, rmWebServiceAddress," + + " lastHeartBeat, state, lastStartTime," + + " capability) VALUES ( subClusterId_IN," + + " amRMServiceAddress_IN, clientRMServiceAddress_IN," + + " rmAdminServiceAddress_IN, rmWebServiceAddress_IN," + + " NOW() AT TIME ZONE INTERVAL '0:00' HOUR TO MINUTE," + + " state_IN, lastStartTime_IN, capability_IN);" + + " GET DIAGNOSTICS rowCount_OUT = ROW_COUNT; END"; + + private static final String SP_DEREGISTERSUBCLUSTER = + "CREATE PROCEDURE sp_deregisterSubCluster(" + + " IN subClusterId_IN varchar(256)," + + " IN state_IN varchar(64), OUT rowCount_OUT int)" + + " MODIFIES SQL DATA BEGIN ATOMIC" + + " UPDATE membership SET state = state_IN WHERE (" + + " subClusterId = subClusterId_IN AND state != state_IN);" + + " GET DIAGNOSTICS rowCount_OUT = ROW_COUNT; END"; + + private static final String SP_SUBCLUSTERHEARTBEAT = + "CREATE PROCEDURE sp_subClusterHeartbeat(" + + " IN subClusterId_IN varchar(256), IN state_IN varchar(64)," + + " IN capability_IN varchar(6000), OUT rowCount_OUT int)" + + " MODIFIES SQL DATA BEGIN ATOMIC UPDATE membership" + + " SET capability = capability_IN, state = state_IN," + + " lastHeartBeat = NOW() AT TIME ZONE INTERVAL '0:00'" + + " HOUR TO MINUTE WHERE subClusterId = subClusterId_IN;" + + " GET DIAGNOSTICS rowCount_OUT = ROW_COUNT; END"; + + private static final String SP_GETSUBCLUSTER = + "CREATE PROCEDURE sp_getSubCluster( IN subClusterId_IN varchar(256)," + + " OUT amRMServiceAddress_OUT varchar(256)," + + " OUT clientRMServiceAddress_OUT varchar(256)," + + " OUT rmAdminServiceAddress_OUT varchar(256)," + + " OUT rmWebServiceAddress_OUT varchar(256)," + + " OUT 
lastHeartBeat_OUT datetime, OUT state_OUT varchar(64)," + + " OUT lastStartTime_OUT bigint," + + " OUT capability_OUT varchar(6000))" + + " MODIFIES SQL DATA BEGIN ATOMIC SELECT amRMServiceAddress," + + " clientRMServiceAddress," + + " rmAdminServiceAddress, rmWebServiceAddress," + + " lastHeartBeat, state, lastStartTime, capability" + + " INTO amRMServiceAddress_OUT, clientRMServiceAddress_OUT," + + " rmAdminServiceAddress_OUT," + + " rmWebServiceAddress_OUT, lastHeartBeat_OUT," + + " state_OUT, lastStartTime_OUT, capability_OUT" + + " FROM membership WHERE subClusterId = subClusterId_IN; END"; + + private static final String SP_GETSUBCLUSTERS = + "CREATE PROCEDURE sp_getSubClusters()" + + " MODIFIES SQL DATA DYNAMIC RESULT SETS 1 BEGIN ATOMIC" + + " DECLARE result CURSOR FOR" + + " SELECT subClusterId, amRMServiceAddress, clientRMServiceAddress," + + " rmAdminServiceAddress, rmWebServiceAddress, lastHeartBeat," + + " state, lastStartTime, capability" + + " FROM membership; OPEN result; END"; + + private static final String SP_ADDAPPLICATIONHOMESUBCLUSTER = + "CREATE PROCEDURE sp_addApplicationHomeSubCluster(" + + " IN applicationId_IN varchar(64)," + + " IN homeSubCluster_IN varchar(256)," + + " OUT storedHomeSubCluster_OUT varchar(256), OUT rowCount_OUT int)" + + " MODIFIES SQL DATA BEGIN ATOMIC" + + " INSERT INTO applicationsHomeSubCluster " + + " (applicationId,homeSubCluster) " + + " (SELECT applicationId_IN, homeSubCluster_IN" + + " FROM applicationsHomeSubCluster" + + " WHERE applicationId = applicationId_IN" + + " HAVING COUNT(*) = 0 );" + + " GET DIAGNOSTICS rowCount_OUT = ROW_COUNT;" + + " SELECT homeSubCluster INTO storedHomeSubCluster_OUT" + + " FROM applicationsHomeSubCluster" + + " WHERE applicationId = applicationID_IN; END"; + + private static final String SP_UPDATEAPPLICATIONHOMESUBCLUSTER = + "CREATE PROCEDURE sp_updateApplicationHomeSubCluster(" + + " IN applicationId_IN varchar(64)," + + " IN homeSubCluster_IN varchar(256), OUT rowCount_OUT int)" + + " MODIFIES SQL DATA BEGIN ATOMIC" + + " UPDATE applicationsHomeSubCluster" + + " SET homeSubCluster = homeSubCluster_IN" + + " WHERE applicationId = applicationId_IN;" + + " GET DIAGNOSTICS rowCount_OUT = ROW_COUNT; END"; + + private static final String SP_GETAPPLICATIONHOMESUBCLUSTER = + "CREATE PROCEDURE sp_getApplicationHomeSubCluster(" + + " IN applicationId_IN varchar(64)," + + " OUT homeSubCluster_OUT varchar(256))" + + " MODIFIES SQL DATA BEGIN ATOMIC" + + " SELECT homeSubCluster INTO homeSubCluster_OUT" + + " FROM applicationsHomeSubCluster" + + " WHERE applicationId = applicationID_IN; END"; + + private static final String SP_GETAPPLICATIONSHOMESUBCLUSTER = + "CREATE PROCEDURE sp_getApplicationsHomeSubCluster()" + + " MODIFIES SQL DATA DYNAMIC RESULT SETS 1 BEGIN ATOMIC" + + " DECLARE result CURSOR FOR" + + " SELECT applicationId, homeSubCluster" + + " FROM applicationsHomeSubCluster; OPEN result; END"; + + private static final String SP_DELETEAPPLICATIONHOMESUBCLUSTER = + "CREATE PROCEDURE sp_deleteApplicationHomeSubCluster(" + + " IN applicationId_IN varchar(64), OUT rowCount_OUT int)" + + " MODIFIES SQL DATA BEGIN ATOMIC" + + " DELETE FROM applicationsHomeSubCluster" + + " WHERE applicationId = applicationId_IN;" + + " GET DIAGNOSTICS rowCount_OUT = ROW_COUNT; END"; + + private static final String SP_SETPOLICYCONFIGURATION = + "CREATE PROCEDURE sp_setPolicyConfiguration(" + + " IN queue_IN varchar(256), IN policyType_IN varchar(256)," + + " IN params_IN varbinary(512), OUT rowCount_OUT int)" + + " MODIFIES SQL 
DATA BEGIN ATOMIC" + + " DELETE FROM policies WHERE queue = queue_IN;" + + " INSERT INTO policies (queue, policyType, params)" + + " VALUES (queue_IN, policyType_IN, params_IN);" + + " GET DIAGNOSTICS rowCount_OUT = ROW_COUNT; END"; + + private static final String SP_GETPOLICYCONFIGURATION = + "CREATE PROCEDURE sp_getPolicyConfiguration(" + + " IN queue_IN varchar(256), OUT policyType_OUT varchar(256)," + + " OUT params_OUT varbinary(512)) MODIFIES SQL DATA BEGIN ATOMIC" + + " SELECT policyType, params INTO policyType_OUT, params_OUT" + + " FROM policies WHERE queue = queue_IN; END"; + + private static final String SP_GETPOLICIESCONFIGURATIONS = + "CREATE PROCEDURE sp_getPoliciesConfigurations()" + + " MODIFIES SQL DATA DYNAMIC RESULT SETS 1 BEGIN ATOMIC" + + " DECLARE result CURSOR FOR" + + " SELECT * FROM policies; OPEN result; END"; + + @Override + public void init(Configuration conf) { + try { + super.init(conf); + } catch (YarnException e1) { + LOG.error("ERROR: failed to init HSQLDB " + e1.getMessage()); + } + try { + conn = getConnection(); + + LOG.info("Database Init: Start"); + + conn.prepareStatement(TABLE_APPLICATIONSHOMESUBCLUSTER).execute(); + conn.prepareStatement(TABLE_MEMBERSHIP).execute(); + conn.prepareStatement(TABLE_POLICIES).execute(); + + conn.prepareStatement(SP_REGISTERSUBCLUSTER).execute(); + conn.prepareStatement(SP_DEREGISTERSUBCLUSTER).execute(); + conn.prepareStatement(SP_SUBCLUSTERHEARTBEAT).execute(); + conn.prepareStatement(SP_GETSUBCLUSTER).execute(); + conn.prepareStatement(SP_GETSUBCLUSTERS).execute(); + + conn.prepareStatement(SP_ADDAPPLICATIONHOMESUBCLUSTER).execute(); + conn.prepareStatement(SP_UPDATEAPPLICATIONHOMESUBCLUSTER).execute(); + conn.prepareStatement(SP_GETAPPLICATIONHOMESUBCLUSTER).execute(); + conn.prepareStatement(SP_GETAPPLICATIONSHOMESUBCLUSTER).execute(); + conn.prepareStatement(SP_DELETEAPPLICATIONHOMESUBCLUSTER).execute(); + + conn.prepareStatement(SP_SETPOLICYCONFIGURATION).execute(); + conn.prepareStatement(SP_GETPOLICYCONFIGURATION).execute(); + conn.prepareStatement(SP_GETPOLICIESCONFIGURATIONS).execute(); + + LOG.info("Database Init: Complete"); + conn.close(); + } catch (SQLException e) { + LOG.error("ERROR: failed to inizialize HSQLDB " + e.getMessage()); + } + } + + public void closeConnection() { + try { + conn.close(); + } catch (SQLException e) { + LOG.error( + "ERROR: failed to close connection to HSQLDB DB " + e.getMessage()); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestMemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestMemoryFederationStateStore.java new file mode 100644 index 00000000000..c29fc03c5b6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestMemoryFederationStateStore.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.impl; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; + +/** + * Unit tests for MemoryFederationStateStore. + */ +public class TestMemoryFederationStateStore + extends FederationStateStoreBaseTest { + + @Override + protected FederationStateStore createStateStore() { + Configuration conf = new Configuration(); + super.setConf(conf); + return new MemoryFederationStateStore(); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java new file mode 100644 index 00000000000..d4e6cc53f67 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.impl; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; + +/** + * Unit tests for SQLFederationStateStore. + */ +public class TestSQLFederationStateStore extends FederationStateStoreBaseTest { + + private static final String HSQLDB_DRIVER = "org.hsqldb.jdbc.JDBCDataSource"; + private static final String DATABASE_URL = "jdbc:hsqldb:mem:state"; + private static final String DATABASE_USERNAME = "SA"; + private static final String DATABASE_PASSWORD = ""; + + @Override + protected FederationStateStore createStateStore() { + + YarnConfiguration conf = new YarnConfiguration(); + + conf.set(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS, + HSQLDB_DRIVER); + conf.set(YarnConfiguration.FEDERATION_STATESTORE_SQL_USERNAME, + DATABASE_USERNAME); + conf.set(YarnConfiguration.FEDERATION_STATESTORE_SQL_PASSWORD, + DATABASE_PASSWORD); + conf.set(YarnConfiguration.FEDERATION_STATESTORE_SQL_URL, + DATABASE_URL + System.currentTimeMillis()); + super.setConf(conf); + return new HSQLDBFederationStateStore(); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java new file mode 100644 index 00000000000..390b8037b16 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestZookeeperFederationStateStore.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.impl; + +import java.io.IOException; + +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.retry.RetryNTimes; +import org.apache.curator.test.TestingServer; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.junit.After; +import org.junit.Before; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Unit tests for ZookeeperFederationStateStore. + */ +public class TestZookeeperFederationStateStore + extends FederationStateStoreBaseTest { + + private static final Logger LOG = + LoggerFactory.getLogger(TestZookeeperFederationStateStore.class); + + /** Zookeeper test server. */ + private static TestingServer curatorTestingServer; + private static CuratorFramework curatorFramework; + + @Before + public void before() throws IOException, YarnException { + try { + curatorTestingServer = new TestingServer(); + curatorTestingServer.start(); + String connectString = curatorTestingServer.getConnectString(); + curatorFramework = CuratorFrameworkFactory.builder() + .connectString(connectString) + .retryPolicy(new RetryNTimes(100, 100)) + .build(); + curatorFramework.start(); + + Configuration conf = new YarnConfiguration(); + conf.set(CommonConfigurationKeys.ZK_ADDRESS, connectString); + setConf(conf); + } catch (Exception e) { + LOG.error("Cannot initialize ZooKeeper store", e); + throw new IOException(e); + } + + super.before(); + } + + @After + public void after() throws Exception { + super.after(); + + curatorFramework.close(); + try { + curatorTestingServer.stop(); + } catch (IOException e) { + } + } + + @Override + protected FederationStateStore createStateStore() { + Configuration conf = new Configuration(); + super.setConf(conf); + return new ZookeeperFederationStateStore(); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/records/TestFederationProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/records/TestFederationProtocolRecords.java new file mode 100644 index 00000000000..cf8cf719d01 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/records/TestFederationProtocolRecords.java @@ -0,0 +1,265 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.records; + +import org.apache.hadoop.yarn.api.BasePBImplRecordsTest; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.AddApplicationHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.AddApplicationHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.DeleteApplicationHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.DeleteApplicationHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetApplicationsHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterInfoRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterInfoResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPoliciesConfigurationsRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPoliciesConfigurationsResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPolicyConfigurationRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClusterPolicyConfigurationResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClustersInfoRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.GetSubClustersInfoResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SetSubClusterPolicyConfigurationRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SetSubClusterPolicyConfigurationResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterHeartbeatRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterHeartbeatResponseProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterInfoProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterRegisterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterRegisterResponseProto; +import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.UpdateApplicationHomeSubClusterRequestProto; +import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.UpdateApplicationHomeSubClusterResponseProto; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.AddApplicationHomeSubClusterRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.AddApplicationHomeSubClusterResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.DeleteApplicationHomeSubClusterRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.DeleteApplicationHomeSubClusterResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetApplicationHomeSubClusterRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetApplicationHomeSubClusterResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetApplicationsHomeSubClusterRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetApplicationsHomeSubClusterResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetSubClusterInfoRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetSubClusterInfoResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetSubClusterPoliciesConfigurationsRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetSubClusterPoliciesConfigurationsResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetSubClusterPolicyConfigurationRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetSubClusterPolicyConfigurationResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetSubClustersInfoRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.GetSubClustersInfoResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SetSubClusterPolicyConfigurationRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SetSubClusterPolicyConfigurationResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterDeregisterRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterDeregisterResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterHeartbeatRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterHeartbeatResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterIdPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterInfoPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterRegisterRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.SubClusterRegisterResponsePBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.UpdateApplicationHomeSubClusterRequestPBImpl; +import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.UpdateApplicationHomeSubClusterResponsePBImpl; +import org.apache.hadoop.yarn.server.records.Version; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Test class for federation protocol records. 
+ */ +public class TestFederationProtocolRecords extends BasePBImplRecordsTest { + + @BeforeClass + public static void setup() throws Exception { + generateByNewInstance(ApplicationId.class); + generateByNewInstance(Version.class); + generateByNewInstance(SubClusterId.class); + generateByNewInstance(SubClusterInfo.class); + generateByNewInstance(ApplicationHomeSubCluster.class); + generateByNewInstance(SubClusterPolicyConfiguration.class); + } + + @Test + public void testSubClusterId() throws Exception { + validatePBImplRecord(SubClusterIdPBImpl.class, SubClusterIdProto.class); + } + + @Test + public void testSubClusterInfo() throws Exception { + validatePBImplRecord(SubClusterInfoPBImpl.class, SubClusterInfoProto.class); + } + + @Test + public void testSubClusterRegisterRequest() throws Exception { + validatePBImplRecord(SubClusterRegisterRequestPBImpl.class, + SubClusterRegisterRequestProto.class); + } + + @Test + public void testSubClusterRegisterResponse() throws Exception { + validatePBImplRecord(SubClusterRegisterResponsePBImpl.class, + SubClusterRegisterResponseProto.class); + } + + @Test + public void testSubClusterDeregisterRequest() throws Exception { + validatePBImplRecord(SubClusterDeregisterRequestPBImpl.class, + SubClusterDeregisterRequestProto.class); + } + + @Test + public void testSubClusterDeregisterResponse() throws Exception { + validatePBImplRecord(SubClusterDeregisterResponsePBImpl.class, + SubClusterDeregisterResponseProto.class); + } + + @Test + public void testSubClusterHeartbeatRequest() throws Exception { + validatePBImplRecord(SubClusterHeartbeatRequestPBImpl.class, + SubClusterHeartbeatRequestProto.class); + } + + @Test + public void testSubClusterHeartbeatResponse() throws Exception { + validatePBImplRecord(SubClusterHeartbeatResponsePBImpl.class, + SubClusterHeartbeatResponseProto.class); + } + + @Test + public void testGetSubClusterRequest() throws Exception { + validatePBImplRecord(GetSubClusterInfoRequestPBImpl.class, + GetSubClusterInfoRequestProto.class); + } + + @Test + public void testGetSubClusterResponse() throws Exception { + validatePBImplRecord(GetSubClusterInfoResponsePBImpl.class, + GetSubClusterInfoResponseProto.class); + } + + @Test + public void testGetSubClustersInfoRequest() throws Exception { + validatePBImplRecord(GetSubClustersInfoRequestPBImpl.class, + GetSubClustersInfoRequestProto.class); + } + + @Test + public void testGetSubClustersInfoResponse() throws Exception { + validatePBImplRecord(GetSubClustersInfoResponsePBImpl.class, + GetSubClustersInfoResponseProto.class); + } + + @Test + public void testAddApplicationHomeSubClusterRequest() throws Exception { + validatePBImplRecord(AddApplicationHomeSubClusterRequestPBImpl.class, + AddApplicationHomeSubClusterRequestProto.class); + } + + @Test + public void testAddApplicationHomeSubClusterResponse() throws Exception { + validatePBImplRecord(AddApplicationHomeSubClusterResponsePBImpl.class, + AddApplicationHomeSubClusterResponseProto.class); + } + + @Test + public void testUpdateApplicationHomeSubClusterRequest() throws Exception { + validatePBImplRecord(UpdateApplicationHomeSubClusterRequestPBImpl.class, + UpdateApplicationHomeSubClusterRequestProto.class); + } + + @Test + public void testUpdateApplicationHomeSubClusterResponse() throws Exception { + validatePBImplRecord(UpdateApplicationHomeSubClusterResponsePBImpl.class, + UpdateApplicationHomeSubClusterResponseProto.class); + } + + @Test + public void testGetApplicationHomeSubClusterRequest() throws Exception { + 
validatePBImplRecord(GetApplicationHomeSubClusterRequestPBImpl.class, + GetApplicationHomeSubClusterRequestProto.class); + } + + @Test + public void testGetApplicationHomeSubClusterResponse() throws Exception { + validatePBImplRecord(GetApplicationHomeSubClusterResponsePBImpl.class, + GetApplicationHomeSubClusterResponseProto.class); + } + + @Test + public void testGetApplicationsHomeSubClusterRequest() throws Exception { + validatePBImplRecord(GetApplicationsHomeSubClusterRequestPBImpl.class, + GetApplicationsHomeSubClusterRequestProto.class); + } + + @Test + public void testGetApplicationsHomeSubClusterResponse() throws Exception { + validatePBImplRecord(GetApplicationsHomeSubClusterResponsePBImpl.class, + GetApplicationsHomeSubClusterResponseProto.class); + } + + @Test + public void testDeleteApplicationHomeSubClusterRequest() throws Exception { + validatePBImplRecord(DeleteApplicationHomeSubClusterRequestPBImpl.class, + DeleteApplicationHomeSubClusterRequestProto.class); + } + + @Test + public void testDeleteApplicationHomeSubClusterResponse() throws Exception { + validatePBImplRecord(DeleteApplicationHomeSubClusterResponsePBImpl.class, + DeleteApplicationHomeSubClusterResponseProto.class); + } + + @Test + public void testGetSubClusterPolicyConfigurationRequest() throws Exception { + validatePBImplRecord(GetSubClusterPolicyConfigurationRequestPBImpl.class, + GetSubClusterPolicyConfigurationRequestProto.class); + } + + @Test + public void testGetSubClusterPolicyConfigurationResponse() throws Exception { + validatePBImplRecord(GetSubClusterPolicyConfigurationResponsePBImpl.class, + GetSubClusterPolicyConfigurationResponseProto.class); + } + + @Test + public void testSetSubClusterPolicyConfigurationRequest() throws Exception { + validatePBImplRecord(SetSubClusterPolicyConfigurationRequestPBImpl.class, + SetSubClusterPolicyConfigurationRequestProto.class); + } + + @Test + public void testSetSubClusterPolicyConfigurationResponse() throws Exception { + validatePBImplRecord(SetSubClusterPolicyConfigurationResponsePBImpl.class, + SetSubClusterPolicyConfigurationResponseProto.class); + } + + @Test + public void testGetSubClusterPoliciesConfigurationsRequest() + throws Exception { + validatePBImplRecord(GetSubClusterPoliciesConfigurationsRequestPBImpl.class, + GetSubClusterPoliciesConfigurationsRequestProto.class); + } + + @Test + public void testGetSubClusterPoliciesConfigurationsResponse() + throws Exception { + validatePBImplRecord( + GetSubClusterPoliciesConfigurationsResponsePBImpl.class, + GetSubClusterPoliciesConfigurationsResponseProto.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java new file mode 100644 index 00000000000..5a5703e6f65 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java @@ -0,0 +1,1260 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.federation.store.utils; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Unit tests for FederationApplicationInputValidator, + * FederationMembershipInputValidator, and FederationPolicyInputValidator. 
+ */ +public class TestFederationStateStoreInputValidator { + + private static final Logger LOG = + LoggerFactory.getLogger(TestFederationStateStoreInputValidator.class); + + private static SubClusterId subClusterId; + private static String amRMServiceAddress; + private static String clientRMServiceAddress; + private static String rmAdminServiceAddress; + private static String rmWebServiceAddress; + private static int lastHeartBeat; + private static SubClusterState stateNew; + private static SubClusterState stateLost; + private static ApplicationId appId; + private static int lastStartTime; + private static String capability; + private static String queue; + private static String type; + private static ByteBuffer params; + + private static SubClusterId subClusterIdInvalid; + private static SubClusterId subClusterIdNull; + + private static int lastHeartBeatNegative; + private static int lastStartTimeNegative; + + private static SubClusterState stateNull; + private static ApplicationId appIdNull; + + private static String capabilityNull; + private static String capabilityEmpty; + + private static String addressNull; + private static String addressEmpty; + private static String addressWrong; + private static String addressWrongPort; + + private static String queueEmpty; + private static String queueNull; + + private static String typeEmpty; + private static String typeNull; + + @BeforeClass + public static void setUp() { + subClusterId = SubClusterId.newInstance("abc"); + amRMServiceAddress = "localhost:8032"; + clientRMServiceAddress = "localhost:8034"; + rmAdminServiceAddress = "localhost:8031"; + rmWebServiceAddress = "localhost:8088"; + lastHeartBeat = 1000; + stateNew = SubClusterState.SC_NEW; + stateLost = SubClusterState.SC_LOST; + lastStartTime = 1000; + capability = "Memory VCores"; + appId = ApplicationId.newInstance(lastStartTime, 1); + queue = "default"; + type = "random"; + params = ByteBuffer.allocate(10); + params.put((byte) 0xFF); + + subClusterIdInvalid = SubClusterId.newInstance(""); + subClusterIdNull = null; + + lastHeartBeatNegative = -10; + lastStartTimeNegative = -10; + + stateNull = null; + appIdNull = null; + + capabilityNull = null; + capabilityEmpty = ""; + + addressNull = null; + addressEmpty = ""; + addressWrong = "AddressWrong"; + addressWrongPort = "Address:WrongPort"; + + queueEmpty = ""; + queueNull = null; + + typeEmpty = ""; + typeNull = null; + } + + @Test + public void testValidateSubClusterRegisterRequest() { + + // Execution with valid inputs + + SubClusterInfo subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + SubClusterRegisterRequest request = null; + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubClusterRegister Request.")); + } + + // Execution with null SubClusterInfo + + subClusterInfo = null; + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + 
FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster Information.")); + } + + // Execution with Null SubClusterId + + subClusterInfo = + SubClusterInfo.newInstance(subClusterIdNull, amRMServiceAddress, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster Id information.")); + } + + // Execution with Invalid SubClusterId + + subClusterInfo = + SubClusterInfo.newInstance(subClusterIdInvalid, amRMServiceAddress, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid SubCluster Id information.")); + } + + // Execution with Null State + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNull, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster State information.")); + } + + // Execution with Null Capability + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capabilityNull); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with Empty Capability + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capabilityEmpty); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + } + + @Test + public void testValidateSubClusterRegisterRequestTimestamp() { + + // Execution with Negative Last Heartbeat + + SubClusterInfo subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeatNegative, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest 
request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid timestamp information.")); + } + + // Execution with Negative Last StartTime + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTimeNegative, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid timestamp information.")); + } + } + + @Test + public void testValidateSubClusterRegisterRequestAddress() { + // Execution with Null Address for amRMServiceAddress + + SubClusterInfo subClusterInfo = + SubClusterInfo.newInstance(subClusterId, addressNull, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage() + .startsWith("Missing SubCluster Endpoint information.")); + } + + // Execution with Empty Address for amRMServiceAddress + + subClusterInfo = SubClusterInfo.newInstance(subClusterId, addressEmpty, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage() + .startsWith("Missing SubCluster Endpoint information.")); + } + + // Execution with Null Address for clientRMServiceAddress + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + addressNull, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage() + .startsWith("Missing SubCluster Endpoint information.")); + } + + // Execution with Empty Address for clientRMServiceAddress + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + addressEmpty, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage() + .startsWith("Missing SubCluster 
Endpoint information.")); + } + + // Execution with Null Address for rmAdminServiceAddress + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, addressNull, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage() + .startsWith("Missing SubCluster Endpoint information.")); + } + + // Execution with Empty Address for rmAdminServiceAddress + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, addressEmpty, rmWebServiceAddress, + lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage() + .startsWith("Missing SubCluster Endpoint information.")); + } + + // Execution with Null Address for rmWebServiceAddress + + subClusterInfo = SubClusterInfo.newInstance(subClusterId, + amRMServiceAddress, clientRMServiceAddress, rmAdminServiceAddress, + addressNull, lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage() + .startsWith("Missing SubCluster Endpoint information.")); + } + + // Execution with Empty Address for rmWebServiceAddress + + subClusterInfo = SubClusterInfo.newInstance(subClusterId, + amRMServiceAddress, clientRMServiceAddress, rmAdminServiceAddress, + addressEmpty, lastHeartBeat, stateNew, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage() + .startsWith("Missing SubCluster Endpoint information.")); + } + } + + @Test + public void testValidateSubClusterRegisterRequestAddressInvalid() { + + // Address is not in host:port format for amRMService + + SubClusterInfo subClusterInfo = + SubClusterInfo.newInstance(subClusterId, addressWrong, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNull, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage().contains("valid host:port authority:")); + } + + // Address is not in host:port format for clientRMService + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + addressWrong, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNull, lastStartTime, capability); + try { + 
SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage().contains("valid host:port authority:")); + } + + // Address is not in host:port format for rmAdminService + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, addressWrong, rmWebServiceAddress, + lastHeartBeat, stateNull, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage().contains("valid host:port authority:")); + } + + // Address is not in host:port format for rmWebService + + subClusterInfo = SubClusterInfo.newInstance(subClusterId, + amRMServiceAddress, clientRMServiceAddress, rmAdminServiceAddress, + addressWrong, lastHeartBeat, stateNull, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage().contains("valid host:port authority:")); + } + + // Port is not an integer for amRMService + + subClusterInfo = SubClusterInfo.newInstance(subClusterId, addressWrongPort, + clientRMServiceAddress, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNull, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage().contains("valid host:port authority:")); + } + + // Port is not an integer for clientRMService + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + addressWrongPort, rmAdminServiceAddress, rmWebServiceAddress, + lastHeartBeat, stateNull, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage().contains("valid host:port authority:")); + } + + // Port is not an integer for rmAdminService + + subClusterInfo = + SubClusterInfo.newInstance(subClusterId, amRMServiceAddress, + clientRMServiceAddress, addressWrongPort, rmWebServiceAddress, + lastHeartBeat, stateNull, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage().contains("valid host:port authority:")); + } + + // Port is not an integer for rmWebService + + subClusterInfo = SubClusterInfo.newInstance(subClusterId, + amRMServiceAddress, 
clientRMServiceAddress, rmAdminServiceAddress, + addressWrongPort, lastHeartBeat, stateNull, lastStartTime, capability); + try { + SubClusterRegisterRequest request = + SubClusterRegisterRequest.newInstance(subClusterInfo); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage().contains("valid host:port authority:")); + } + + } + + @Test + public void testValidateSubClusterDeregisterRequest() { + + // Execution with valid inputs + + try { + SubClusterDeregisterRequest request = + SubClusterDeregisterRequest.newInstance(subClusterId, stateLost); + FederationMembershipStateStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + SubClusterDeregisterRequest request = null; + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubClusterDeregister Request.")); + } + + // Execution with null SubClusterId + + try { + SubClusterDeregisterRequest request = + SubClusterDeregisterRequest.newInstance(subClusterIdNull, stateLost); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster Id information.")); + } + + // Execution with invalid SubClusterId + + try { + SubClusterDeregisterRequest request = SubClusterDeregisterRequest + .newInstance(subClusterIdInvalid, stateLost); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid SubCluster Id information.")); + } + + // Execution with null SubClusterState + + try { + SubClusterDeregisterRequest request = + SubClusterDeregisterRequest.newInstance(subClusterId, stateNull); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster State information.")); + } + + // Execution with invalid SubClusterState + + try { + SubClusterDeregisterRequest request = + SubClusterDeregisterRequest.newInstance(subClusterId, stateNew); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue(e.getMessage().startsWith("Invalid non-final state: ")); + } + } + + @Test + public void testSubClusterHeartbeatRequest() { + + // Execution with valid inputs + + try { + SubClusterHeartbeatRequest request = SubClusterHeartbeatRequest + .newInstance(subClusterId, lastHeartBeat, stateLost, capability); + FederationMembershipStateStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + SubClusterHeartbeatRequest request = null; + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch 
(FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubClusterHeartbeat Request.")); + } + + // Execution with null SubClusterId + + try { + SubClusterHeartbeatRequest request = SubClusterHeartbeatRequest + .newInstance(subClusterIdNull, lastHeartBeat, stateLost, capability); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster Id information.")); + } + + // Execution with invalid SubClusterId + + try { + SubClusterHeartbeatRequest request = + SubClusterHeartbeatRequest.newInstance(subClusterIdInvalid, + lastHeartBeat, stateLost, capability); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid SubCluster Id information.")); + } + + // Execution with null SubClusterState + + try { + SubClusterHeartbeatRequest request = SubClusterHeartbeatRequest + .newInstance(subClusterId, lastHeartBeat, stateNull, capability); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster State information.")); + } + + // Execution with negative Last Heartbeat + + try { + SubClusterHeartbeatRequest request = + SubClusterHeartbeatRequest.newInstance(subClusterId, + lastHeartBeatNegative, stateLost, capability); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid timestamp information.")); + } + + // Execution with null Capability + + try { + SubClusterHeartbeatRequest request = SubClusterHeartbeatRequest + .newInstance(subClusterId, lastHeartBeat, stateLost, capabilityNull); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid capability information.")); + } + + // Execution with empty Capability + + try { + SubClusterHeartbeatRequest request = SubClusterHeartbeatRequest + .newInstance(subClusterId, lastHeartBeat, stateLost, capabilityEmpty); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid capability information.")); + } + } + + @Test + public void testGetSubClusterInfoRequest() { + + // Execution with valid inputs + + try { + GetSubClusterInfoRequest request = + GetSubClusterInfoRequest.newInstance(subClusterId); + FederationMembershipStateStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + GetSubClusterInfoRequest request = null; + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + 
e.getMessage().startsWith("Missing GetSubClusterInfo Request.")); + } + + // Execution with null SubClusterId + + try { + GetSubClusterInfoRequest request = + GetSubClusterInfoRequest.newInstance(subClusterIdNull); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster Id information.")); + } + + // Execution with invalid SubClusterId + + try { + GetSubClusterInfoRequest request = + GetSubClusterInfoRequest.newInstance(subClusterIdInvalid); + FederationMembershipStateStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid SubCluster Id information.")); + } + } + + @Test + public void testAddApplicationHomeSubClusterRequest() { + + // Execution with valid inputs + + ApplicationHomeSubCluster applicationHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appId, subClusterId); + try { + AddApplicationHomeSubClusterRequest request = + AddApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + AddApplicationHomeSubClusterRequest request = null; + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Missing AddApplicationHomeSubCluster Request.")); + } + + // Execution with null ApplicationHomeSubCluster + + applicationHomeSubCluster = null; + try { + AddApplicationHomeSubClusterRequest request = + AddApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue( + e.getMessage().startsWith("Missing ApplicationHomeSubCluster Info.")); + } + + // Execution with null SubClusterId + + applicationHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appId, subClusterIdNull); + try { + AddApplicationHomeSubClusterRequest request = + AddApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster Id information.")); + } + + // Execution with invalid SubClusterId + + applicationHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appId, subClusterIdInvalid); + try { + AddApplicationHomeSubClusterRequest request = + AddApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid SubCluster Id information.")); + } + + // Execution with Null ApplicationId + + applicationHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appIdNull, subClusterId); + try { + 
AddApplicationHomeSubClusterRequest request = + AddApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Application Id.")); + } + } + + @Test + public void testUpdateApplicationHomeSubClusterRequest() { + + // Execution with valid inputs + + ApplicationHomeSubCluster applicationHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appId, subClusterId); + try { + UpdateApplicationHomeSubClusterRequest request = + UpdateApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + UpdateApplicationHomeSubClusterRequest request = null; + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Missing UpdateApplicationHomeSubCluster Request.")); + } + + // Execution with null ApplicationHomeSubCluster + + applicationHomeSubCluster = null; + try { + UpdateApplicationHomeSubClusterRequest request = + UpdateApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue( + e.getMessage().startsWith("Missing ApplicationHomeSubCluster Info.")); + } + + // Execution with null SubClusterId + + applicationHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appId, subClusterIdNull); + try { + UpdateApplicationHomeSubClusterRequest request = + UpdateApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Missing SubCluster Id information.")); + } + + // Execution with invalid SubClusterId + + applicationHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appId, subClusterIdInvalid); + try { + UpdateApplicationHomeSubClusterRequest request = + UpdateApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + LOG.info(e.getMessage()); + Assert.assertTrue( + e.getMessage().startsWith("Invalid SubCluster Id information.")); + } + + // Execution with null ApplicationId + + applicationHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appIdNull, subClusterId); + try { + UpdateApplicationHomeSubClusterRequest request = + UpdateApplicationHomeSubClusterRequest + .newInstance(applicationHomeSubCluster); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Application Id.")); + } + } + + @Test + public void testGetApplicationHomeSubClusterRequest() { + + // Execution with valid inputs + + try { + GetApplicationHomeSubClusterRequest request = + 
GetApplicationHomeSubClusterRequest.newInstance(appId); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + GetApplicationHomeSubClusterRequest request = null; + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Missing GetApplicationHomeSubCluster Request.")); + } + + // Execution with null ApplicationId + + try { + GetApplicationHomeSubClusterRequest request = + GetApplicationHomeSubClusterRequest.newInstance(appIdNull); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Application Id.")); + } + + } + + @Test + public void testDeleteApplicationHomeSubClusterRequestNull() { + + // Execution with valid inputs + + try { + DeleteApplicationHomeSubClusterRequest request = + DeleteApplicationHomeSubClusterRequest.newInstance(appId); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + DeleteApplicationHomeSubClusterRequest request = null; + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Missing DeleteApplicationHomeSubCluster Request.")); + } + + // Execution with null ApplicationId + + try { + DeleteApplicationHomeSubClusterRequest request = + DeleteApplicationHomeSubClusterRequest.newInstance(appIdNull); + FederationApplicationHomeSubClusterStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Application Id.")); + } + + } + + @Test + public void testGetSubClusterPolicyConfigurationRequest() { + + // Execution with valid inputs + + try { + GetSubClusterPolicyConfigurationRequest request = + GetSubClusterPolicyConfigurationRequest.newInstance(queue); + FederationPolicyStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + GetSubClusterPolicyConfigurationRequest request = null; + FederationPolicyStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Missing GetSubClusterPolicyConfiguration Request.")); + } + + // Execution with null queue id + + try { + GetSubClusterPolicyConfigurationRequest request = + GetSubClusterPolicyConfigurationRequest.newInstance(queueNull); + FederationPolicyStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Queue.")); + } + + // Execution with empty queue id + + try { + GetSubClusterPolicyConfigurationRequest request = + GetSubClusterPolicyConfigurationRequest.newInstance(queueEmpty); + FederationPolicyStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException 
e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Queue.")); + } + + } + + @Test + public void testSetSubClusterPolicyConfigurationRequest() { + + // Execution with valid inputs + + try { + SubClusterPolicyConfiguration policy = + SubClusterPolicyConfiguration.newInstance(queue, type, params); + SetSubClusterPolicyConfigurationRequest request = + SetSubClusterPolicyConfigurationRequest.newInstance(policy); + FederationPolicyStoreInputValidator + .validate(request); + } catch (FederationStateStoreInvalidInputException e) { + Assert.fail(e.getMessage()); + } + + // Execution with null request + + try { + SetSubClusterPolicyConfigurationRequest request = null; + FederationPolicyStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage() + .startsWith("Missing SetSubClusterPolicyConfiguration Request.")); + } + + // Execution with null SubClusterPolicyConfiguration + + try { + SubClusterPolicyConfiguration policy = null; + SetSubClusterPolicyConfigurationRequest request = + SetSubClusterPolicyConfigurationRequest.newInstance(policy); + FederationPolicyStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue( + e.getMessage().startsWith("Missing SubClusterPolicyConfiguration.")); + } + + // Execution with null queue id + + try { + SubClusterPolicyConfiguration policy = + SubClusterPolicyConfiguration.newInstance(queueNull, type, params); + SetSubClusterPolicyConfigurationRequest request = + SetSubClusterPolicyConfigurationRequest.newInstance(policy); + FederationPolicyStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Queue.")); + } + + // Execution with empty queue id + + try { + SubClusterPolicyConfiguration policy = + SubClusterPolicyConfiguration.newInstance(queueEmpty, type, params); + SetSubClusterPolicyConfigurationRequest request = + SetSubClusterPolicyConfigurationRequest.newInstance(policy); + FederationPolicyStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Queue.")); + } + + // Execution with null policy type + + try { + SubClusterPolicyConfiguration policy = + SubClusterPolicyConfiguration.newInstance(queue, typeNull, params); + SetSubClusterPolicyConfigurationRequest request = + SetSubClusterPolicyConfigurationRequest.newInstance(policy); + FederationPolicyStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Policy Type.")); + } + + // Execution with empty policy type + + try { + SubClusterPolicyConfiguration policy = + SubClusterPolicyConfiguration.newInstance(queue, typeEmpty, params); + SetSubClusterPolicyConfigurationRequest request = + SetSubClusterPolicyConfigurationRequest.newInstance(policy); + FederationPolicyStoreInputValidator + .validate(request); + Assert.fail(); + } catch (FederationStateStoreInvalidInputException e) { + Assert.assertTrue(e.getMessage().startsWith("Missing Policy Type.")); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java new file mode 100644 index 00000000000..acc14dd9f79 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java @@ -0,0 +1,227 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.federation.utils; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.*; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.ConfigurableFederationPolicy; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext; +import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo; +import org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl; +import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.*; +import org.apache.hadoop.yarn.util.Records; + +import java.io.File; +import java.net.URL; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Support class providing common initialization methods to test federation + * policies. + */ +public final class FederationPoliciesTestUtil { + + private FederationPoliciesTestUtil() { + // disabled. 
+ } + + private static final String FEDR_NODE_PREFIX = "fedr-test-node-"; + + + public static List<ResourceRequest> createResourceRequests(String[] hosts, + int memory, int vCores, int priority, int containers, + String labelExpression, boolean relaxLocality) throws YarnException { + List<ResourceRequest> reqs = new ArrayList<ResourceRequest>(); + for (String host : hosts) { + ResourceRequest hostReq = + createResourceRequest(host, memory, vCores, priority, containers, + labelExpression, relaxLocality); + reqs.add(hostReq); + ResourceRequest rackReq = + createResourceRequest("/default-rack", memory, vCores, priority, + containers, labelExpression, relaxLocality); + reqs.add(rackReq); + } + + ResourceRequest offRackReq = + createResourceRequest(ResourceRequest.ANY, memory, vCores, priority, + containers, labelExpression, relaxLocality); + reqs.add(offRackReq); + return reqs; + } + + protected static ResourceRequest createResourceRequest(String resource, + int memory, int vCores, int priority, int containers, + boolean relaxLocality) throws YarnException { + return createResourceRequest(resource, memory, vCores, priority, containers, + null, relaxLocality); + } + + @SuppressWarnings("checkstyle:parameternumber") + public static ResourceRequest createResourceRequest(long id, String resource, + int memory, int vCores, int priority, int containers, + String labelExpression, boolean relaxLocality) throws YarnException { + ResourceRequest out = + createResourceRequest(resource, memory, vCores, priority, containers, + labelExpression, relaxLocality); + out.setAllocationRequestId(id); + return out; + } + + public static ResourceRequest createResourceRequest(String resource, + int memory, int vCores, int priority, int containers, + String labelExpression, boolean relaxLocality) throws YarnException { + ResourceRequest req = Records.newRecord(ResourceRequest.class); + req.setResourceName(resource); + req.setNumContainers(containers); + Priority pri = Records.newRecord(Priority.class); + pri.setPriority(priority); + req.setPriority(pri); + Resource capability = Records.newRecord(Resource.class); + capability.setMemorySize(memory); + capability.setVirtualCores(vCores); + req.setCapability(capability); + if (labelExpression != null) { + req.setNodeLabelExpression(labelExpression); + } + req.setRelaxLocality(relaxLocality); + return req; + } + + public static void initializePolicyContext( + FederationPolicyInitializationContext fpc, ConfigurableFederationPolicy + policy, WeightedPolicyInfo policyInfo, + Map<SubClusterId, SubClusterInfo> activeSubclusters) + throws YarnException { + ByteBuffer buf = policyInfo.toByteBuffer(); + fpc.setSubClusterPolicyConfiguration(SubClusterPolicyConfiguration + .newInstance("queue1", policy.getClass().getCanonicalName(), buf)); + FederationStateStoreFacade facade = FederationStateStoreFacade + .getInstance(); + FederationStateStore fss = mock(FederationStateStore.class); + + if (activeSubclusters == null) { + activeSubclusters = new HashMap<SubClusterId, SubClusterInfo>(); + } + GetSubClustersInfoResponse response = GetSubClustersInfoResponse + .newInstance(new ArrayList<SubClusterInfo>(activeSubclusters.values())); + + when(fss.getSubClusters(any())).thenReturn(response); + facade.reinitialize(fss, new Configuration()); + fpc.setFederationStateStoreFacade(facade); + policy.reinitialize(fpc); + } + + public static void initializePolicyContext( + ConfigurableFederationPolicy policy, + WeightedPolicyInfo policyInfo, Map<SubClusterId, SubClusterInfo> activeSubclusters) throws YarnException { + FederationPolicyInitializationContext context = + new FederationPolicyInitializationContext(null, initResolver(), + initFacade(), 
SubClusterId.newInstance("homesubcluster")); + initializePolicyContext(context, policy, policyInfo, activeSubclusters); + } + + /** + * Initialize a {@link SubClusterResolver}. + * + * @return a subcluster resolver for tests. + */ + public static SubClusterResolver initResolver() { + YarnConfiguration conf = new YarnConfiguration(); + SubClusterResolver resolver = + new DefaultSubClusterResolverImpl(); + URL url = + Thread.currentThread().getContextClassLoader().getResource("nodes"); + if (url == null) { + throw new RuntimeException( + "Could not find 'nodes' dummy file in classpath"); + } + // This will get rid of the beginning '/' in the url in Windows env + File file = new File(url.getPath()); + + conf.set(YarnConfiguration.FEDERATION_MACHINE_LIST, file.getPath()); + resolver.setConf(conf); + resolver.load(); + return resolver; + } + + /** + * Initialiaze a main-memory {@link FederationStateStoreFacade} used for + * testing, wiht a mock resolver. + * + * @param subClusterInfos the list of subclusters to be served on + * getSubClusters invocations. + * + * @return the facade. + * + * @throws YarnException in case the initialization is not successful. + */ + + public static FederationStateStoreFacade initFacade( + List subClusterInfos, SubClusterPolicyConfiguration + policyConfiguration) throws YarnException { + FederationStateStoreFacade goodFacade = FederationStateStoreFacade + .getInstance(); + FederationStateStore fss = mock(FederationStateStore.class); + GetSubClustersInfoResponse response = GetSubClustersInfoResponse + .newInstance(subClusterInfos); + when(fss.getSubClusters(any())).thenReturn(response); + + List configurations = new ArrayList<>(); + configurations.add(policyConfiguration); + + GetSubClusterPoliciesConfigurationsResponse policiesResponse = + GetSubClusterPoliciesConfigurationsResponse + .newInstance(configurations); + when(fss.getPoliciesConfigurations(any())).thenReturn(policiesResponse); + + GetSubClusterPolicyConfigurationResponse policyResponse = + GetSubClusterPolicyConfigurationResponse + .newInstance(policyConfiguration); + when(fss.getPolicyConfiguration(any())).thenReturn(policyResponse); + + goodFacade.reinitialize(fss, new Configuration()); + return goodFacade; + } + + /** + * Initialiaze a main-memory {@link FederationStateStoreFacade} used for + * testing, wiht a mock resolver. + * + * @return the facade. + * + * @throws YarnException in case the initialization is not successful. + */ + public static FederationStateStoreFacade initFacade() throws YarnException { + return initFacade(new ArrayList<>(), mock(SubClusterPolicyConfiguration + .class)); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreTestUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreTestUtil.java new file mode 100644 index 00000000000..5d4c8d5ad78 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreTestUtil.java @@ -0,0 +1,181 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.utils; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.util.MonotonicClock; + +/** + * Utility class for FederationStateStore unit tests. 
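+ *
+ * A test can wire this utility against an in-memory store roughly as follows
+ * (illustrative sketch; the same pattern appears in
+ * TestFederationStateStoreFacade#setUp below):
+ *
+ * <pre>
+ *   FederationStateStore store = new MemoryFederationStateStore();
+ *   store.init(new Configuration());
+ *   FederationStateStoreTestUtil util = new FederationStateStoreTestUtil(store);
+ *   util.registerSubClusters(3);       // SC-0, SC-1, SC-2
+ *   util.addAppsHomeSC(clusterTs, 5);  // five apps mapped to home sub-clusters
+ *   util.addPolicyConfigs(2);          // queue-0/policy-0, queue-1/policy-1
+ * </pre>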
+ */ +public class FederationStateStoreTestUtil { + + private static final MonotonicClock CLOCK = new MonotonicClock(); + + public static final String SC_PREFIX = "SC-"; + public static final String Q_PREFIX = "queue-"; + public static final String POLICY_PREFIX = "policy-"; + public static final String INVALID = "dummy"; + + private FederationStateStore stateStore; + + public FederationStateStoreTestUtil(FederationStateStore stateStore) { + this.stateStore = stateStore; + } + + private SubClusterInfo createSubClusterInfo(SubClusterId subClusterId) { + + String amRMAddress = "1.2.3.4:1"; + String clientRMAddress = "1.2.3.4:2"; + String rmAdminAddress = "1.2.3.4:3"; + String webAppAddress = "1.2.3.4:4"; + + return SubClusterInfo.newInstance(subClusterId, amRMAddress, + clientRMAddress, rmAdminAddress, webAppAddress, + SubClusterState.SC_RUNNING, CLOCK.getTime(), "capability"); + } + + public void registerSubCluster(SubClusterId subClusterId) + throws YarnException { + + SubClusterInfo subClusterInfo = createSubClusterInfo(subClusterId); + stateStore.registerSubCluster( + SubClusterRegisterRequest.newInstance(subClusterInfo)); + } + + public void registerSubClusters(int numSubClusters) throws YarnException { + + for (int i = 0; i < numSubClusters; i++) { + registerSubCluster(SubClusterId.newInstance(SC_PREFIX + i)); + } + } + + private void addApplicationHomeSC(ApplicationId appId, + SubClusterId subClusterId) throws YarnException { + ApplicationHomeSubCluster ahsc = + ApplicationHomeSubCluster.newInstance(appId, subClusterId); + AddApplicationHomeSubClusterRequest request = + AddApplicationHomeSubClusterRequest.newInstance(ahsc); + stateStore.addApplicationHomeSubCluster(request); + } + + public void addAppsHomeSC(long clusterTs, int numApps) throws YarnException { + for (int i = 0; i < numApps; i++) { + addApplicationHomeSC(ApplicationId.newInstance(clusterTs, i), + SubClusterId.newInstance(SC_PREFIX + i)); + } + } + + public List getAllSubClusterIds( + boolean filterInactiveSubclusters) throws YarnException { + + List infos = stateStore + .getSubClusters( + GetSubClustersInfoRequest.newInstance(filterInactiveSubclusters)) + .getSubClusters(); + List ids = new ArrayList<>(); + for (SubClusterInfo s : infos) { + ids.add(s.getSubClusterId()); + } + + return ids; + } + + private SubClusterPolicyConfiguration createSCPolicyConf(String queueName, + String policyType) { + return SubClusterPolicyConfiguration.newInstance(queueName, policyType, + ByteBuffer.allocate(1)); + } + + private void setPolicyConf(String queue, String policyType) + throws YarnException { + SetSubClusterPolicyConfigurationRequest request = + SetSubClusterPolicyConfigurationRequest + .newInstance(createSCPolicyConf(queue, policyType)); + stateStore.setPolicyConfiguration(request); + } + + public void addPolicyConfigs(int numQueues) throws YarnException { + + for (int i = 0; i < numQueues; i++) { + setPolicyConf(Q_PREFIX + i, POLICY_PREFIX + i); + } + } + + public SubClusterInfo querySubClusterInfo(SubClusterId subClusterId) + throws YarnException { + GetSubClusterInfoRequest request = + GetSubClusterInfoRequest.newInstance(subClusterId); + return stateStore.getSubCluster(request).getSubClusterInfo(); + } + + public SubClusterId queryApplicationHomeSC(ApplicationId appId) + throws YarnException { + GetApplicationHomeSubClusterRequest request = + GetApplicationHomeSubClusterRequest.newInstance(appId); + + GetApplicationHomeSubClusterResponse response = + stateStore.getApplicationHomeSubCluster(request); + + return 
response.getApplicationHomeSubCluster().getHomeSubCluster(); + } + + public SubClusterPolicyConfiguration queryPolicyConfiguration(String queue) + throws YarnException { + GetSubClusterPolicyConfigurationRequest request = + GetSubClusterPolicyConfigurationRequest.newInstance(queue); + + GetSubClusterPolicyConfigurationResponse result = + stateStore.getPolicyConfiguration(request); + return result.getPolicyConfiguration(); + } + + public void deregisterAllSubClusters() throws YarnException { + for (SubClusterId sc : getAllSubClusterIds(true)) { + deRegisterSubCluster(sc); + } + } + + private void deRegisterSubCluster(SubClusterId subClusterId) + throws YarnException { + stateStore.deregisterSubCluster(SubClusterDeregisterRequest + .newInstance(subClusterId, SubClusterState.SC_UNREGISTERED)); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java new file mode 100644 index 00000000000..6328122e77d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java @@ -0,0 +1,192 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.utils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +/** + * Unit tests for FederationStateStoreFacade. + */ +@RunWith(Parameterized.class) +public class TestFederationStateStoreFacade { + + @Parameters + @SuppressWarnings({"NoWhitespaceAfter"}) + public static Collection getParameters() { + return Arrays + .asList(new Boolean[][] { { Boolean.FALSE }, { Boolean.TRUE } }); + } + + private final long clusterTs = System.currentTimeMillis(); + private final int numSubClusters = 3; + private final int numApps = 5; + private final int numQueues = 2; + + private Configuration conf; + private FederationStateStore stateStore; + private FederationStateStoreTestUtil stateStoreTestUtil; + private FederationStateStoreFacade facade = + FederationStateStoreFacade.getInstance(); + + public TestFederationStateStoreFacade(Boolean isCachingEnabled) { + conf = new Configuration(); + if (!(isCachingEnabled.booleanValue())) { + conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0); + } + } + + @Before + public void setUp() throws IOException, YarnException { + stateStore = new MemoryFederationStateStore(); + stateStore.init(conf); + facade.reinitialize(stateStore, conf); + // hydrate the store + stateStoreTestUtil = new FederationStateStoreTestUtil(stateStore); + stateStoreTestUtil.registerSubClusters(numSubClusters); + stateStoreTestUtil.addAppsHomeSC(clusterTs, numApps); + stateStoreTestUtil.addPolicyConfigs(numQueues); + } + + @After + public void tearDown() throws Exception { + stateStore.close(); + stateStore = null; + } + + @Test + public void testGetSubCluster() throws YarnException { + for (int i = 0; i < numSubClusters; i++) { + SubClusterId subClusterId = + SubClusterId.newInstance(FederationStateStoreTestUtil.SC_PREFIX + i); + Assert.assertEquals(stateStoreTestUtil.querySubClusterInfo(subClusterId), + facade.getSubCluster(subClusterId)); + } + } + + @Test + public void testInvalidGetSubCluster() throws YarnException { + SubClusterId subClusterId = + SubClusterId.newInstance(FederationStateStoreTestUtil.INVALID); + Assert.assertNull(facade.getSubCluster(subClusterId)); + } + + @Test + public void 
testGetSubClusterFlushCache() throws YarnException { + for (int i = 0; i < numSubClusters; i++) { + SubClusterId subClusterId = + SubClusterId.newInstance(FederationStateStoreTestUtil.SC_PREFIX + i); + Assert.assertEquals(stateStoreTestUtil.querySubClusterInfo(subClusterId), + facade.getSubCluster(subClusterId, true)); + } + } + + @Test + public void testGetSubClusters() throws YarnException { + Map subClusters = + facade.getSubClusters(false); + for (SubClusterId subClusterId : subClusters.keySet()) { + Assert.assertEquals(stateStoreTestUtil.querySubClusterInfo(subClusterId), + subClusters.get(subClusterId)); + } + } + + @Test + public void testGetPolicyConfiguration() throws YarnException { + for (int i = 0; i < numQueues; i++) { + String queue = FederationStateStoreTestUtil.Q_PREFIX + i; + Assert.assertEquals(stateStoreTestUtil.queryPolicyConfiguration(queue), + facade.getPolicyConfiguration(queue)); + } + } + + @Test + public void testInvalidGetPolicyConfiguration() throws YarnException { + Assert.assertNull( + facade.getPolicyConfiguration(FederationStateStoreTestUtil.INVALID)); + } + + @Test + public void testGetPoliciesConfigurations() throws YarnException { + Map queuePolicies = + facade.getPoliciesConfigurations(); + for (String queue : queuePolicies.keySet()) { + Assert.assertEquals(stateStoreTestUtil.queryPolicyConfiguration(queue), + queuePolicies.get(queue)); + } + } + + @Test + public void testGetHomeSubClusterForApp() throws YarnException { + for (int i = 0; i < numApps; i++) { + ApplicationId appId = ApplicationId.newInstance(clusterTs, i); + Assert.assertEquals(stateStoreTestUtil.queryApplicationHomeSC(appId), + facade.getApplicationHomeSubCluster(appId)); + } + } + + @Test + public void testAddApplicationHomeSubCluster() throws YarnException { + + // Inserting into FederationStateStore + ApplicationId appId = ApplicationId.newInstance(clusterTs, numApps + 1); + SubClusterId subClusterId1 = SubClusterId.newInstance("Home1"); + + ApplicationHomeSubCluster appHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appId, subClusterId1); + + SubClusterId result = + facade.addApplicationHomeSubCluster(appHomeSubCluster); + + Assert.assertEquals(facade.getApplicationHomeSubCluster(appId), result); + Assert.assertEquals(subClusterId1, result); + + // Inserting into FederationStateStore. + // The application is already present. + // FederationFacade will return Home1 as SubClusterId. + SubClusterId subClusterId2 = SubClusterId.newInstance("Home2"); + appHomeSubCluster = + ApplicationHomeSubCluster.newInstance(appId, subClusterId2); + + result = facade.addApplicationHomeSubCluster(appHomeSubCluster); + + Assert.assertEquals(facade.getApplicationHomeSubCluster(appId), result); + Assert.assertEquals(subClusterId1, result); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java new file mode 100644 index 00000000000..868e7719102 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.federation.utils; + +import javax.cache.integration.CacheLoaderException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.io.retry.RetryPolicy.RetryAction; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreException; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException; +import org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreRetriableException; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException; + +/** + * Test class to validate FederationStateStoreFacade retry policy. + */ +public class TestFederationStateStoreFacadeRetry { + + private int maxRetries = 4; + private Configuration conf; + + @Before + public void setup() { + conf = new Configuration(); + conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries); + } + + /* + * Test to validate that FederationStateStoreRetriableException is a retriable + * exception. + */ + @Test + public void testFacadeRetriableException() throws Exception { + RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf); + RetryAction action = policy.shouldRetry( + new FederationStateStoreRetriableException(""), 0, 0, false); + // We compare only the action, since delay and the reason are random values + // during this test + Assert.assertEquals(RetryAction.RETRY.action, action.action); + + // After maxRetries we stop to retry + action = policy.shouldRetry(new FederationStateStoreRetriableException(""), + maxRetries, 0, false); + Assert.assertEquals(RetryAction.FAIL.action, action.action); + } + + /* + * Test to validate that YarnException is not a retriable exception. + */ + @Test + public void testFacadeYarnException() throws Exception { + RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf); + RetryAction action = policy.shouldRetry(new YarnException(), 0, 0, false); + Assert.assertEquals(RetryAction.FAIL.action, action.action); + } + + /* + * Test to validate that FederationStateStoreException is not a retriable + * exception. + */ + @Test + public void testFacadeStateStoreException() throws Exception { + RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf); + RetryAction action = policy + .shouldRetry(new FederationStateStoreException("Error"), 0, 0, false); + Assert.assertEquals(RetryAction.FAIL.action, action.action); + } + + /* + * Test to validate that FederationStateStoreInvalidInputException is not a + * retriable exception. + */ + @Test + public void testFacadeInvalidInputException() throws Exception { + RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf); + RetryAction action = policy.shouldRetry( + new FederationStateStoreInvalidInputException(""), 0, 0, false); + Assert.assertEquals(RetryAction.FAIL.action, action.action); + } + + /* + * Test to validate that CacheLoaderException is a retriable exception. 
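+ * (Explanatory note: the facade loads entries through a JCache CacheLoader,
+ * so a transient state-store failure can surface wrapped in a
+ * CacheLoaderException; retrying is expected to succeed once the store
+ * recovers.)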
+ */ + @Test + public void testFacadeCacheRetriableException() throws Exception { + RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf); + RetryAction action = + policy.shouldRetry(new CacheLoaderException(""), 0, 0, false); + // We compare only the action, since delay and the reason are random values + // during this test + Assert.assertEquals(RetryAction.RETRY.action, action.action); + + // After maxRetries we stop to retry + action = + policy.shouldRetry(new CacheLoaderException(""), maxRetries, 0, false); + Assert.assertEquals(RetryAction.FAIL.action, action.action); + } + + /* + * Test to validate that PoolInitializationException is a retriable exception. + */ + @Test + public void testFacadePoolInitRetriableException() throws Exception { + // PoolInitializationException is a retriable exception + RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf); + RetryAction action = policy.shouldRetry( + new PoolInitializationException(new YarnException()), 0, 0, false); + // We compare only the action, delay and the reason are random value + // during this test + Assert.assertEquals(RetryAction.RETRY.action, action.action); + + // After maxRetries we stop to retry + action = + policy.shouldRetry(new PoolInitializationException(new YarnException()), + maxRetries, 0, false); + Assert.assertEquals(RetryAction.FAIL.action, action.action); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java new file mode 100644 index 00000000000..9159cf75150 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java @@ -0,0 +1,335 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.uam; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; +import org.apache.hadoop.yarn.server.MockResourceManagerFacade; +import org.apache.hadoop.yarn.util.AsyncCallback; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Unit test for UnmanagedApplicationManager. + */ +public class TestUnmanagedApplicationManager { + private static final Logger LOG = + LoggerFactory.getLogger(TestUnmanagedApplicationManager.class); + + private TestableUnmanagedApplicationManager uam; + private Configuration conf = new YarnConfiguration(); + private CountingCallback callback; + + private ApplicationAttemptId attemptId; + + @Before + public void setup() { + conf.set(YarnConfiguration.RM_CLUSTER_ID, "subclusterId"); + callback = new CountingCallback(); + + attemptId = + ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1); + + uam = new TestableUnmanagedApplicationManager(conf, + attemptId.getApplicationId(), null, "submitter", "appNameSuffix"); + } + + protected void waitForCallBackCountAndCheckZeroPending( + CountingCallback callBack, int expectCallBackCount) { + synchronized (callBack) { + while (callBack.callBackCount != expectCallBackCount) { + try { + callBack.wait(); + } catch (InterruptedException e) { + } + } + Assert.assertEquals( + "Non zero pending requests when number of allocate callbacks reaches " + + expectCallBackCount, + 0, callBack.requestQueueSize); + } + } + + @Test(timeout = 5000) + public void testBasicUsage() + throws YarnException, IOException, InterruptedException { + + createAndRegisterApplicationMaster( + RegisterApplicationMasterRequest.newInstance(null, 0, null), attemptId); + + allocateAsync(AllocateRequest.newInstance(0, 0, null, null, null), callback, + attemptId); + + // Wait for outstanding async allocate callback + waitForCallBackCountAndCheckZeroPending(callback, 1); + + finishApplicationMaster( + FinishApplicationMasterRequest.newInstance(null, null, null), + attemptId); + } + + @Test(timeout = 5000) + public void testReRegister() + throws YarnException, IOException, InterruptedException { + + createAndRegisterApplicationMaster( + RegisterApplicationMasterRequest.newInstance(null, 0, null), attemptId); + + uam.setShouldReRegisterNext(); + + allocateAsync(AllocateRequest.newInstance(0, 0, null, null, null), callback, + attemptId); + + // Wait for outstanding async allocate callback + waitForCallBackCountAndCheckZeroPending(callback, 1); + + uam.setShouldReRegisterNext(); + + 
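+ // Second simulated RM restart: the finish call below is expected to
+ // re-register with the mock RM transparently before completing.
+ // (Descriptive note; the failover behaviour is driven by
+ // MockResourceManagerFacade#setShouldReRegisterNext.)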
finishApplicationMaster( + FinishApplicationMasterRequest.newInstance(null, null, null), + attemptId); + } + + /** + * If register is slow, async allocate requests in the meanwhile should not + * throw or be dropped. + */ + @Test(timeout = 5000) + public void testSlowRegisterCall() + throws YarnException, IOException, InterruptedException { + + // Register with wait() in RM in a separate thread + Thread registerAMThread = new Thread(new Runnable() { + @Override + public void run() { + try { + createAndRegisterApplicationMaster( + RegisterApplicationMasterRequest.newInstance(null, 1001, null), + attemptId); + } catch (Exception e) { + LOG.info("Register thread exception", e); + } + } + }); + + // Sync obj from mock RM + Object syncObj = MockResourceManagerFacade.getSyncObj(); + + // Wait for register call in the thread get into RM and then wake us + synchronized (syncObj) { + LOG.info("Starting register thread"); + registerAMThread.start(); + try { + LOG.info("Test main starts waiting"); + syncObj.wait(); + LOG.info("Test main wait finished"); + } catch (Exception e) { + LOG.info("Test main wait interrupted", e); + } + } + + // First allocate before register succeeds + allocateAsync(AllocateRequest.newInstance(0, 0, null, null, null), callback, + attemptId); + + // Notify the register thread + synchronized (syncObj) { + syncObj.notifyAll(); + } + + LOG.info("Test main wait for register thread to finish"); + registerAMThread.join(); + LOG.info("Register thread finished"); + + // Second allocate, normal case + allocateAsync(AllocateRequest.newInstance(0, 0, null, null, null), callback, + attemptId); + + // Both allocate before should respond + waitForCallBackCountAndCheckZeroPending(callback, 2); + + finishApplicationMaster( + FinishApplicationMasterRequest.newInstance(null, null, null), + attemptId); + + // Allocates after finishAM should be ignored + allocateAsync(AllocateRequest.newInstance(0, 0, null, null, null), callback, + attemptId); + allocateAsync(AllocateRequest.newInstance(0, 0, null, null, null), callback, + attemptId); + + Assert.assertEquals(0, callback.requestQueueSize); + + // A short wait just in case the allocates get executed + try { + Thread.sleep(100); + } catch (InterruptedException e) { + } + + Assert.assertEquals(2, callback.callBackCount); + } + + @Test(expected = Exception.class) + public void testAllocateWithoutRegister() + throws YarnException, IOException, InterruptedException { + allocateAsync(AllocateRequest.newInstance(0, 0, null, null, null), callback, + attemptId); + } + + @Test(expected = Exception.class) + public void testFinishWithoutRegister() + throws YarnException, IOException, InterruptedException { + finishApplicationMaster( + FinishApplicationMasterRequest.newInstance(null, null, null), + attemptId); + } + + @Test + public void testForceKill() + throws YarnException, IOException, InterruptedException { + createAndRegisterApplicationMaster( + RegisterApplicationMasterRequest.newInstance(null, 0, null), attemptId); + uam.forceKillApplication(); + + try { + uam.forceKillApplication(); + Assert.fail("Should fail because application is already killed"); + } catch (YarnException t) { + } + } + + protected UserGroupInformation getUGIWithToken( + ApplicationAttemptId appAttemptId) { + UserGroupInformation ugi = + UserGroupInformation.createRemoteUser(appAttemptId.toString()); + AMRMTokenIdentifier token = new AMRMTokenIdentifier(appAttemptId, 1); + ugi.addTokenIdentifier(token); + return ugi; + } + + protected RegisterApplicationMasterResponse + 
createAndRegisterApplicationMaster( + final RegisterApplicationMasterRequest request, + ApplicationAttemptId appAttemptId) + throws YarnException, IOException, InterruptedException { + return getUGIWithToken(appAttemptId).doAs( + new PrivilegedExceptionAction() { + @Override + public RegisterApplicationMasterResponse run() + throws YarnException, IOException { + RegisterApplicationMasterResponse response = + uam.createAndRegisterApplicationMaster(request); + return response; + } + }); + } + + protected void allocateAsync(final AllocateRequest request, + final AsyncCallback callBack, + ApplicationAttemptId appAttemptId) + throws YarnException, IOException, InterruptedException { + getUGIWithToken(appAttemptId).doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws YarnException { + uam.allocateAsync(request, callBack); + return null; + } + }); + } + + protected FinishApplicationMasterResponse finishApplicationMaster( + final FinishApplicationMasterRequest request, + ApplicationAttemptId appAttemptId) + throws YarnException, IOException, InterruptedException { + return getUGIWithToken(appAttemptId) + .doAs(new PrivilegedExceptionAction() { + @Override + public FinishApplicationMasterResponse run() + throws YarnException, IOException { + FinishApplicationMasterResponse response = + uam.finishApplicationMaster(request); + return response; + } + }); + } + + protected class CountingCallback implements AsyncCallback { + private int callBackCount; + private int requestQueueSize; + + @Override + public void callback(AllocateResponse response) { + synchronized (this) { + callBackCount++; + requestQueueSize = uam.getRequestQueueSize(); + this.notifyAll(); + } + } + } + + /** + * Testable UnmanagedApplicationManager that talks to a mock RM. 
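+ * Instead of opening a real RM connection, the createRMProxy() override below
+ * hands back a single shared MockResourceManagerFacade instance, so tests can
+ * drive RM behaviour (for example a forced re-register) directly on the mock.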
+ */ + public static class TestableUnmanagedApplicationManager + extends UnmanagedApplicationManager { + + private MockResourceManagerFacade rmProxy; + + public TestableUnmanagedApplicationManager(Configuration conf, + ApplicationId appId, String queueName, String submitter, + String appNameSuffix) { + super(conf, appId, queueName, submitter, appNameSuffix); + } + + @SuppressWarnings("unchecked") + @Override + protected T createRMProxy(final Class protocol, Configuration config, + UserGroupInformation user, Token token) { + if (rmProxy == null) { + rmProxy = new MockResourceManagerFacade(config, 0); + } + return (T) rmProxy; + } + + public void setShouldReRegisterNext() { + if (rmProxy != null) { + rmProxy.setShouldReRegisterNext(); + } + } + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/resources/nodes b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/resources/nodes new file mode 100644 index 00000000000..2b7e2372f1f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/resources/nodes @@ -0,0 +1,8 @@ +node1,subcluster1,rack1 + node2 , subcluster2, RACK1 +noDE3,subcluster3, rack2 +node4, subcluster3, rack2 +subcluster0-rack0-host0,subcluster0, subcluster0-rack0 +Subcluster1-RACK1-HOST1,subcluster1, subCluster1-RACK1 +SUBCLUSTER1-RACK1-HOST2,subcluster1, subCluster1-RACK1 +SubCluster2-RACK3-HOST3,subcluster2, subcluster2-rack3 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/resources/nodes-malformed b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/resources/nodes-malformed new file mode 100644 index 00000000000..6d0aa39fc9c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/resources/nodes-malformed @@ -0,0 +1,3 @@ +node1, +node2,subcluster2,subCluster2, rack1 +node3,subcluster3, rack2 \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml index 094519afe6c..a50a769b0eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml @@ -172,6 +172,13 @@ org.fusesource.leveldbjni leveldbjni-all + + + org.apache.hadoop + hadoop-yarn-server-common + test-jar + test + @@ -208,6 +215,44 @@ ${project.build.directory}/native-results + + cetest + cmake-test + test + + + cetest + ${project.build.directory}/native/test + ${basedir}/src + ${project.build.directory}/native/test/cetest + + --gtest_filter=-Perf. 
+ --gtest_output=xml:${project.build.directory}/surefire-reports/TEST-cetest.xml + + ${project.build.directory}/surefire-reports + + + + + + org.apache.maven.plugins + maven-antrun-plugin + + + make + compile + + run + + + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt index f7fe83d6ebc..07c29bf91af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt @@ -19,6 +19,9 @@ cmake_minimum_required(VERSION 2.6 FATAL_ERROR) list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common) include(HadoopCommon) +# Set gtest path +set(GTEST_SRC_DIR ${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common/src/main/native/gtest) + # determine if container-executor.conf.dir is an absolute # path in case the OS we're compiling on doesn't have # a hook in get_executable. We'll use this define @@ -80,29 +83,59 @@ endfunction() include_directories( ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_BINARY_DIR} + ${GTEST_SRC_DIR}/include main/native/container-executor main/native/container-executor/impl ) +# add gtest as system library to suppress gcc warnings +include_directories(SYSTEM ${GTEST_SRC_DIR}/include) + configure_file(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h) +add_library(gtest ${GTEST_SRC_DIR}/gtest-all.cc) +set_target_properties(gtest PROPERTIES COMPILE_FLAGS "-w") + add_library(container + main/native/container-executor/impl/util.c main/native/container-executor/impl/configuration.c main/native/container-executor/impl/container-executor.c main/native/container-executor/impl/get_executable.c + main/native/container-executor/impl/utils/string-utils.c + main/native/container-executor/impl/utils/path-utils.c + main/native/container-executor/impl/modules/cgroups/cgroups-operations.c + main/native/container-executor/impl/modules/common/module-configs.c + main/native/container-executor/impl/modules/gpu/gpu-module.c ) add_executable(container-executor main/native/container-executor/impl/main.c ) + target_link_libraries(container-executor container ) + output_directory(container-executor target/usr/local/bin) +# Test cases add_executable(test-container-executor main/native/container-executor/test/test-container-executor.c ) target_link_libraries(test-container-executor container ${EXTRA_LIBS} ) + output_directory(test-container-executor target/usr/local/bin) + +# unit tests for container executor +add_executable(cetest + main/native/container-executor/impl/util.c + main/native/container-executor/test/test_configuration.cc + main/native/container-executor/test/test_main.cc + main/native/container-executor/test/utils/test-string-utils.cc + main/native/container-executor/test/utils/test-path-utils.cc + main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc + main/native/container-executor/test/modules/gpu/test-gpu-module.cc + main/native/container-executor/test/test_util.cc) +target_link_libraries(cetest gtest container) +output_directory(cetest test) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java new file mode 100644 index 00000000000..5e41701faa6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrUpdateContainersEvent.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager; + +import org.apache.hadoop.yarn.api.records.Container; +import java.util.List; + +/** + * Event used by the NodeStatusUpdater to notify the ContainerManager of + * container update commands it received from the RM. + */ +public class CMgrUpdateContainersEvent extends ContainerManagerEvent { + + private final List containersToUpdate; + + /** + * Create event. + * @param containersToUpdate Container to update. + */ + public CMgrUpdateContainersEvent(List containersToUpdate) { + super(ContainerManagerEventType.UPDATE_CONTAINERS); + this.containersToUpdate = containersToUpdate; + } + + /** + * Get containers to update. + * @return List of containers to update. 
+ */ + public List getContainersToUpdate() { + return this.containersToUpdate; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java index 8861bc7577f..8c5f7e21f1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerManagerEventType.java @@ -21,6 +21,6 @@ package org.apache.hadoop.yarn.server.nodemanager; public enum ContainerManagerEventType { FINISH_APPS, FINISH_CONTAINERS, - DECREASE_CONTAINERS_RESOURCE, + UPDATE_CONTAINERS, SIGNAL_CONTAINERS } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java index ae2a4ef1ca4..502485f9091 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java @@ -38,6 +38,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; @@ -99,6 +100,7 @@ public class DirectoryCollection { private List localDirs; private List errorDirs; private List fullDirs; + private Map directoryErrorInfo; // read/write lock for accessing above directories. 
private final ReadLock readLock; @@ -192,6 +194,7 @@ public class DirectoryCollection { localDirs = new CopyOnWriteArrayList<>(dirs); errorDirs = new CopyOnWriteArrayList<>(); fullDirs = new CopyOnWriteArrayList<>(); + directoryErrorInfo = new ConcurrentHashMap<>(); ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); this.readLock = lock.readLock(); @@ -248,11 +251,25 @@ public class DirectoryCollection { /** * @return the directories that have used all disk space */ - List getFullDirs() { this.readLock.lock(); try { - return fullDirs; + return Collections.unmodifiableList(fullDirs); + } finally { + this.readLock.unlock(); + } + } + + /** + * @return the directories that have errors - many not have appropriate permissions + * or other disk validation checks might have failed in {@link DiskValidator} + * + */ + @InterfaceStability.Evolving + List getErroredDirs() { + this.readLock.lock(); + try { + return Collections.unmodifiableList(errorDirs); } finally { this.readLock.unlock(); } @@ -270,6 +287,39 @@ public class DirectoryCollection { } } + /** + * + * @param dirName Absolute path of Directory for which error diagnostics are needed + * @return DiskErrorInformation - disk error diagnostics for the specified directory + * null - the disk associated with the directory has passed disk utilization checks + * /error validations in {@link DiskValidator} + * + */ + @InterfaceStability.Evolving + DiskErrorInformation getDirectoryErrorInfo(String dirName) { + this.readLock.lock(); + try { + return directoryErrorInfo.get(dirName); + } finally { + this.readLock.unlock(); + } + } + + /** + * + * @param dirName Absolute path of Directory for which the disk has been marked as unhealthy + * @return Check if disk associated with the directory is unhealthy + */ + @InterfaceStability.Evolving + boolean isDiskUnHealthy(String dirName) { + this.readLock.lock(); + try { + return directoryErrorInfo.containsKey(dirName); + } finally { + this.readLock.unlock(); + } + } + /** * Create any non-existent directories and parent directories, updating the * list of valid directories if necessary. 
@@ -297,6 +347,9 @@ public class DirectoryCollection { try { localDirs.remove(dir); errorDirs.add(dir); + directoryErrorInfo.put(dir, + new DiskErrorInformation(DiskErrorCause.OTHER, + "Cannot create directory : " + dir + ", error " + e.getMessage())); numFailures++; } finally { this.writeLock.unlock(); @@ -343,11 +396,13 @@ public class DirectoryCollection { localDirs.clear(); errorDirs.clear(); fullDirs.clear(); + directoryErrorInfo.clear(); for (Map.Entry entry : dirsFailedCheck .entrySet()) { String dir = entry.getKey(); DiskErrorInformation errorInformation = entry.getValue(); + switch (entry.getValue().cause) { case DISK_FULL: fullDirs.add(entry.getKey()); @@ -359,6 +414,8 @@ public class DirectoryCollection { LOG.warn(entry.getValue().cause + " is unknown for disk error."); break; } + directoryErrorInfo.put(entry.getKey(), errorInformation); + if (preCheckGoodDirs.contains(dir)) { LOG.warn("Directory " + dir + " error, " + errorInformation.message + ", removing from list of valid directories"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index 2aaa8359e7b..b3e13b4942e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -307,7 +307,7 @@ public class LinuxContainerExecutor extends ContainerExecutor { .getConfiguredResourceHandlerChain(conf); if (LOG.isDebugEnabled()) { LOG.debug("Resource handler chain enabled = " + (resourceHandlerChain - == null)); + != null)); } if (resourceHandlerChain != null) { LOG.debug("Bootstrapping resource handler chain"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java index f8cb4eee709..6e00808ae88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java @@ -53,6 +53,8 @@ public class LocalDirsHandlerService extends AbstractService { private static Log LOG = LogFactory.getLog(LocalDirsHandlerService.class); + private static final String diskCapacityExceededErrorMsg = "usable space is below configured utilization percentage/no more usable space"; + /** * Good local directories, use internally, * initial value is the same as NM_LOCAL_DIRS. 
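(For orientation: the hunk below rewrites the NM disk-health report so that each failed directory is listed together with its recorded DiskErrorInformation message rather than as a bare comma-separated list of paths. An illustrative report, not actual output, might read: "1/4 local-dirs have errors:  [ /grid/0/yarn/local : Cannot create directory : /grid/0/yarn/local, error Permission denied ] ".)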
@@ -344,21 +346,36 @@ public class LocalDirsHandlerService extends AbstractService { } StringBuilder report = new StringBuilder(); - List failedLocalDirsList = localDirs.getFailedDirs(); - List failedLogDirsList = logDirs.getFailedDirs(); + List erroredLocalDirsList = localDirs.getErroredDirs(); + List erroredLogDirsList = logDirs.getErroredDirs(); + List diskFullLocalDirsList = localDirs.getFullDirs(); + List diskFullLogDirsList = logDirs.getFullDirs(); List goodLocalDirsList = localDirs.getGoodDirs(); List goodLogDirsList = logDirs.getGoodDirs(); - int numLocalDirs = goodLocalDirsList.size() + failedLocalDirsList.size(); - int numLogDirs = goodLogDirsList.size() + failedLogDirsList.size(); + + int numLocalDirs = goodLocalDirsList.size() + erroredLocalDirsList.size() + diskFullLocalDirsList.size(); + int numLogDirs = goodLogDirsList.size() + erroredLogDirsList.size() + diskFullLogDirsList.size(); if (!listGoodDirs) { - if (!failedLocalDirsList.isEmpty()) { - report.append(failedLocalDirsList.size() + "/" + numLocalDirs - + " local-dirs are bad: " - + StringUtils.join(",", failedLocalDirsList) + "; "); + if (!erroredLocalDirsList.isEmpty()) { + report.append(erroredLocalDirsList.size() + "/" + numLocalDirs + + " local-dirs have errors: " + + buildDiskErrorReport(erroredLocalDirsList, localDirs)); } - if (!failedLogDirsList.isEmpty()) { - report.append(failedLogDirsList.size() + "/" + numLogDirs - + " log-dirs are bad: " + StringUtils.join(",", failedLogDirsList)); + if (!diskFullLocalDirsList.isEmpty()) { + report.append(diskFullLocalDirsList.size() + "/" + numLocalDirs + + " local-dirs " + diskCapacityExceededErrorMsg + + buildDiskErrorReport(diskFullLocalDirsList, localDirs) + "; "); + } + + if (!erroredLogDirsList.isEmpty()) { + report.append(erroredLogDirsList.size() + "/" + numLogDirs + + " log-dirs have errors: " + + buildDiskErrorReport(erroredLogDirsList, logDirs)); + } + if (!diskFullLogDirsList.isEmpty()) { + report.append(diskFullLogDirsList.size() + "/" + numLogDirs + + " log-dirs " + diskCapacityExceededErrorMsg + + buildDiskErrorReport(diskFullLogDirsList, logDirs)); } } else { report.append(goodLocalDirsList.size() + "/" + numLocalDirs @@ -620,4 +637,24 @@ public class LocalDirsHandlerService extends AbstractService { logDirs.getGoodDirsDiskUtilizationPercentage()); } } + + private String buildDiskErrorReport(List dirs, DirectoryCollection directoryCollection) { + StringBuilder sb = new StringBuilder(); + + sb.append(" [ "); + for (int i = 0; i < dirs.size(); i++) { + final String dirName = dirs.get(i); + if ( directoryCollection.isDiskUnHealthy(dirName)) { + sb.append(dirName + " : " + directoryCollection.getDirectoryErrorInfo(dirName).message); + } else { + sb.append(dirName + " : " + "Unknown cause for disk error"); + } + + if ( i != (dirs.size() - 1)) { + sb.append(" , "); + } + } + sb.append(" ] "); + return sb.toString(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 00073d85aae..ade42e3907a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -639,7 +639,6 @@ public class NodeStatusUpdaterImpl extends AbstractService implements public void removeOrTrackCompletedContainersFromContext( List containerIds) throws IOException { Set removedContainers = new HashSet(); - Set removedNullContainers = new HashSet(); pendingContainersToRemove.addAll(containerIds); Iterator iter = pendingContainersToRemove.iterator(); @@ -649,7 +648,6 @@ public class NodeStatusUpdaterImpl extends AbstractService implements Container nmContainer = context.getContainers().get(containerId); if (nmContainer == null) { iter.remove(); - removedNullContainers.add(containerId); } else if (nmContainer.getContainerState().equals( org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.DONE)) { context.getContainers().remove(containerId); @@ -712,11 +710,12 @@ public class NodeStatusUpdaterImpl extends AbstractService implements public void removeVeryOldStoppedContainersFromCache() { synchronized (recentlyStoppedContainers) { long currentTime = System.currentTimeMillis(); - Iterator i = - recentlyStoppedContainers.keySet().iterator(); + Iterator> i = + recentlyStoppedContainers.entrySet().iterator(); while (i.hasNext()) { - ContainerId cid = i.next(); - if (recentlyStoppedContainers.get(cid) < currentTime) { + Entry mapEntry = i.next(); + ContainerId cid = mapEntry.getKey(); + if (mapEntry.getValue() < currentTime) { if (!context.getContainers().containsKey(cid)) { i.remove(); try { @@ -1100,12 +1099,10 @@ public class NodeStatusUpdaterImpl extends AbstractService implements parseCredentials(systemCredentials)); } List - containersToDecrease = response.getContainersToDecrease(); - if (!containersToDecrease.isEmpty()) { + containersToUpdate = response.getContainersToUpdate(); + if (!containersToUpdate.isEmpty()) { dispatcher.getEventHandler().handle( - new CMgrDecreaseContainersResourceEvent( - containersToDecrease) - ); + new CMgrUpdateContainersEvent(containersToUpdate)); } // SignalContainer request originally comes from end users via diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java index 22fc8f61014..3ba4d20d6e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java @@ -36,7 +36,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; -import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -48,6 +47,7 @@ import 
org.apache.hadoop.yarn.server.api.ServerRMProxy; import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterDistributedSchedulingAMResponse; +import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -134,7 +134,8 @@ public final class DefaultRequestInterceptor extends } AllocateResponse allocateResponse = rmClient.allocate(request); if (allocateResponse.getAMRMToken() != null) { - updateAMRMToken(allocateResponse.getAMRMToken()); + YarnServerSecurityUtils.updateAMRMToken(allocateResponse.getAMRMToken(), + this.user, getConf()); } return allocateResponse; @@ -170,7 +171,9 @@ public final class DefaultRequestInterceptor extends ((DistributedSchedulingAMProtocol)rmClient) .allocateForDistributedScheduling(request); if (allocateResponse.getAllocateResponse().getAMRMToken() != null) { - updateAMRMToken(allocateResponse.getAllocateResponse().getAMRMToken()); + YarnServerSecurityUtils.updateAMRMToken( + allocateResponse.getAllocateResponse().getAMRMToken(), this.user, + getConf()); } return allocateResponse; } else { @@ -195,18 +198,6 @@ public final class DefaultRequestInterceptor extends + "Check if the interceptor pipeline configuration is correct"); } - private void updateAMRMToken(Token token) throws IOException { - org.apache.hadoop.security.token.Token amrmToken = - new org.apache.hadoop.security.token.Token( - token.getIdentifier().array(), token.getPassword().array(), - new Text(token.getKind()), new Text(token.getService())); - // Preserve the token service sent by the RM when adding the token - // to ensure we replace the previous token setup by the RM. - // Afterwards we can update the service address for the RPC layer. 
- user.addToken(amrmToken); - amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConf())); - } - @VisibleForTesting public void setRMClient(final ApplicationMasterProtocol rmClient) { if (rmClient instanceof DistributedSchedulingAMProtocol) { @@ -257,18 +248,11 @@ public final class DefaultRequestInterceptor extends for (org.apache.hadoop.security.token.Token token : UserGroupInformation .getCurrentUser().getTokens()) { if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) { - token.setService(getAMRMTokenService(conf)); + token.setService(ClientRMProxy.getAMRMTokenService(conf)); } } } - @InterfaceStability.Unstable - public static Text getAMRMTokenService(Configuration conf) { - return getTokenService(conf, YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); - } - @InterfaceStability.Unstable public static Text getTokenService(Configuration conf, String address, String defaultAddr, int defaultPort) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java new file mode 100644 index 00000000000..28724aaf25c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java @@ -0,0 +1,1150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.amrmproxy; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NMToken; +import org.apache.hadoop.yarn.api.records.PreemptionContract; +import org.apache.hadoop.yarn.api.records.PreemptionMessage; +import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.StrictPreemptionContract; +import org.apache.hadoop.yarn.api.records.UpdateContainerRequest; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.failover.FederationProxyProviderUtil; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.uam.UnmanagedAMPoolManager; +import org.apache.hadoop.yarn.server.utils.AMRMClientUtils; +import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils; +import org.apache.hadoop.yarn.util.AsyncCallback; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; + +/** + * Extends the AbstractRequestInterceptor and provides an implementation for + * federation of YARN RM and scaling an application across multiple YARN + * sub-clusters. All the federation specific implementation is encapsulated in + * this class. This is always the last intercepter in the chain. 
+ */ +public class FederationInterceptor extends AbstractRequestInterceptor { + private static final Logger LOG = + LoggerFactory.getLogger(FederationInterceptor.class); + + /** + * The home sub-cluster is the sub-cluster where the AM container is running + * in. + */ + private ApplicationMasterProtocol homeRM; + private SubClusterId homeSubClusterId; + + /** + * UAM pool for secondary sub-clusters (ones other than home sub-cluster), + * using subClusterId as uamId. One UAM is created per sub-cluster RM except + * the home RM. + * + * Creation and register of UAM in secondary sub-clusters happen on-demand, + * when AMRMProxy policy routes resource request to these sub-clusters for the + * first time. AM heart beats to them are also handled asynchronously for + * performance reasons. + */ + private UnmanagedAMPoolManager uamPool; + + /** Thread pool used for asynchronous operations. */ + private ExecutorService threadpool; + + /** + * Stores the AllocateResponses that are received asynchronously from all the + * sub-cluster resource managers except the home RM. + */ + private Map> asyncResponseSink; + + /** + * Used to keep track of the container Id and the sub cluster RM that created + * the container, so that we know which sub-cluster to forward later requests + * about existing containers to. + */ + private Map containerIdToSubClusterIdMap; + + /** + * The original registration request that was sent by the AM. This instance is + * reused to register/re-register with all the sub-cluster RMs. + */ + private RegisterApplicationMasterRequest amRegistrationRequest; + + /** + * The original registration response from home RM. This instance is reused + * for duplicate register request from AM, triggered by timeout between AM and + * AMRMProxy. + */ + private RegisterApplicationMasterResponse amRegistrationResponse; + + private FederationStateStoreFacade federationFacade; + + private SubClusterResolver subClusterResolver; + + /** The policy used to split requests among sub-clusters. */ + private FederationAMRMProxyPolicy policyInterpreter; + + /** + * The proxy ugi used to talk to home RM, loaded with the up-to-date AMRMToken + * issued by home RM. + */ + private UserGroupInformation appOwner; + + /** + * Creates an instance of the FederationInterceptor class. + */ + public FederationInterceptor() { + this.containerIdToSubClusterIdMap = new ConcurrentHashMap<>(); + this.asyncResponseSink = new ConcurrentHashMap<>(); + this.threadpool = Executors.newCachedThreadPool(); + this.uamPool = createUnmanagedAMPoolManager(this.threadpool); + this.amRegistrationRequest = null; + this.amRegistrationResponse = null; + } + + /** + * Initializes the instance using specified context. 
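// [Editor's aside -- illustrative, not part of the patch] The generic type parameters
// on the two bookkeeping maps declared above were dropped in this rendering of the
// diff. A plausible reconstruction, inferred from how the fields are used later in
// the class (asyncResponseSink buffers secondary-RM responses until the next AM
// heartbeat; containerIdToSubClusterIdMap routes later requests about a container
// back to the sub-cluster that allocated it):
private Map<SubClusterId, List<AllocateResponse>> asyncResponseSink;
private Map<ContainerId, SubClusterId> containerIdToSubClusterIdMap;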
+ */ + @Override + public void init(AMRMProxyApplicationContext appContext) { + super.init(appContext); + LOG.info("Initializing Federation Interceptor"); + + // Update the conf if available + Configuration conf = appContext.getConf(); + if (conf == null) { + conf = getConf(); + } else { + setConf(conf); + } + + try { + this.appOwner = UserGroupInformation.createProxyUser(appContext.getUser(), + UserGroupInformation.getCurrentUser()); + } catch (Exception ex) { + throw new YarnRuntimeException(ex); + } + + this.homeSubClusterId = + SubClusterId.newInstance(YarnConfiguration.getClusterId(conf)); + this.homeRM = createHomeRMProxy(appContext); + + this.federationFacade = FederationStateStoreFacade.getInstance(); + this.subClusterResolver = this.federationFacade.getSubClusterResolver(); + + // AMRMProxyPolicy will be initialized in registerApplicationMaster + this.policyInterpreter = null; + + this.uamPool.init(conf); + this.uamPool.start(); + } + + /** + * Sends the application master's registration request to the home RM. + * + * Between AM and AMRMProxy, FederationInterceptor modifies the RM behavior, + * so that when AM registers more than once, it returns the same register + * success response instead of throwing + * {@link InvalidApplicationMasterRequestException}. Furthermore, we present + * to AM as if we are the RM that never fails over. When actual RM fails over, + * we always re-register automatically. + * + * We did this because FederationInterceptor can receive concurrent register + * requests from AM because of timeout between AM and AMRMProxy, which is + * shorter than the timeout + failOver between FederationInterceptor + * (AMRMProxy) and RM. + * + * For the same reason, this method needs to be synchronized. + */ + @Override + public synchronized RegisterApplicationMasterResponse + registerApplicationMaster(RegisterApplicationMasterRequest request) + throws YarnException, IOException { + // If AM is calling with a different request, complain + if (this.amRegistrationRequest != null) { + if (!this.amRegistrationRequest.equals(request)) { + throw new YarnException("AM should not call " + + "registerApplicationMaster with a different request body"); + } + } else { + // Save the registration request. This will be used for registering with + // secondary sub-clusters using UAMs, as well as re-register later + this.amRegistrationRequest = request; + } + + /* + * Present to AM as if we are the RM that never fails over. When actual RM + * fails over, we always re-register automatically. + * + * We did this because it is possible for AM to send duplicate register + * request because of timeout. When it happens, it is fine to simply return + * the success message. Out of all outstanding register threads, only the + * last one will still have an unbroken RPC connection and successfully + * return the response. + */ + if (this.amRegistrationResponse != null) { + return this.amRegistrationResponse; + } + + /* + * Send a registration request to the home resource manager. Note that here + * we don't register with other sub-cluster resource managers because that + * will prevent us from using new sub-clusters that get added while the AM + * is running and will breaks the elasticity feature. The registration with + * the other sub-cluster RM will be done lazily as needed later. + */ + this.amRegistrationResponse = + this.homeRM.registerApplicationMaster(request); + + // the queue this application belongs will be used for getting + // AMRMProxy policy from state store. 
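// [Editor's aside -- condensed sketch, not part of the patch; method name is
// illustrative] The duplicate-register handling above in compact form: remember the
// first request, reject a different one, and replay the cached success response on
// later duplicate calls, so a timeout-driven retry from the AM never surfaces an
// InvalidApplicationMasterRequestException.
public synchronized RegisterApplicationMasterResponse registerOnce(
    RegisterApplicationMasterRequest request) throws YarnException, IOException {
  if (amRegistrationRequest != null && !amRegistrationRequest.equals(request)) {
    throw new YarnException("AM should not re-register with a different request");
  }
  amRegistrationRequest = request;       // reused later for UAMs and for re-register
  if (amRegistrationResponse != null) {
    return amRegistrationResponse;       // duplicate call: replay the cached success
  }
  amRegistrationResponse = homeRM.registerApplicationMaster(request);
  return amRegistrationResponse;
}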
+ String queue = this.amRegistrationResponse.getQueue(); + if (queue == null) { + LOG.warn("Received null queue for application " + + getApplicationContext().getApplicationAttemptId().getApplicationId() + + " from home sub-cluster. Will use default queue name " + + YarnConfiguration.DEFAULT_QUEUE_NAME + + " for getting AMRMProxyPolicy"); + } else { + LOG.info("Application " + + getApplicationContext().getApplicationAttemptId().getApplicationId() + + " belongs to queue " + queue); + } + + // Initialize the AMRMProxyPolicy + try { + this.policyInterpreter = + FederationPolicyUtils.loadAMRMPolicy(queue, this.policyInterpreter, + getConf(), this.federationFacade, this.homeSubClusterId); + } catch (FederationPolicyInitializationException e) { + throw new YarnRuntimeException(e); + } + return this.amRegistrationResponse; + } + + /** + * Sends the heart beats to the home RM and the secondary sub-cluster RMs that + * are being used by the application. + */ + @Override + public AllocateResponse allocate(AllocateRequest request) + throws YarnException { + Preconditions.checkArgument(this.policyInterpreter != null, + "Allocate should be called after registerApplicationMaster"); + + try { + // Split the heart beat request into multiple requests, one for each + // sub-cluster RM that is used by this application. + Map requests = + splitAllocateRequest(request); + + // Send the requests to the secondary sub-cluster resource managers. + // These secondary requests are send asynchronously and the responses will + // be collected and merged with the home response. In addition, it also + // return the newly registered Unmanaged AMs. + Registrations newRegistrations = + sendRequestsToSecondaryResourceManagers(requests); + + // Send the request to the home RM and get the response + AllocateResponse homeResponse = AMRMClientUtils.allocateWithReRegister( + requests.get(this.homeSubClusterId), this.homeRM, + this.amRegistrationRequest, + getApplicationContext().getApplicationAttemptId()); + + // Notify policy of home response + try { + this.policyInterpreter.notifyOfResponse(this.homeSubClusterId, + homeResponse); + } catch (YarnException e) { + LOG.warn("notifyOfResponse for policy failed for home sub-cluster " + + this.homeSubClusterId, e); + } + + // If the resource manager sent us a new token, add to the current user + if (homeResponse.getAMRMToken() != null) { + LOG.debug("Received new AMRMToken"); + YarnServerSecurityUtils.updateAMRMToken(homeResponse.getAMRMToken(), + this.appOwner, getConf()); + } + + // Merge the responses from home and secondary sub-cluster RMs + homeResponse = mergeAllocateResponses(homeResponse); + + // Merge the containers and NMTokens from the new registrations into + // the homeResponse. + if (!isNullOrEmpty(newRegistrations.getSuccessfulRegistrations())) { + homeResponse = mergeRegistrationResponses(homeResponse, + newRegistrations.getSuccessfulRegistrations()); + } + + // return the final response to the application master. + return homeResponse; + } catch (IOException ex) { + LOG.error("Exception encountered while processing heart beat", ex); + throw new YarnException(ex); + } + } + + /** + * Sends the finish application master request to all the resource managers + * used by the application. 
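// [Editor's aside -- assumption, not part of the patch] The allocate() path above now
// delegates AMRMToken refresh to YarnServerSecurityUtils.updateAMRMToken(token, user,
// conf). That helper is not shown in this diff; a plausible sketch, mirroring the
// private method removed from DefaultRequestInterceptor earlier in this patch (uses
// org.apache.hadoop.io.Text, org.apache.hadoop.yarn.client.ClientRMProxy and
// org.apache.hadoop.yarn.security.AMRMTokenIdentifier):
static void updateAMRMToken(org.apache.hadoop.yarn.api.records.Token token,
    UserGroupInformation user, Configuration conf) {
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
      new org.apache.hadoop.security.token.Token<>(
          token.getIdentifier().array(), token.getPassword().array(),
          new Text(token.getKind()), new Text(token.getService()));
  // Keep the service name sent by the RM so the previous token is replaced,
  // then point the service at the scheduler address for the RPC layer.
  user.addToken(amrmToken);
  amrmToken.setService(ClientRMProxy.getAMRMTokenService(conf));
}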
+ */ + @Override + public FinishApplicationMasterResponse finishApplicationMaster( + FinishApplicationMasterRequest request) + throws YarnException, IOException { + + // TODO: consider adding batchFinishApplicationMaster in UAMPoolManager + boolean failedToUnRegister = false; + ExecutorCompletionService compSvc = + null; + + // Application master is completing operation. Send the finish + // application master request to all the registered sub-cluster resource + // managers in parallel, wait for the responses and aggregate the results. + Set subClusterIds = this.uamPool.getAllUAMIds(); + if (subClusterIds.size() > 0) { + final FinishApplicationMasterRequest finishRequest = request; + compSvc = + new ExecutorCompletionService( + this.threadpool); + + LOG.info("Sending finish application request to {} sub-cluster RMs", + subClusterIds.size()); + for (final String subClusterId : subClusterIds) { + compSvc.submit(new Callable() { + @Override + public FinishApplicationMasterResponseInfo call() throws Exception { + LOG.info("Sending finish application request to RM {}", + subClusterId); + FinishApplicationMasterResponse uamResponse = null; + try { + uamResponse = + uamPool.finishApplicationMaster(subClusterId, finishRequest); + } catch (Throwable e) { + LOG.warn("Failed to finish unmanaged application master: " + + "RM address: " + subClusterId + " ApplicationId: " + + getApplicationContext().getApplicationAttemptId(), e); + } + return new FinishApplicationMasterResponseInfo(uamResponse, + subClusterId); + } + }); + } + } + + // While the finish application request is being processed + // asynchronously by other sub-cluster resource managers, send the same + // request to the home resource manager on this thread. + FinishApplicationMasterResponse homeResponse = + AMRMClientUtils.finishAMWithReRegister(request, this.homeRM, + this.amRegistrationRequest, + getApplicationContext().getApplicationAttemptId()); + + if (subClusterIds.size() > 0) { + // Wait for other sub-cluster resource managers to return the + // response and merge it with the home response + LOG.info( + "Waiting for finish application response from {} sub-cluster RMs", + subClusterIds.size()); + for (int i = 0; i < subClusterIds.size(); ++i) { + try { + Future future = compSvc.take(); + FinishApplicationMasterResponseInfo uamResponse = future.get(); + if (LOG.isDebugEnabled()) { + LOG.debug("Received finish application response from RM: " + + uamResponse.getSubClusterId()); + } + if (uamResponse.getResponse() == null + || !uamResponse.getResponse().getIsUnregistered()) { + failedToUnRegister = true; + } + } catch (Throwable e) { + failedToUnRegister = true; + LOG.warn("Failed to finish unmanaged application master: " + + " ApplicationId: " + + getApplicationContext().getApplicationAttemptId(), e); + } + } + } + + if (failedToUnRegister) { + homeResponse.setIsUnregistered(false); + } + return homeResponse; + } + + @Override + public void setNextInterceptor(RequestInterceptor next) { + throw new YarnRuntimeException( + "setNextInterceptor is being called on FederationInterceptor. " + + "It should always be used as the last interceptor in the chain"); + } + + /** + * This is called when the application pipeline is being destroyed. We will + * release all the resources that we are holding in this call. 
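// [Editor's aside -- condensed sketch, not part of the patch; generic type arguments
// were dropped in this rendering and are reconstructed here] The finish path above in
// outline: fan the request out to every secondary UAM, make the home-RM call on the
// current thread, then drain the completion service and veto the unregister if any
// sub-cluster failed to unregister.
ExecutorCompletionService<FinishApplicationMasterResponseInfo> svc =
    new ExecutorCompletionService<>(threadpool);
for (final String uamId : uamPool.getAllUAMIds()) {
  svc.submit(() -> new FinishApplicationMasterResponseInfo(
      uamPool.finishApplicationMaster(uamId, request), uamId));
}
FinishApplicationMasterResponse home = AMRMClientUtils.finishAMWithReRegister(
    request, homeRM, amRegistrationRequest,
    getApplicationContext().getApplicationAttemptId());
for (int i = 0; i < uamPool.getAllUAMIds().size(); i++) {
  try {
    FinishApplicationMasterResponseInfo info = svc.take().get();
    if (info.getResponse() == null || !info.getResponse().getIsUnregistered()) {
      home.setIsUnregistered(false);   // at least one sub-cluster did not unregister
    }
  } catch (Exception e) {
    home.setIsUnregistered(false);     // a failed or hung sub-cluster also vetoes it
  }
}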
+ */ + @Override + public void shutdown() { + if (this.uamPool != null) { + this.uamPool.stop(); + } + if (threadpool != null) { + try { + threadpool.shutdown(); + } catch (Throwable ex) { + } + threadpool = null; + } + super.shutdown(); + } + + /** + * Create the UAM pool manager for secondary sub-clsuters. For unit test to + * override. + * + * @param threadPool the thread pool to use + * @return the UAM pool manager instance + */ + @VisibleForTesting + protected UnmanagedAMPoolManager createUnmanagedAMPoolManager( + ExecutorService threadPool) { + return new UnmanagedAMPoolManager(threadPool); + } + + /** + * Returns instance of the ApplicationMasterProtocol proxy class that is used + * to connect to the Home resource manager. + * + * @param appContext AMRMProxyApplicationContext + * @return the proxy created + */ + protected ApplicationMasterProtocol createHomeRMProxy( + AMRMProxyApplicationContext appContext) { + try { + return FederationProxyProviderUtil.createRMProxy(appContext.getConf(), + ApplicationMasterProtocol.class, this.homeSubClusterId, this.appOwner, + appContext.getAMRMToken()); + } catch (Exception ex) { + throw new YarnRuntimeException(ex); + } + } + + private SubClusterId getSubClusterForNode(String nodeName) { + SubClusterId subClusterId = null; + try { + subClusterId = this.subClusterResolver.getSubClusterForNode(nodeName); + } catch (YarnException e) { + LOG.error("Failed to resolve sub-cluster for node " + nodeName + + ", skipping this node", e); + return null; + } + if (subClusterId == null) { + LOG.error("Failed to resolve sub-cluster for node {}, skipping this node", + nodeName); + return null; + } + return subClusterId; + } + + /** + * In federation, the heart beat request needs to be sent to all the sub + * clusters from which the AM has requested containers. This method splits the + * specified AllocateRequest from the AM and creates a new request for each + * sub-cluster RM. + */ + private Map splitAllocateRequest( + AllocateRequest request) throws YarnException { + Map requestMap = + new HashMap(); + + // Create heart beat request for home sub-cluster resource manager + findOrCreateAllocateRequestForSubCluster(this.homeSubClusterId, request, + requestMap); + + // Create heart beat request instances for all other already registered + // sub-cluster resource managers + Set subClusterIds = this.uamPool.getAllUAMIds(); + for (String subClusterId : subClusterIds) { + findOrCreateAllocateRequestForSubCluster( + SubClusterId.newInstance(subClusterId), request, requestMap); + } + + if (!isNullOrEmpty(request.getAskList())) { + // Ask the federation policy interpreter to split the ask list for + // sending it to all the sub-cluster resource managers. + Map> asks = + splitResourceRequests(request.getAskList()); + + // Add the askLists to the corresponding sub-cluster requests. 
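// [Editor's note -- illustrative, not part of the patch] The local declarations in
// splitAllocateRequest above also lost their type arguments in this rendering; a
// plausible reconstruction based on how they are used:
Map<SubClusterId, AllocateRequest> requestMap = new HashMap<>();
Map<SubClusterId, List<ResourceRequest>> asks =
    splitResourceRequests(request.getAskList());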
+ for (Entry> entry : asks.entrySet()) { + AllocateRequest newRequest = findOrCreateAllocateRequestForSubCluster( + entry.getKey(), request, requestMap); + newRequest.getAskList().addAll(entry.getValue()); + } + } + + if (request.getResourceBlacklistRequest() != null && !isNullOrEmpty( + request.getResourceBlacklistRequest().getBlacklistAdditions())) { + for (String resourceName : request.getResourceBlacklistRequest() + .getBlacklistAdditions()) { + SubClusterId subClusterId = getSubClusterForNode(resourceName); + if (subClusterId != null) { + AllocateRequest newRequest = findOrCreateAllocateRequestForSubCluster( + subClusterId, request, requestMap); + newRequest.getResourceBlacklistRequest().getBlacklistAdditions() + .add(resourceName); + } + } + } + + if (request.getResourceBlacklistRequest() != null && !isNullOrEmpty( + request.getResourceBlacklistRequest().getBlacklistRemovals())) { + for (String resourceName : request.getResourceBlacklistRequest() + .getBlacklistRemovals()) { + SubClusterId subClusterId = getSubClusterForNode(resourceName); + if (subClusterId != null) { + AllocateRequest newRequest = findOrCreateAllocateRequestForSubCluster( + subClusterId, request, requestMap); + newRequest.getResourceBlacklistRequest().getBlacklistRemovals() + .add(resourceName); + } + } + } + + if (!isNullOrEmpty(request.getReleaseList())) { + for (ContainerId cid : request.getReleaseList()) { + if (warnIfNotExists(cid, "release")) { + SubClusterId subClusterId = + this.containerIdToSubClusterIdMap.get(cid); + AllocateRequest newRequest = requestMap.get(subClusterId); + newRequest.getReleaseList().add(cid); + } + } + } + + if (!isNullOrEmpty(request.getUpdateRequests())) { + for (UpdateContainerRequest ucr : request.getUpdateRequests()) { + if (warnIfNotExists(ucr.getContainerId(), "update")) { + SubClusterId subClusterId = + this.containerIdToSubClusterIdMap.get(ucr.getContainerId()); + AllocateRequest newRequest = requestMap.get(subClusterId); + newRequest.getUpdateRequests().add(ucr); + } + } + } + + return requestMap; + } + + /** + * This methods sends the specified AllocateRequests to the appropriate + * sub-cluster resource managers. + * + * @param requests contains the heart beat requests to send to the resource + * manager keyed by the resource manager address + * @return the registration responses from the newly added sub-cluster + * resource managers + * @throws YarnException + * @throws IOException + */ + private Registrations sendRequestsToSecondaryResourceManagers( + Map requests) + throws YarnException, IOException { + + // Create new UAM instances for the sub-cluster that we have not seen + // before + Registrations registrations = registerWithNewSubClusters(requests.keySet()); + + // Now that all the registrations are done, send the allocation request + // to the sub-cluster RMs using the Unmanaged application masters + // asynchronously and don't wait for the response. The responses will + // arrive asynchronously and will be added to the response sink. These + // responses will be sent to the application master in some future heart + // beat response. + for (Entry entry : requests.entrySet()) { + final SubClusterId subClusterId = entry.getKey(); + + if (subClusterId.equals(this.homeSubClusterId)) { + // Skip the request for the home sub-cluster resource manager. + // It will be handled separately in the allocate() method + continue; + } + + if (!this.uamPool.hasUAMId(subClusterId.getId())) { + // TODO: This means that the registration for this sub-cluster RM + // failed. 
For now, we ignore the resource requests and continue + // but we need to fix this and handle this situation. One way would + // be to send the request to another RM by consulting the policy. + LOG.warn("Unmanaged AM registration not found for sub-cluster {}", + subClusterId); + continue; + } + + this.uamPool.allocateAsync(subClusterId.getId(), entry.getValue(), + new AsyncCallback() { + @Override + public void callback(AllocateResponse response) { + synchronized (asyncResponseSink) { + List responses = null; + if (asyncResponseSink.containsKey(subClusterId)) { + responses = asyncResponseSink.get(subClusterId); + } else { + responses = new ArrayList<>(); + asyncResponseSink.put(subClusterId, responses); + } + responses.add(response); + } + + // Notify policy of secondary sub-cluster responses + try { + policyInterpreter.notifyOfResponse(subClusterId, response); + } catch (YarnException e) { + LOG.warn( + "notifyOfResponse for policy failed for home sub-cluster " + + subClusterId, + e); + } + } + }); + } + + return registrations; + } + + /** + * This method ensures that Unmanaged AMs are created for each of the + * specified sub-cluster specified in the input and registers with the + * corresponding resource managers. + */ + private Registrations registerWithNewSubClusters( + Set subClusterSet) throws IOException { + + List failedRegistrations = new ArrayList<>(); + Map + successfulRegistrations = new HashMap<>(); + + // Check to see if there are any new sub-clusters in this request + // list and create and register Unmanaged AM instance for the new ones + List newSubClusters = new ArrayList<>(); + for (SubClusterId subClusterId : subClusterSet) { + if (!subClusterId.equals(this.homeSubClusterId) + && !this.uamPool.hasUAMId(subClusterId.getId())) { + newSubClusters.add(subClusterId.getId()); + } + } + + if (newSubClusters.size() > 0) { + final RegisterApplicationMasterRequest registerRequest = + this.amRegistrationRequest; + final AMRMProxyApplicationContext appContext = getApplicationContext(); + ExecutorCompletionService + completionService = new ExecutorCompletionService<>(threadpool); + + for (final String subClusterId : newSubClusters) { + completionService + .submit(new Callable() { + @Override + public RegisterApplicationMasterResponseInfo call() + throws Exception { + + // Create a config loaded with federation on and subclusterId + // for each UAM + YarnConfiguration config = new YarnConfiguration(getConf()); + FederationProxyProviderUtil.updateConfForFederation(config, + subClusterId); + + RegisterApplicationMasterResponse uamResponse = null; + try { + // For appNameSuffix, use subClusterId of the home sub-cluster + uamResponse = uamPool.createAndRegisterNewUAM(subClusterId, + registerRequest, config, + appContext.getApplicationAttemptId().getApplicationId(), + amRegistrationResponse.getQueue(), appContext.getUser(), + homeSubClusterId.toString()); + } catch (Throwable e) { + LOG.error("Failed to register application master: " + + subClusterId + " Application: " + + appContext.getApplicationAttemptId(), e); + } + return new RegisterApplicationMasterResponseInfo(uamResponse, + SubClusterId.newInstance(subClusterId)); + } + }); + } + + // Wait for other sub-cluster resource managers to return the + // response and add it to the Map for returning to the caller + for (int i = 0; i < newSubClusters.size(); ++i) { + try { + Future future = + completionService.take(); + RegisterApplicationMasterResponseInfo uamResponse = future.get(); + if (LOG.isDebugEnabled()) { + LOG.debug("Received 
register application response from RM: " + + uamResponse.getSubClusterId()); + } + + if (uamResponse.getResponse() == null) { + failedRegistrations.add(uamResponse.getSubClusterId()); + } else { + LOG.info("Successfully registered unmanaged application master: " + + uamResponse.getSubClusterId() + " ApplicationId: " + + getApplicationContext().getApplicationAttemptId()); + successfulRegistrations.put(uamResponse.getSubClusterId(), + uamResponse.getResponse()); + } + } catch (Exception e) { + LOG.warn("Failed to register unmanaged application master: " + + " ApplicationId: " + + getApplicationContext().getApplicationAttemptId(), e); + } + } + } + + return new Registrations(successfulRegistrations, failedRegistrations); + } + + /** + * Merges the responses from other sub-clusters that we received + * asynchronously with the specified home cluster response and keeps track of + * the containers received from each sub-cluster resource managers. + */ + private AllocateResponse mergeAllocateResponses( + AllocateResponse homeResponse) { + // Timing issue, we need to remove the completed and then save the new ones. + if (LOG.isDebugEnabled()) { + LOG.debug("Remove containers: " + + homeResponse.getCompletedContainersStatuses()); + LOG.debug("Adding containers: " + homeResponse.getAllocatedContainers()); + } + removeFinishedContainersFromCache( + homeResponse.getCompletedContainersStatuses()); + cacheAllocatedContainers(homeResponse.getAllocatedContainers(), + this.homeSubClusterId); + + synchronized (this.asyncResponseSink) { + for (Entry> entry : asyncResponseSink + .entrySet()) { + SubClusterId subClusterId = entry.getKey(); + List responses = entry.getValue(); + if (responses.size() > 0) { + for (AllocateResponse response : responses) { + removeFinishedContainersFromCache( + response.getCompletedContainersStatuses()); + cacheAllocatedContainers(response.getAllocatedContainers(), + subClusterId); + mergeAllocateResponse(homeResponse, response, subClusterId); + } + responses.clear(); + } + } + } + + return homeResponse; + } + + /** + * Removes the finished containers from the local cache. + */ + private void removeFinishedContainersFromCache( + List finishedContainers) { + for (ContainerStatus container : finishedContainers) { + if (containerIdToSubClusterIdMap + .containsKey(container.getContainerId())) { + containerIdToSubClusterIdMap.remove(container.getContainerId()); + } + } + } + + /** + * Helper method for merging the responses from the secondary sub cluster RMs + * with the home response to return to the AM. 
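// [Editor's aside -- condensed sketch, not part of the patch; `response` and
// `subClusterId` stand for one secondary AllocateResponse and the sub-cluster that
// produced it] The bookkeeping done while merging each response above: forget
// containers that have completed, and record which sub-cluster allocated every new
// container so future release/update requests can be routed to the right RM.
for (ContainerStatus done : response.getCompletedContainersStatuses()) {
  containerIdToSubClusterIdMap.remove(done.getContainerId());
}
for (Container allocated : response.getAllocatedContainers()) {
  containerIdToSubClusterIdMap.put(allocated.getId(), subClusterId);
}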
+ */ + private AllocateResponse mergeRegistrationResponses( + AllocateResponse homeResponse, + Map registrations) { + + for (Entry entry : + registrations.entrySet()) { + RegisterApplicationMasterResponse registration = entry.getValue(); + + if (!isNullOrEmpty(registration.getContainersFromPreviousAttempts())) { + List tempContainers = homeResponse.getAllocatedContainers(); + if (!isNullOrEmpty(tempContainers)) { + tempContainers + .addAll(registration.getContainersFromPreviousAttempts()); + homeResponse.setAllocatedContainers(tempContainers); + } else { + homeResponse.setAllocatedContainers( + registration.getContainersFromPreviousAttempts()); + } + cacheAllocatedContainers( + registration.getContainersFromPreviousAttempts(), entry.getKey()); + } + + if (!isNullOrEmpty(registration.getNMTokensFromPreviousAttempts())) { + List tempTokens = homeResponse.getNMTokens(); + if (!isNullOrEmpty(tempTokens)) { + tempTokens.addAll(registration.getNMTokensFromPreviousAttempts()); + homeResponse.setNMTokens(tempTokens); + } else { + homeResponse + .setNMTokens(registration.getNMTokensFromPreviousAttempts()); + } + } + } + + return homeResponse; + } + + private void mergeAllocateResponse(AllocateResponse homeResponse, + AllocateResponse otherResponse, SubClusterId otherRMAddress) { + + if (!isNullOrEmpty(otherResponse.getAllocatedContainers())) { + if (!isNullOrEmpty(homeResponse.getAllocatedContainers())) { + homeResponse.getAllocatedContainers() + .addAll(otherResponse.getAllocatedContainers()); + } else { + homeResponse + .setAllocatedContainers(otherResponse.getAllocatedContainers()); + } + } + + if (otherResponse.getAvailableResources() != null) { + if (homeResponse.getAvailableResources() != null) { + homeResponse.setAvailableResources( + Resources.add(homeResponse.getAvailableResources(), + otherResponse.getAvailableResources())); + } else { + homeResponse + .setAvailableResources(otherResponse.getAvailableResources()); + } + } + + if (!isNullOrEmpty(otherResponse.getCompletedContainersStatuses())) { + if (!isNullOrEmpty(homeResponse.getCompletedContainersStatuses())) { + homeResponse.getCompletedContainersStatuses() + .addAll(otherResponse.getCompletedContainersStatuses()); + } else { + homeResponse.setCompletedContainersStatuses( + otherResponse.getCompletedContainersStatuses()); + } + } + + if (!isNullOrEmpty(otherResponse.getUpdatedNodes())) { + if (!isNullOrEmpty(homeResponse.getUpdatedNodes())) { + homeResponse.getUpdatedNodes().addAll(otherResponse.getUpdatedNodes()); + } else { + homeResponse.setUpdatedNodes(otherResponse.getUpdatedNodes()); + } + } + + if (!isNullOrEmpty(otherResponse.getNMTokens())) { + if (!isNullOrEmpty(homeResponse.getNMTokens())) { + homeResponse.getNMTokens().addAll(otherResponse.getNMTokens()); + } else { + homeResponse.setNMTokens(otherResponse.getNMTokens()); + } + } + + PreemptionMessage homePreempMessage = homeResponse.getPreemptionMessage(); + PreemptionMessage otherPreempMessage = otherResponse.getPreemptionMessage(); + + if (homePreempMessage == null && otherPreempMessage != null) { + homeResponse.setPreemptionMessage(otherPreempMessage); + } + + if (homePreempMessage != null && otherPreempMessage != null) { + PreemptionContract par1 = homePreempMessage.getContract(); + PreemptionContract par2 = otherPreempMessage.getContract(); + + if (par1 == null && par2 != null) { + homePreempMessage.setContract(par2); + } + + if (par1 != null && par2 != null) { + par1.getResourceRequest().addAll(par2.getResourceRequest()); + 
par2.getContainers().addAll(par2.getContainers()); + } + + StrictPreemptionContract spar1 = homePreempMessage.getStrictContract(); + StrictPreemptionContract spar2 = otherPreempMessage.getStrictContract(); + + if (spar1 == null && spar2 != null) { + homePreempMessage.setStrictContract(spar2); + } + + if (spar1 != null && spar2 != null) { + spar1.getContainers().addAll(spar2.getContainers()); + } + } + } + + /** + * Add allocated containers to cache mapping. + */ + private void cacheAllocatedContainers(List containers, + SubClusterId subClusterId) { + for (Container container : containers) { + if (containerIdToSubClusterIdMap.containsKey(container.getId())) { + SubClusterId existingSubClusterId = + containerIdToSubClusterIdMap.get(container.getId()); + if (existingSubClusterId.equals(subClusterId)) { + // When RM fails over, the new RM master might send out the same + // container allocation more than once. Just move on in this case. + LOG.warn( + "Duplicate containerID: {} found in the allocated containers" + + " from same sub-cluster: {}, so ignoring.", + container.getId(), subClusterId); + } else { + // The same container allocation from different sub-clusters, + // something is wrong. + // TODO: YARN-6667 if some subcluster RM is configured wrong, we + // should not fail the entire heartbeat. + throw new YarnRuntimeException( + "Duplicate containerID found in the allocated containers. This" + + " can happen if the RM epoch is not configured properly." + + " ContainerId: " + container.getId().toString() + + " ApplicationId: " + + getApplicationContext().getApplicationAttemptId() + + " From RM: " + subClusterId + + " . Previous container was from sub-cluster: " + + existingSubClusterId); + } + } + + containerIdToSubClusterIdMap.put(container.getId(), subClusterId); + } + } + + /** + * Check to see if an AllocateRequest exists in the Map for the specified sub + * cluster. If not found, create a new one, copy the value of responseId and + * progress from the orignialAMRequest, save it in the specified Map and + * return the new instance. If found, just return the old instance. + */ + private static AllocateRequest findOrCreateAllocateRequestForSubCluster( + SubClusterId subClusterId, AllocateRequest originalAMRequest, + Map requestMap) { + AllocateRequest newRequest = null; + if (requestMap.containsKey(subClusterId)) { + newRequest = requestMap.get(subClusterId); + } else { + newRequest = createAllocateRequest(); + newRequest.setResponseId(originalAMRequest.getResponseId()); + newRequest.setProgress(originalAMRequest.getProgress()); + requestMap.put(subClusterId, newRequest); + } + + return newRequest; + } + + /** + * Create an empty AllocateRequest instance. + */ + private static AllocateRequest createAllocateRequest() { + AllocateRequest request = + AllocateRequest.newInstance(0, 0, null, null, null); + request.setAskList(new ArrayList()); + request.setReleaseList(new ArrayList()); + ResourceBlacklistRequest blackList = + ResourceBlacklistRequest.newInstance(null, null); + blackList.setBlacklistAdditions(new ArrayList()); + blackList.setBlacklistRemovals(new ArrayList()); + request.setResourceBlacklistRequest(blackList); + request.setUpdateRequests(new ArrayList()); + return request; + } + + /** + * Check to see if the specified containerId exists in the cache and log an + * error if not found. 
+ * + * @param containerId the container id + * @param actionName the name of the action + * @return true if the container exists in the map, false otherwise + */ + private boolean warnIfNotExists(ContainerId containerId, String actionName) { + if (!this.containerIdToSubClusterIdMap.containsKey(containerId)) { + LOG.error("AM is trying to {} a container {} that does not exist. ", + actionName, containerId.toString()); + return false; + } + return true; + } + + /** + * Splits the specified request to send it to different sub clusters. The + * splitting algorithm is very simple. If the request does not have a node + * preference, the policy decides the sub cluster. If the request has a node + * preference and if locality is required, then it is sent to the sub cluster + * that contains the requested node. If node preference is specified and + * locality is not required, then the policy decides the sub cluster. + * + * @param askList the ask list to split + * @return the split asks + * @throws YarnException if split fails + */ + protected Map> splitResourceRequests( + List askList) throws YarnException { + return this.policyInterpreter.splitResourceRequests(askList); + } + + @VisibleForTesting + public int getUnmanagedAMPoolSize() { + return this.uamPool.getAllUAMIds().size(); + } + + /** + * Private structure for encapsulating SubClusterId and + * RegisterApplicationMasterResponse instances. + */ + private static class RegisterApplicationMasterResponseInfo { + private RegisterApplicationMasterResponse response; + private SubClusterId subClusterId; + + RegisterApplicationMasterResponseInfo( + RegisterApplicationMasterResponse response, SubClusterId subClusterId) { + this.response = response; + this.subClusterId = subClusterId; + } + + public RegisterApplicationMasterResponse getResponse() { + return response; + } + + public SubClusterId getSubClusterId() { + return subClusterId; + } + } + + /** + * Private structure for encapsulating SubClusterId and + * FinishApplicationMasterResponse instances. + */ + private static class FinishApplicationMasterResponseInfo { + private FinishApplicationMasterResponse response; + private String subClusterId; + + FinishApplicationMasterResponseInfo( + FinishApplicationMasterResponse response, String subClusterId) { + this.response = response; + this.subClusterId = subClusterId; + } + + public FinishApplicationMasterResponse getResponse() { + return response; + } + + public String getSubClusterId() { + return subClusterId; + } + } + + /** + * Private structure for encapsulating successful and failed application + * master registration responses. + */ + private static class Registrations { + private Map + successfulRegistrations; + private List failedRegistrations; + + Registrations( + Map + successfulRegistrations, + List failedRegistrations) { + this.successfulRegistrations = successfulRegistrations; + this.failedRegistrations = failedRegistrations; + } + + public Map + getSuccessfulRegistrations() { + return this.successfulRegistrations; + } + + public List getFailedRegistrations() { + return this.failedRegistrations; + } + } + + /** + * Utility method to check if the specified Collection is null or empty. + * + * @param c the collection object + * @param element type of the collection + * @return whether is it is null or empty + */ + public static boolean isNullOrEmpty(Collection c) { + return (c == null || c.size() == 0); + } + + /** + * Utility method to check if the specified Collection is null or empty. 
+ * + * @param c the map object + * @param key type of the map + * @param value type of the map + * @return whether is it is null or empty + */ + public static boolean isNullOrEmpty(Map c) { + return (c == null || c.size() == 0); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 167d15d6fd1..22484b7c8ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -39,6 +39,8 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; @@ -64,6 +66,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.LogAggregationContext; @@ -93,7 +96,7 @@ import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit; import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedContainersEvent; -import org.apache.hadoop.yarn.server.nodemanager.CMgrDecreaseContainersResourceEvent; +import org.apache.hadoop.yarn.server.nodemanager.CMgrUpdateContainersEvent; import org.apache.hadoop.yarn.server.nodemanager.CMgrSignalContainersEvent; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.ContainerManagerEvent; @@ -134,13 +137,14 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEventType; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ChangeMonitoringContainerResourceEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.ContainerScheduler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.ContainerSchedulerEventType; + +import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.UpdateContainerSchedulerEvent; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredApplicationsState; @@ -162,6 +166,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -408,8 +413,24 @@ public class ContainerManagerImpl extends CompositeService implements throws IOException { StartContainerRequest req = rcs.getStartRequest(); ContainerLaunchContext launchContext = req.getContainerLaunchContext(); - ContainerTokenIdentifier token = - BuilderUtils.newContainerTokenIdentifier(req.getContainerToken()); + ContainerTokenIdentifier token = null; + if(rcs.getCapability() != null) { + ContainerTokenIdentifier originalToken = + BuilderUtils.newContainerTokenIdentifier(req.getContainerToken()); + token = new ContainerTokenIdentifier(originalToken.getContainerID(), + originalToken.getVersion(), originalToken.getNmHostAddress(), + originalToken.getApplicationSubmitter(), rcs.getCapability(), + originalToken.getExpiryTimeStamp(), originalToken.getMasterKeyId(), + originalToken.getRMIdentifier(), originalToken.getPriority(), + originalToken.getCreationTime(), + originalToken.getLogAggregationContext(), + originalToken.getNodeLabelExpression(), + originalToken.getContainerType(), originalToken.getExecutionType()); + + } else { + token = BuilderUtils.newContainerTokenIdentifier(req.getContainerToken()); + } + ContainerId containerId = token.getContainerID(); ApplicationId appId = containerId.getApplicationAttemptId().getApplicationId(); @@ -1133,13 +1154,26 @@ public class ContainerManagerImpl extends CompositeService implements * Increase resource of a list of containers on this NodeManager. */ @Override + @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( IncreaseContainersResourceRequest requests) throws YarnException, IOException { + ContainerUpdateResponse resp = updateContainer( + ContainerUpdateRequest.newInstance(requests.getContainersToIncrease())); + return IncreaseContainersResourceResponse.newInstance( + resp.getSuccessfullyUpdatedContainers(), resp.getFailedRequests()); + } + + /** + * Update resource of a list of containers on this NodeManager. 
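// [Editor's aside -- condensed sketch, not part of the patch; `newToken`,
// `current`/`target` and `currentType`/`targetType` are shorthand for the new
// ContainerTokenIdentifier and the resource/execution-type values read from it and
// from the running container] What the updateContainer/updateContainerInternal path
// below checks before handing the change to the scheduler: the token version must
// advance, and the update is classified as a resource change or an execution-type
// change, and as an increase or not.
if (newToken.getVersion() <= container.getContainerTokenIdentifier().getVersion()) {
  throw RPCUtil.getRemoteException("Container update token is not newer than current");
}
boolean isResourceChange = !current.equals(target);
boolean isExecTypeUpdate = !isResourceChange && currentType != targetType;
boolean isIncrease = isResourceChange
    ? Resources.fitsIn(current, target) && !Resources.fitsIn(target, current)
    : currentType == ExecutionType.OPPORTUNISTIC
        && targetType == ExecutionType.GUARANTEED;
dispatcher.getEventHandler().handle(new UpdateContainerSchedulerEvent(
    container, newToken, isResourceChange, isExecTypeUpdate, isIncrease));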
+ */ + @Override + public ContainerUpdateResponse updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { UserGroupInformation remoteUgi = getRemoteUgi(); NMTokenIdentifier nmTokenIdentifier = selectNMTokenIdentifier(remoteUgi); authorizeUser(remoteUgi, nmTokenIdentifier); - List successfullyIncreasedContainers + List successfullyUpdatedContainers = new ArrayList(); Map failedContainers = new HashMap(); @@ -1151,7 +1185,7 @@ public class ContainerManagerImpl extends CompositeService implements synchronized (this.context) { // Process container resource increase requests for (org.apache.hadoop.yarn.api.records.Token token : - requests.getContainersToIncrease()) { + request.getContainersToUpdate()) { ContainerId containerId = null; try { if (token.getIdentifier() == null) { @@ -1168,10 +1202,8 @@ public class ContainerManagerImpl extends CompositeService implements // as container resource increase request will have come with // an updated NMToken. updateNMTokenIdentifier(nmTokenIdentifier); - Resource resource = containerTokenIdentifier.getResource(); - changeContainerResourceInternal(containerId, - containerTokenIdentifier.getVersion(), resource, true); - successfullyIncreasedContainers.add(containerId); + updateContainerInternal(containerId, containerTokenIdentifier); + successfullyUpdatedContainers.add(containerId); } catch (YarnException | InvalidToken e) { failedContainers.put(containerId, SerializedException.newInstance(e)); } catch (IOException e) { @@ -1179,14 +1211,14 @@ public class ContainerManagerImpl extends CompositeService implements } } } - return IncreaseContainersResourceResponse.newInstance( - successfullyIncreasedContainers, failedContainers); + return ContainerUpdateResponse.newInstance( + successfullyUpdatedContainers, failedContainers); } @SuppressWarnings("unchecked") - private void changeContainerResourceInternal(ContainerId containerId, - int containerVersion, Resource targetResource, boolean increase) - throws YarnException, IOException { + private void updateContainerInternal(ContainerId containerId, + ContainerTokenIdentifier containerTokenIdentifier) + throws YarnException, IOException { Container container = context.getContainers().get(containerId); // Check container existence if (container == null) { @@ -1198,64 +1230,81 @@ public class ContainerManagerImpl extends CompositeService implements + " is not handled by this NodeManager"); } } + // Check container version. + int currentVersion = container.getContainerTokenIdentifier().getVersion(); + if (containerTokenIdentifier.getVersion() <= currentVersion) { + throw RPCUtil.getRemoteException("Container " + containerId.toString() + + " has update version [" + currentVersion + "] >= requested version" + + " [" + containerTokenIdentifier.getVersion() + "]"); + } + // Check container state org.apache.hadoop.yarn.server.nodemanager. containermanager.container.ContainerState currentState = container.getContainerState(); - if (currentState != org.apache.hadoop.yarn.server. 
- nodemanager.containermanager.container.ContainerState.RUNNING) { + EnumSet allowedStates = EnumSet.of( + org.apache.hadoop.yarn.server.nodemanager.containermanager.container + .ContainerState.RUNNING, + org.apache.hadoop.yarn.server.nodemanager.containermanager.container + .ContainerState.SCHEDULED, + org.apache.hadoop.yarn.server.nodemanager.containermanager.container + .ContainerState.LOCALIZING, + org.apache.hadoop.yarn.server.nodemanager.containermanager.container + .ContainerState.REINITIALIZING, + org.apache.hadoop.yarn.server.nodemanager.containermanager.container + .ContainerState.RELAUNCHING); + if (!allowedStates.contains(currentState)) { throw RPCUtil.getRemoteException("Container " + containerId.toString() + " is in " + currentState.name() + " state." + " Resource can only be changed when a container is in" - + " RUNNING state"); + + " RUNNING or SCHEDULED state"); } + // Check validity of the target resource. Resource currentResource = container.getResource(); - if (currentResource.equals(targetResource)) { - LOG.warn("Unable to change resource for container " - + containerId.toString() - + ". The target resource " - + targetResource.toString() - + " is the same as the current resource"); - return; + ExecutionType currentExecType = + container.getContainerTokenIdentifier().getExecutionType(); + boolean isResourceChange = false; + boolean isExecTypeUpdate = false; + Resource targetResource = containerTokenIdentifier.getResource(); + ExecutionType targetExecType = containerTokenIdentifier.getExecutionType(); + + // Is true if either the resources has increased or execution type + // updated from opportunistic to guaranteed + boolean isIncrease = false; + if (!currentResource.equals(targetResource)) { + isResourceChange = true; + isIncrease = Resources.fitsIn(currentResource, targetResource) + && !Resources.fitsIn(targetResource, currentResource); + } else if (!currentExecType.equals(targetExecType)) { + isExecTypeUpdate = true; + isIncrease = currentExecType == ExecutionType.OPPORTUNISTIC && + targetExecType == ExecutionType.GUARANTEED; } - if (increase && !Resources.fitsIn(currentResource, targetResource)) { - throw RPCUtil.getRemoteException("Unable to increase resource for " - + "container " + containerId.toString() - + ". The target resource " - + targetResource.toString() - + " is smaller than the current resource " - + currentResource.toString()); - } - if (!increase && - (!Resources.fitsIn(Resources.none(), targetResource) - || !Resources.fitsIn(targetResource, currentResource))) { - throw RPCUtil.getRemoteException("Unable to decrease resource for " - + "container " + containerId.toString() - + ". 
The target resource " - + targetResource.toString() - + " is not smaller than the current resource " - + currentResource.toString()); - } - if (increase) { - org.apache.hadoop.yarn.api.records.Container increasedContainer = - org.apache.hadoop.yarn.api.records.Container.newInstance( - containerId, null, null, targetResource, null, null); - if (context.getIncreasedContainers().putIfAbsent(containerId, - increasedContainer) != null){ - throw RPCUtil.getRemoteException("Container " + containerId.toString() - + " resource is being increased."); + if (isIncrease) { + org.apache.hadoop.yarn.api.records.Container increasedContainer = null; + if (isResourceChange) { + increasedContainer = + org.apache.hadoop.yarn.api.records.Container.newInstance( + containerId, null, null, targetResource, null, null, + currentExecType); + if (context.getIncreasedContainers().putIfAbsent(containerId, + increasedContainer) != null){ + throw RPCUtil.getRemoteException("Container " + containerId.toString() + + " resource is being increased -or- " + + "is undergoing ExecutionType promoted."); + } } } this.readLock.lock(); try { if (!serviceStopped) { - // Persist container resource change for recovery - this.context.getNMStateStore().storeContainerResourceChanged( - containerId, containerVersion, targetResource); - getContainersMonitor().handle( - new ChangeMonitoringContainerResourceEvent( - containerId, targetResource)); + // Dispatch message to ContainerScheduler to actually + // make the change. + dispatcher.getEventHandler().handle(new UpdateContainerSchedulerEvent( + container, containerTokenIdentifier, isResourceChange, + isExecTypeUpdate, isIncrease)); } else { throw new YarnException( "Unable to change container resource as the NodeManager is " @@ -1550,14 +1599,17 @@ public class ContainerManagerImpl extends CompositeService implements "Container Killed by ResourceManager")); } break; - case DECREASE_CONTAINERS_RESOURCE: - CMgrDecreaseContainersResourceEvent containersDecreasedEvent = - (CMgrDecreaseContainersResourceEvent) event; + case UPDATE_CONTAINERS: + CMgrUpdateContainersEvent containersDecreasedEvent = + (CMgrUpdateContainersEvent) event; for (org.apache.hadoop.yarn.api.records.Container container - : containersDecreasedEvent.getContainersToDecrease()) { + : containersDecreasedEvent.getContainersToUpdate()) { try { - changeContainerResourceInternal(container.getId(), - container.getVersion(), container.getResource(), false); + ContainerTokenIdentifier containerTokenIdentifier = + BuilderUtils.newContainerTokenIdentifier( + container.getContainerToken()); + updateContainerInternal(container.getId(), + containerTokenIdentifier); } catch (YarnException e) { LOG.error("Unable to decrease container resource", e); } catch (IOException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java index bd3f06d1fcb..f6e567c19ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java @@ -39,10 +39,10 @@ public interface Container extends EventHandler { Resource getResource(); - void setResource(Resource targetResource); - ContainerTokenIdentifier getContainerTokenIdentifier(); + void setContainerTokenIdentifier(ContainerTokenIdentifier token); + String getUser(); ContainerState getContainerState(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index 46f8fa091fb..734a27ba83c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -148,9 +148,8 @@ public class ContainerImpl implements Container { private final Credentials credentials; private final NodeManagerMetrics metrics; private volatile ContainerLaunchContext launchContext; - private final ContainerTokenIdentifier containerTokenIdentifier; + private volatile ContainerTokenIdentifier containerTokenIdentifier; private final ContainerId containerId; - private volatile Resource resource; private final String user; private int version; private int exitCode = ContainerExitStatus.INVALID; @@ -201,7 +200,6 @@ public class ContainerImpl implements Container { YarnConfiguration.DEFAULT_NM_CONTAINER_DIAGNOSTICS_MAXIMUM_SIZE); this.containerTokenIdentifier = containerTokenIdentifier; this.containerId = containerTokenIdentifier.getContainerID(); - this.resource = containerTokenIdentifier.getResource(); this.diagnostics = new StringBuilder(); this.credentials = creds; this.metrics = metrics; @@ -269,13 +267,6 @@ public class ContainerImpl implements Container { this.exitCode = rcs.getExitCode(); this.recoveredAsKilled = rcs.getKilled(); this.diagnostics.append(rcs.getDiagnostics()); - Resource recoveredCapability = rcs.getCapability(); - if (recoveredCapability != null - && !this.resource.equals(recoveredCapability)) { - // resource capability had been updated before NM was down - this.resource = Resource.newInstance(recoveredCapability.getMemorySize(), - recoveredCapability.getVirtualCores()); - } this.version = rcs.getVersion(); this.remainingRetryAttempts = rcs.getRemainingRetryAttempts(); this.workDir = rcs.getWorkDir(); @@ -640,14 +631,8 @@ public class ContainerImpl implements Container { @Override public Resource getResource() { - return Resources.clone(this.resource); - } - - @Override - public void setResource(Resource targetResource) { - Resource currentResource = getResource(); - this.resource = Resources.clone(targetResource); - this.metrics.changeContainer(currentResource, targetResource); + return Resources.clone( + this.containerTokenIdentifier.getResource()); } @Override @@ -660,6 +645,16 @@ public class ContainerImpl implements Container { } } + @Override + public void setContainerTokenIdentifier(ContainerTokenIdentifier token) { + this.writeLock.lock(); + try { + 
this.containerTokenIdentifier = token; + } finally { + this.writeLock.unlock(); + } + } + @Override public String getWorkDir() { return workDir; @@ -833,7 +828,8 @@ public class ContainerImpl implements Container { AuditConstants.FINISH_KILLED_CONTAINER, "ContainerImpl", container.containerId.getApplicationAttemptId().getApplicationId(), container.containerId); - container.metrics.releaseContainer(container.resource); + container.metrics.releaseContainer( + container.containerTokenIdentifier.getResource()); container.sendFinishedEvents(); return ContainerState.DONE; } @@ -1397,6 +1393,10 @@ public class ContainerImpl implements Container { container.resourceSet = container.reInitContext.mergedResourceSet(container.resourceSet); container.isMarkeForKilling = false; + // Ensure Resources are decremented. + container.dispatcher.getEventHandler().handle( + new ContainerSchedulerEvent(container, + ContainerSchedulerEventType.CONTAINER_COMPLETED)); container.sendScheduleEvent(); } } @@ -1513,7 +1513,8 @@ public class ContainerImpl implements Container { @Override @SuppressWarnings("unchecked") public void transition(ContainerImpl container, ContainerEvent event) { - container.metrics.releaseContainer(container.resource); + container.metrics.releaseContainer( + container.containerTokenIdentifier.getResource()); if (container.containerMetrics != null) { container.containerMetrics .recordFinishTimeAndExitCode(clock.getTime(), container.exitCode); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java index d9cca8f523a..830782d6f48 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsCpuResourceHandlerImpl.java @@ -26,8 +26,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils; @@ -72,6 +74,7 @@ public class CGroupsCpuResourceHandlerImpl implements CpuResourceHandler { static final int MIN_PERIOD_US = 1000; @VisibleForTesting static final int CPU_DEFAULT_WEIGHT = 1024; // set by kernel + static final int CPU_DEFAULT_WEIGHT_OPPORTUNISTIC = 2; CGroupsCpuResourceHandlerImpl(CGroupsHandler cGroupsHandler) { this.cGroupsHandler = cGroupsHandler; @@ -181,16 +184,23 @@ public class CGroupsCpuResourceHandlerImpl implements CpuResourceHandler 
{ @Override public List preStart(Container container) throws ResourceHandlerException { - String cgroupId = container.getContainerId().toString(); Resource containerResource = container.getResource(); cGroupsHandler.createCGroup(CPU, cgroupId); try { int containerVCores = containerResource.getVirtualCores(); - int cpuShares = CPU_DEFAULT_WEIGHT * containerVCores; - cGroupsHandler - .updateCGroupParam(CPU, cgroupId, CGroupsHandler.CGROUP_CPU_SHARES, - String.valueOf(cpuShares)); + ContainerTokenIdentifier id = container.getContainerTokenIdentifier(); + if (id != null && id.getExecutionType() == + ExecutionType.OPPORTUNISTIC) { + cGroupsHandler + .updateCGroupParam(CPU, cgroupId, CGroupsHandler.CGROUP_CPU_SHARES, + String.valueOf(CPU_DEFAULT_WEIGHT_OPPORTUNISTIC)); + } else { + int cpuShares = CPU_DEFAULT_WEIGHT * containerVCores; + cGroupsHandler + .updateCGroupParam(CPU, cgroupId, CGroupsHandler.CGROUP_CPU_SHARES, + String.valueOf(cpuShares)); + } if (strictResourceUsageMode) { if (nodeVCores != containerVCores) { float containerCPU = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java index 8fc35a8232d..82bd36650c7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java @@ -23,6 +23,9 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import java.util.HashSet; +import java.util.Set; + /** * Provides CGroups functionality. Implementations are expected to be * thread-safe @@ -54,6 +57,18 @@ public interface CGroupsHandler { String getName() { return name; } + + /** + * Get the list of valid cgroup names. 
+ * @return The set of cgroup name strings + */ + public static Set getValidCGroups() { + HashSet validCgroups = new HashSet<>(); + for (CGroupController controller : CGroupController.values()) { + validCgroups.add(controller.getName()); + } + return validCgroups; + } } String CGROUP_FILE_TASKS = "tasks"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java index 85b01cd1e83..9fd20eb96d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java @@ -83,7 +83,7 @@ class CGroupsHandlerImpl implements CGroupsHandler { * @param mtab mount file location * @throws ResourceHandlerException if initialization failed */ - public CGroupsHandlerImpl(Configuration conf, PrivilegedOperationExecutor + CGroupsHandlerImpl(Configuration conf, PrivilegedOperationExecutor privilegedOperationExecutor, String mtab) throws ResourceHandlerException { this.cGroupPrefix = conf.get(YarnConfiguration. @@ -115,7 +115,7 @@ class CGroupsHandlerImpl implements CGroupsHandler { * PrivilegedContainerOperations * @throws ResourceHandlerException if initialization failed */ - public CGroupsHandlerImpl(Configuration conf, PrivilegedOperationExecutor + CGroupsHandlerImpl(Configuration conf, PrivilegedOperationExecutor privilegedOperationExecutor) throws ResourceHandlerException { this(conf, privilegedOperationExecutor, MTAB_FILE); } @@ -142,11 +142,18 @@ class CGroupsHandlerImpl implements CGroupsHandler { // the same hierarchy will be mounted at each mount point with the same // subsystem set. - Map> newMtab; + Map> newMtab = null; Map cPaths; try { - // parse mtab - newMtab = parseMtab(mtabFile); + if (this.cGroupMountPath != null && !this.enableCGroupMount) { + newMtab = ResourceHandlerModule. 
+ parseConfiguredCGroupPath(this.cGroupMountPath); + } + + if (newMtab == null) { + // parse mtab + newMtab = parseMtab(mtabFile); + } // find cgroup controller paths cPaths = initializeControllerPathsFromMtab(newMtab); @@ -203,10 +210,8 @@ class CGroupsHandlerImpl implements CGroupsHandler { throws IOException { Map> ret = new HashMap<>(); BufferedReader in = null; - HashSet validCgroups = new HashSet<>(); - for (CGroupController controller : CGroupController.values()) { - validCgroups.add(controller.getName()); - } + Set validCgroups = + CGroupsHandler.CGroupController.getValidCGroups(); try { FileInputStream fis = new FileInputStream(new File(mtab)); @@ -487,7 +492,8 @@ class CGroupsHandlerImpl implements CGroupsHandler { try (BufferedReader inl = new BufferedReader(new InputStreamReader(new FileInputStream(cgf + "/tasks"), "UTF-8"))) { - if ((str = inl.readLine()) != null) { + str = inl.readLine(); + if (str != null) { LOG.debug("First line in cgroup tasks file: " + cgf + " " + str); } } catch (IOException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java index b4d2a9ababe..d159aad1bef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java @@ -25,7 +25,9 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; @@ -46,6 +48,8 @@ public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler { CGroupsMemoryResourceHandlerImpl.class); private static final CGroupsHandler.CGroupController MEMORY = CGroupsHandler.CGroupController.MEMORY; + private static final int OPPORTUNISTIC_SWAPPINESS = 100; + private static final int OPPORTUNISTIC_SOFT_LIMIT = 0; private CGroupsHandler cGroupsHandler; private int swappiness = 0; @@ -85,13 +89,15 @@ public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler { + ". Value must be between 0 and 100."); } float softLimitPerc = conf.getFloat( - YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE, - YarnConfiguration.DEFAULT_NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE); + YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE, + YarnConfiguration. 
+ DEFAULT_NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE); softLimit = softLimitPerc / 100.0f; if (softLimitPerc < 0.0f || softLimitPerc > 100.0f) { throw new ResourceHandlerException( "Illegal value '" + softLimitPerc + "' " - + YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE + + YarnConfiguration. + NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE + ". Value must be between 0 and 100."); } return null; @@ -122,12 +128,23 @@ public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler { cGroupsHandler.updateCGroupParam(MEMORY, cgroupId, CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, String.valueOf(containerHardLimit) + "M"); - cGroupsHandler.updateCGroupParam(MEMORY, cgroupId, - CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES, - String.valueOf(containerSoftLimit) + "M"); - cGroupsHandler.updateCGroupParam(MEMORY, cgroupId, - CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS, - String.valueOf(swappiness)); + ContainerTokenIdentifier id = container.getContainerTokenIdentifier(); + if (id != null && id.getExecutionType() == + ExecutionType.OPPORTUNISTIC) { + cGroupsHandler.updateCGroupParam(MEMORY, cgroupId, + CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES, + String.valueOf(OPPORTUNISTIC_SOFT_LIMIT) + "M"); + cGroupsHandler.updateCGroupParam(MEMORY, cgroupId, + CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS, + String.valueOf(OPPORTUNISTIC_SWAPPINESS)); + } else { + cGroupsHandler.updateCGroupParam(MEMORY, cgroupId, + CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES, + String.valueOf(containerSoftLimit) + "M"); + cGroupsHandler.updateCGroupParam(MEMORY, cgroupId, + CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS, + String.valueOf(swappiness)); + } } catch (ResourceHandlerException re) { cGroupsHandler.deleteCGroup(MEMORY, cgroupId); LOG.warn("Could not update cgroup for container", re); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java index 7fc04bdb41e..4d137f0e1d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java @@ -31,6 +31,13 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileg import org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler; import org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler; +import java.io.File; +import java.io.IOException; +import java.util.Set; +import java.util.HashSet; +import java.util.Map; +import java.util.HashMap; +import java.util.Arrays; import java.util.ArrayList; import java.util.List; @@ -113,8 +120,8 @@ public class ResourceHandlerModule { } private static TrafficControlBandwidthHandlerImpl - getTrafficControlBandwidthHandler(Configuration conf) - throws ResourceHandlerException { + getTrafficControlBandwidthHandler(Configuration conf) + throws ResourceHandlerException { if 
(conf.getBoolean(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED, YarnConfiguration.DEFAULT_NM_NETWORK_RESOURCE_ENABLED)) { if (trafficControlBandwidthHandler == null) { @@ -137,8 +144,8 @@ public class ResourceHandlerModule { } public static OutboundBandwidthResourceHandler - getOutboundBandwidthResourceHandler(Configuration conf) - throws ResourceHandlerException { + getOutboundBandwidthResourceHandler(Configuration conf) + throws ResourceHandlerException { return getTrafficControlBandwidthHandler(conf); } @@ -176,7 +183,7 @@ public class ResourceHandlerModule { } private static CGroupsMemoryResourceHandlerImpl - getCgroupsMemoryResourceHandler( + getCgroupsMemoryResourceHandler( Configuration conf) throws ResourceHandlerException { if (cGroupsMemoryResourceHandler == null) { synchronized (MemoryResourceHandler.class) { @@ -229,4 +236,45 @@ public class ResourceHandlerModule { static void nullifyResourceHandlerChain() throws ResourceHandlerException { resourceHandlerChain = null; } + + /** + * If a cgroup mount directory is specified, it returns cgroup directories + * with valid names. + * The requirement is that each hierarchy has to be named with the comma + * separated names of subsystems supported. + * For example: /sys/fs/cgroup/cpu,cpuacct + * @param cgroupMountPath Root cgroup mount path (/sys/fs/cgroup in the + * example above) + * @return A path to cgroup subsystem set mapping in the same format as + * {@link CGroupsHandlerImpl#parseMtab(String)} + * @throws IOException if the specified directory cannot be listed + */ + public static Map> parseConfiguredCGroupPath( + String cgroupMountPath) throws IOException { + File cgroupDir = new File(cgroupMountPath); + File[] list = cgroupDir.listFiles(); + if (list == null) { + throw new IOException("Empty cgroup mount directory specified: " + + cgroupMountPath); + } + + Map> pathSubsystemMappings = new HashMap<>(); + Set validCGroups = + CGroupsHandler.CGroupController.getValidCGroups(); + for (File candidate: list) { + Set cgroupList = + new HashSet<>(Arrays.asList(candidate.getName().split(","))); + // Collect the valid subsystem names + cgroupList.retainAll(validCGroups); + if (!cgroupList.isEmpty()) { + if (candidate.isDirectory() && candidate.canWrite()) { + pathSubsystemMappings.put(candidate.getAbsolutePath(), cgroupList); + } else { + LOG.warn("The following cgroup is not a directory or it is not" + + " writable" + candidate.getAbsolutePath()); + } + } + } + return pathSubsystemMappings; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java index 8a464914e00..bb4b7f313fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; +import static org.apache.hadoop.util.Shell.getAllShells; + import java.io.DataInputStream; import java.io.File; import 
java.io.IOException; @@ -30,6 +32,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import java.util.Stack; import java.util.concurrent.Callable; @@ -81,8 +84,6 @@ import org.apache.hadoop.yarn.util.FSDownload; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import static org.apache.hadoop.util.Shell.getAllShells; - public class ContainerLocalizer { static final Log LOG = LogFactory.getLog(ContainerLocalizer.class); @@ -348,13 +349,13 @@ public class ContainerLocalizer { final List currentResources = new ArrayList(); // TODO: Synchronization?? - for (Iterator i = pendingResources.keySet().iterator(); - i.hasNext();) { - LocalResource rsrc = i.next(); + for (Iterator>> i = + pendingResources.entrySet().iterator(); i.hasNext();) { + Entry> mapEntry = i.next(); LocalResourceStatus stat = recordFactory.newRecordInstance(LocalResourceStatus.class); - stat.setResource(rsrc); - Future fPath = pendingResources.get(rsrc); + stat.setResource(mapEntry.getKey()); + Future fPath = mapEntry.getValue(); if (fPath.isDone()) { try { Path localPath = fPath.get(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java index 07b3deadc4d..a6aa337239f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java @@ -130,7 +130,7 @@ public class ContainerMetrics implements MetricsSource { /** * Simple metrics cache to help prevent re-registrations. */ - protected final static Map + private final static Map usageMetrics = new HashMap<>(); // Create a timer to unregister container metrics, // whose associated thread run as a daemon. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 6ee60bd17a4..13e74917af2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -741,19 +741,6 @@ public class ContainersMonitorImpl extends AbstractService implements } } - private void changeContainerResource( - ContainerId containerId, Resource resource) { - Container container = context.getContainers().get(containerId); - // Check container existence - if (container == null) { - LOG.warn("Container " + containerId.toString() + "does not exist"); - return; - } - // YARN-5860: Route this through the ContainerScheduler to - // fix containerAllocation - container.setResource(resource); - } - private void updateContainerMetrics(ContainersMonitorEvent monitoringEvent) { if (!containerMetricsEnabled || monitoringEvent == null) { return; @@ -902,8 +889,6 @@ public class ContainersMonitorImpl extends AbstractService implements int cpuVcores = changeEvent.getResource().getVirtualCores(); processTreeInfo.setResourceLimit(pmemLimit, vmemLimit, cpuVcores); } - - changeContainerResource(containerId, changeEvent.getResource()); } private void onStopMonitoringContainer( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java index 9839aeb6bb4..6e2b6174729 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java @@ -118,19 +118,40 @@ public class AllocationBasedResourceUtilizationTracker implements return false; } - float vCores = (float) cpuVcores / - getContainersMonitor().getVCoresAllocatedForContainers(); if (LOG.isDebugEnabled()) { LOG.debug("before cpuCheck [asked={} > allowed={}]", - this.containersAllocation.getCPU(), vCores); + this.containersAllocation.getCPU(), + getContainersMonitor().getVCoresAllocatedForContainers()); } - // Check CPU. - if (this.containersAllocation.getCPU() + vCores > 1.0f) { + // Check CPU. Compare using integral values of cores to avoid decimal + // inaccuracies. 
+ if (!hasEnoughCpu(this.containersAllocation.getCPU(), + getContainersMonitor().getVCoresAllocatedForContainers(), cpuVcores)) { return false; } return true; } + /** + * Returns whether there is enough space for coresRequested in totalCores. + * Converts currentAllocation usage to nearest integer count before comparing, + * as floats are inherently imprecise. NOTE: this calculation assumes that + * requested core counts must be integers, and currentAllocation core count + * must also be an integer. + * + * @param currentAllocation The current allocation, a float value from 0 to 1. + * @param totalCores The total cores in the system. + * @param coresRequested The number of cores requested. + * @return True if currentAllocation * totalCores + coresRequested <= + * totalCores. + */ + public boolean hasEnoughCpu(float currentAllocation, long totalCores, + int coresRequested) { + // Must not cast here, as it would truncate the decimal digits. + return Math.round(currentAllocation * totalCores) + + coresRequested <= totalCores; + } + public ContainersMonitor getContainersMonitor() { return this.scheduler.getContainersMonitor(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java index 19243acd661..644bdae77a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java @@ -31,6 +31,9 @@ import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit; import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; + +import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor + .ChangeMonitoringContainerResourceEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor; @@ -136,6 +139,13 @@ public class ContainerScheduler extends AbstractService implements case CONTAINER_COMPLETED: onContainerCompleted(event.getContainer()); break; + case UPDATE_CONTAINER: + if (event instanceof UpdateContainerSchedulerEvent) { + onUpdateContainer((UpdateContainerSchedulerEvent) event); + } else { + LOG.error("Unknown event type on UpdateContainer: " + event.getType()); + } + break; case SHED_QUEUED_CONTAINERS: shedQueuedOpportunisticContainers(); break; @@ -145,6 +155,70 @@ public class ContainerScheduler extends AbstractService implements } } + /** + * We assume that the ContainerManager has already figured out what kind + * of update this is. 
+ */ + private void onUpdateContainer(UpdateContainerSchedulerEvent updateEvent) { + ContainerId containerId = updateEvent.getContainer().getContainerId(); + if (updateEvent.isResourceChange()) { + if (runningContainers.containsKey(containerId)) { + this.utilizationTracker.subtractContainerResource( + updateEvent.getContainer()); + updateEvent.getContainer().setContainerTokenIdentifier( + updateEvent.getUpdatedToken()); + this.utilizationTracker.addContainerResources( + updateEvent.getContainer()); + getContainersMonitor().handle( + new ChangeMonitoringContainerResourceEvent(containerId, + updateEvent.getUpdatedToken().getResource())); + } else { + // Is Queued or localizing.. + updateEvent.getContainer().setContainerTokenIdentifier( + updateEvent.getUpdatedToken()); + } + try { + // Persist change in the state store. + this.context.getNMStateStore().storeContainerResourceChanged( + containerId, + updateEvent.getUpdatedToken().getVersion(), + updateEvent.getUpdatedToken().getResource()); + } catch (IOException e) { + LOG.warn("Could not store container [" + containerId + "] resource " + + "change..", e); + } + } + + if (updateEvent.isExecTypeUpdate()) { + updateEvent.getContainer().setContainerTokenIdentifier( + updateEvent.getUpdatedToken()); + // If this is a running container.. just change the execution type + // and be done with it. + if (!runningContainers.containsKey(containerId)) { + // Promotion or not (Increase signifies either a promotion + // or container size increase) + if (updateEvent.isIncrease()) { + // Promotion of queued container.. + if (queuedOpportunisticContainers.remove(containerId) != null) { + queuedGuaranteedContainers.put(containerId, + updateEvent.getContainer()); + } + //Kill opportunistic containers if any to make room for + // promotion request + killOpportunisticContainers(updateEvent.getContainer()); + } else { + // Demotion of queued container.. Should not happen too often + // since you should not find too many queued guaranteed + // containers + if (queuedGuaranteedContainers.remove(containerId) != null) { + queuedOpportunisticContainers.put(containerId, + updateEvent.getContainer()); + } + } + } + } + } + /** * Return number of queued containers. * @return Number of queued containers. @@ -392,7 +466,10 @@ public class ContainerScheduler extends AbstractService implements ResourceUtilization resourcesToFreeUp) { return resourcesToFreeUp.getPhysicalMemory() <= 0 && resourcesToFreeUp.getVirtualMemory() <= 0 && - resourcesToFreeUp.getCPU() <= 0.0f; + // Convert the number of cores to nearest integral number, due to + // imprecision of direct float comparison. 
+ Math.round(resourcesToFreeUp.getCPU() + * getContainersMonitor().getVCoresAllocatedForContainers()) <= 0; } private ResourceUtilization resourcesToFreeUp( @@ -463,4 +540,8 @@ public class ContainerScheduler extends AbstractService implements return this.context.getContainerManager().getContainersMonitor(); } + @VisibleForTesting + public ResourceUtilization getCurrentUtilization() { + return this.utilizationTracker.getCurrentUtilization(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerSchedulerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerSchedulerEventType.java index 086cb9bd5a6..917eda09af6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerSchedulerEventType.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerSchedulerEventType.java @@ -24,6 +24,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler; public enum ContainerSchedulerEventType { SCHEDULE_CONTAINER, CONTAINER_COMPLETED, + UPDATE_CONTAINER, // Producer: Node HB response - RM has asked to shed the queue SHED_QUEUED_CONTAINERS, } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/UpdateContainerSchedulerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/UpdateContainerSchedulerEvent.java new file mode 100644 index 00000000000..5384b7e8dbc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/UpdateContainerSchedulerEvent.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler; + +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container + .Container; +/** + * Update Event consumed by the {@link ContainerScheduler}. 
+ */ +public class UpdateContainerSchedulerEvent extends ContainerSchedulerEvent { + + private ContainerTokenIdentifier updatedToken; + private boolean isResourceChange; + private boolean isExecTypeUpdate; + private boolean isIncrease; + + /** + * Create instance of Event. + * + * @param originalContainer Original Container. + * @param updatedToken Updated Container Token. + * @param isResourceChange is this a Resource Change. + * @param isExecTypeUpdate is this an ExecTypeUpdate. + * @param isIncrease is this a Container Increase. + */ + public UpdateContainerSchedulerEvent(Container originalContainer, + ContainerTokenIdentifier updatedToken, boolean isResourceChange, + boolean isExecTypeUpdate, boolean isIncrease) { + super(originalContainer, ContainerSchedulerEventType.UPDATE_CONTAINER); + this.updatedToken = updatedToken; + this.isResourceChange = isResourceChange; + this.isExecTypeUpdate = isExecTypeUpdate; + this.isIncrease = isIncrease; + } + + /** + * Update Container Token. + * + * @return Container Token. + */ + public ContainerTokenIdentifier getUpdatedToken() { + return updatedToken; + } + + /** + * isResourceChange. + * @return isResourceChange. + */ + public boolean isResourceChange() { + return isResourceChange; + } + + /** + * isExecTypeUpdate. + * @return isExecTypeUpdate. + */ + public boolean isExecTypeUpdate() { + return isExecTypeUpdate; + } + + /** + * isIncrease. + * @return isIncrease. + */ + public boolean isIncrease() { + return isIncrease; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java index bca4fdc8c9e..7a8928546ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java @@ -27,6 +27,7 @@ import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.io.Writer; +import java.util.Arrays; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -39,7 +40,6 @@ import java.util.regex.Pattern; import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -51,6 +51,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsCpuResourceHandlerImpl; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import org.apache.hadoop.yarn.util.SystemClock; @@ -87,11 +89,11 @@ public class 
CgroupsLCEResourcesHandler implements LCEResourcesHandler { private long deleteCgroupTimeout; private long deleteCgroupDelay; - // package private for testing purposes + @VisibleForTesting Clock clock; private float yarnProcessors; - int nodeVCores; + private int nodeVCores; public CgroupsLCEResourcesHandler() { this.controllerPaths = new HashMap(); @@ -132,8 +134,10 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { this.strictResourceUsageMode = conf .getBoolean( - YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, - YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE); + YarnConfiguration + .NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, + YarnConfiguration + .DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE); int len = cgroupPrefix.length(); if (cgroupPrefix.charAt(len - 1) == '/') { @@ -169,8 +173,10 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { if (systemProcessors != (int) yarnProcessors) { LOG.info("YARN containers restricted to " + yarnProcessors + " cores"); int[] limits = getOverallLimits(yarnProcessors); - updateCgroup(CONTROLLER_CPU, "", CPU_PERIOD_US, String.valueOf(limits[0])); - updateCgroup(CONTROLLER_CPU, "", CPU_QUOTA_US, String.valueOf(limits[1])); + updateCgroup(CONTROLLER_CPU, "", CPU_PERIOD_US, + String.valueOf(limits[0])); + updateCgroup(CONTROLLER_CPU, "", CPU_QUOTA_US, + String.valueOf(limits[1])); } else if (CGroupsCpuResourceHandlerImpl.cpuLimitsExist( pathForCgroup(CONTROLLER_CPU, ""))) { LOG.info("Removing CPU constraints for YARN containers."); @@ -178,8 +184,8 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { } } - int[] getOverallLimits(float yarnProcessors) { - return CGroupsCpuResourceHandlerImpl.getOverallLimits(yarnProcessors); + int[] getOverallLimits(float yarnProcessorsArg) { + return CGroupsCpuResourceHandlerImpl.getOverallLimits(yarnProcessorsArg); } @@ -204,7 +210,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { LOG.debug("createCgroup: " + path); } - if (! 
new File(path).mkdir()) { + if (!new File(path).mkdir()) { throw new IOException("Failed to create cgroup at " + path); } } @@ -251,7 +257,8 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { try (BufferedReader inl = new BufferedReader(new InputStreamReader(new FileInputStream(cgf + "/tasks"), "UTF-8"))) { - if ((str = inl.readLine()) != null) { + str = inl.readLine(); + if (str != null) { LOG.debug("First line in cgroup tasks file: " + cgf + " " + str); } } catch (IOException e) { @@ -337,9 +344,9 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { (containerVCores * yarnProcessors) / (float) nodeVCores; int[] limits = getOverallLimits(containerCPU); updateCgroup(CONTROLLER_CPU, containerName, CPU_PERIOD_US, - String.valueOf(limits[0])); + String.valueOf(limits[0])); updateCgroup(CONTROLLER_CPU, containerName, CPU_QUOTA_US, - String.valueOf(limits[1])); + String.valueOf(limits[1])); } } } @@ -400,6 +407,8 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { private Map> parseMtab() throws IOException { Map> ret = new HashMap>(); BufferedReader in = null; + Set validCgroups = + CGroupsHandler.CGroupController.getValidCGroups(); try { FileInputStream fis = new FileInputStream(new File(getMtabFileName())); @@ -415,8 +424,11 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { String options = m.group(3); if (type.equals(CGROUPS_FSTYPE)) { - HashSet value = Sets.newHashSet(options.split(",")); - ret.put(path, value); + Set cgroupList = + new HashSet<>(Arrays.asList(options.split(","))); + // Collect the valid subsystem names + cgroupList.retainAll(validCgroups); + ret.put(path, cgroupList); } } } @@ -448,7 +460,16 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler { private void initializeControllerPaths() throws IOException { String controllerPath; - Map> parsedMtab = parseMtab(); + Map> parsedMtab = null; + + if (this.cgroupMountPath != null && !this.cgroupMount) { + parsedMtab = ResourceHandlerModule. 
+ parseConfiguredCGroupPath(this.cgroupMountPath); + } + + if (parsedMtab == null) { + parsedMtab = parseMtab(); + } // CPU diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java index d32b271e46a..6425da64932 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java @@ -31,17 +31,17 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.BODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class AllApplicationsPage extends NMView { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); setTitle("Applications running on this node"); set(DATATABLES_ID, "applications"); @@ -88,22 +88,22 @@ public class AllApplicationsPage extends NMView { .table("#applications") .thead() .tr() - .td()._("ApplicationId")._() - .td()._("ApplicationState")._() - ._() - ._() + .td().__("ApplicationId").__() + .td().__("ApplicationState").__() + .__() + .__() .tbody(); for (Entry entry : this.nmContext .getApplications().entrySet()) { AppInfo info = new AppInfo(entry.getValue()); tableBody .tr() - .td().a(url("application", info.getId()), info.getId())._() - .td()._(info.getState()) - ._() - ._(); + .td().a(url("application", info.getId()), info.getId()).__() + .td().__(info.getState()) + .__() + .__(); } - tableBody._()._()._(); + tableBody.__().__().__(); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java index 24b85753b18..3fc6f3cb9df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java @@ -31,17 +31,17 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; 
import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.BODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class AllContainersPage extends NMView { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); setTitle("All containers running on this node"); set(DATATABLES_ID, "containers"); @@ -82,24 +82,24 @@ public class AllContainersPage extends NMView { .table("#containers") .thead() .tr() - .td()._("ContainerId")._() - .td()._("ContainerState")._() - .td()._("logs")._() - ._() - ._().tbody(); + .td().__("ContainerId").__() + .td().__("ContainerState").__() + .td().__("logs").__() + .__() + .__().tbody(); for (Entry entry : this.nmContext .getContainers().entrySet()) { ContainerInfo info = new ContainerInfo(this.nmContext, entry.getValue()); tableBody .tr() .td().a(url("container", info.getId()), info.getId()) - ._() - .td()._(info.getState())._() + .__() + .td().__(info.getState()).__() .td() - .a(url(info.getShortLogLink()), "logs")._() - ._(); + .a(url(info.getShortLogLink()), "logs").__() + .__(); } - tableBody._()._()._(); + tableBody.__().__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java index 2783b18699c..00f80ef3b88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java @@ -30,12 +30,11 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -43,7 +42,7 @@ import com.google.inject.Inject; public class ApplicationPage extends NMView implements YarnWebParams { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void 
preHead(Page.HTML<__> html) { commonPreHead(html); set(DATATABLES_ID, "containers"); @@ -80,30 +79,30 @@ public class ApplicationPage extends NMView implements YarnWebParams { try { applicationID = ApplicationId.fromString($(APPLICATION_ID)); } catch (IllegalArgumentException e) { - html.p()._("Invalid Application Id " + $(APPLICATION_ID))._(); + html.p().__("Invalid Application Id " + $(APPLICATION_ID)).__(); return; } DIV div = html.div("#content"); Application app = this.nmContext.getApplications().get(applicationID); if (app == null) { div.h1("Unknown application with id " + applicationID - + ". Application might have been completed")._(); + + ". Application might have been completed").__(); return; } AppInfo info = new AppInfo(app); info("Application's information") - ._("ApplicationId", info.getId()) - ._("ApplicationState", info.getState()) - ._("User", info.getUser()); - TABLE containersListBody = html._(InfoBlock.class) + .__("ApplicationId", info.getId()) + .__("ApplicationState", info.getState()) + .__("User", info.getUser()); + TABLE containersListBody = html.__(InfoBlock.class) .table("#containers"); for (String containerIdStr : info.getContainers()) { containersListBody .tr().td() .a(url("container", containerIdStr), containerIdStr) - ._()._(); + .__().__(); } - containersListBody._(); + containersListBody.__(); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java index 3e5f4d2e49d..f619e2ff483 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java @@ -37,12 +37,11 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.Context; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.NotFoundException; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.PRE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.PRE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -51,7 +50,7 @@ public class ContainerLogsPage extends NMView { public static final String REDIRECT_URL = "redirect.url"; - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { String redirectUrl = $(REDIRECT_URL); if (redirectUrl == null || redirectUrl.isEmpty()) { set(TITLE, join("Logs for ", $(CONTAINER_ID))); @@ -142,10 +141,10 @@ public class ContainerLogsPage extends NMView { try { long toRead = end - start; if (toRead < logFile.length()) { - html.p()._("Showing " + toRead + " bytes. Click ") + html.p().__("Showing " + toRead + " bytes. Click ") .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), logFile.getName(), "?start=0"), "here"). 
- _(" for full log")._(); + __(" for full log").__(); } IOUtils.skipFully(logByteStream, start); @@ -160,12 +159,12 @@ public class ContainerLogsPage extends NMView { while ((len = reader.read(cbuf, 0, currentToRead)) > 0 && toRead > 0) { - pre._(new String(cbuf, 0, len)); + pre.__(new String(cbuf, 0, len)); toRead = toRead - len; currentToRead = toRead > bufferSize ? bufferSize : (int) toRead; } - pre._(); + pre.__(); reader.close(); } catch (IOException e) { @@ -199,7 +198,7 @@ public class ContainerLogsPage extends NMView { .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), logFile.getName(), "?start=-4096"), logFile.getName() + " : Total file length is " - + logFile.length() + " bytes.")._(); + + logFile.length() + " bytes.").__(); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java index 4beccc937a4..8117dcad691 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java @@ -25,11 +25,10 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -38,7 +37,7 @@ import com.google.inject.Inject; public class ContainerPage extends NMView implements YarnWebParams { @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); setTitle("Container " + $(CONTAINER_ID)); @@ -65,7 +64,7 @@ public class ContainerPage extends NMView implements YarnWebParams { try { containerID = ContainerId.fromString($(CONTAINER_ID)); } catch (IllegalArgumentException e) { - html.p()._("Invalid containerId " + $(CONTAINER_ID))._(); + html.p().__("Invalid containerId " + $(CONTAINER_ID)).__(); return; } @@ -73,22 +72,22 @@ public class ContainerPage extends NMView implements YarnWebParams { Container container = this.nmContext.getContainers().get(containerID); if (container == null) { div.h1("Unknown Container. 
Container might have completed, " - + "please go back to the previous page and retry.")._(); + + "please go back to the previous page and retry.").__(); return; } ContainerInfo info = new ContainerInfo(this.nmContext, container); info("Container information") - ._("ContainerID", info.getId()) - ._("ContainerState", info.getState()) - ._("ExitStatus", info.getExitStatus()) - ._("Diagnostics", info.getDiagnostics()) - ._("User", info.getUser()) - ._("TotalMemoryNeeded", info.getMemoryNeeded()) - ._("TotalVCoresNeeded", info.getVCoresNeeded()) - ._("ExecutionType", info.getExecutionType()) - ._("logs", info.getShortLogLink(), "Link to logs"); - html._(InfoBlock.class); + .__("ContainerID", info.getId()) + .__("ContainerState", info.getState()) + .__("ExitStatus", info.getExitStatus()) + .__("Diagnostics", info.getDiagnostics()) + .__("User", info.getUser()) + .__("TotalMemoryNeeded", info.getMemoryNeeded()) + .__("TotalVCoresNeeded", info.getVCoresNeeded()) + .__("ExecutionType", info.getExecutionType()) + .__("logs", info.getShortLogLink(), "Link to logs"); + html.__(InfoBlock.class); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java index 7475c4d2f51..5e81ed5f536 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMErrorsAndWarningsPage.java @@ -32,7 +32,7 @@ public class NMErrorsAndWarningsPage extends NMView { } @Override - protected void preHead(HtmlPage.Page.HTML html) { + protected void preHead(HtmlPage.Page.HTML<__> html) { commonPreHead(html); String title = "Errors and Warnings in the NodeManager"; setTitle(title); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java index dc21b4afeab..a76d2eff428 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMView.java @@ -27,11 +27,11 @@ import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout; public class NMView extends TwoColumnLayout { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); } - protected void commonPreHead(Page.HTML<_> html) { + protected void commonPreHead(Page.HTML<__> html) { set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java index 857a4f91292..0a2731eb5fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java @@ -24,7 +24,7 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender; import org.apache.hadoop.yarn.webapp.YarnWebParams; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; @@ -55,29 +55,29 @@ public class NavBlock extends HtmlBlock implements YarnWebParams { WebAppUtils.getResolvedRMWebAppURLWithScheme(this.conf); Hamlet.UL> ul = html .div("#nav") - .h3()._("ResourceManager")._() + .h3().__("ResourceManager").__() .ul() - .li().a(RMWebAppURL, "RM Home")._()._() - .h3()._("NodeManager")._() // TODO: Problem if no header like this + .li().a(RMWebAppURL, "RM Home").__().__() + .h3().__("NodeManager").__() // TODO: Problem if no header like this .ul() .li() - .a(url("node"), "Node Information")._() + .a(url("node"), "Node Information").__() .li() .a(url("allApplications"), "List of Applications") - ._() + .__() .li() - .a(url("allContainers"), "List of Containers")._() - ._() + .a(url("allContainers"), "List of Containers").__() + .__() .h3("Tools") .ul() - .li().a("/conf", "Configuration")._() - .li().a("/logs", "Local logs")._() - .li().a("/stacks", "Server stacks")._() - .li().a("/jmx?qry=Hadoop:*", "Server metrics")._(); + .li().a("/conf", "Configuration").__() + .li().a("/logs", "Local logs").__() + .li().a("/stacks", "Server stacks").__() + .li().a("/jmx?qry=Hadoop:*", "Server metrics").__(); if (addErrorsAndWarningsLink) { - ul.li().a(url("errors-and-warnings"), "Errors/Warnings")._(); + ul.li().a(url("errors-and-warnings"), "Errors/Warnings").__(); } - ul._()._(); + ul.__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java index f51f0c551af..7005f411c5c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java @@ -28,7 +28,7 @@ import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.ResourceView; import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.HTML; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -39,7 +39,7 @@ public class NodePage extends NMView 
{ private static final long BYTES_IN_MB = 1024 * 1024; @Override - protected void commonPreHead(HTML<_> html) { + protected void commonPreHead(HTML<__> html) { super.commonPreHead(html); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}"); @@ -65,29 +65,29 @@ public class NodePage extends NMView { protected void render(Block html) { NodeInfo info = new NodeInfo(this.context, this.resourceView); info("NodeManager information") - ._("Total Vmem allocated for Containers", + .__("Total Vmem allocated for Containers", StringUtils.byteDesc(info.getTotalVmemAllocated() * BYTES_IN_MB)) - ._("Vmem enforcement enabled", + .__("Vmem enforcement enabled", info.isVmemCheckEnabled()) - ._("Total Pmem allocated for Container", + .__("Total Pmem allocated for Container", StringUtils.byteDesc(info.getTotalPmemAllocated() * BYTES_IN_MB)) - ._("Pmem enforcement enabled", + .__("Pmem enforcement enabled", info.isPmemCheckEnabled()) - ._("Total VCores allocated for Containers", + .__("Total VCores allocated for Containers", String.valueOf(info.getTotalVCoresAllocated())) - ._("NodeHealthyStatus", + .__("NodeHealthyStatus", info.getHealthStatus()) - ._("LastNodeHealthTime", new Date( + .__("LastNodeHealthTime", new Date( info.getLastNodeUpdateTime())) - ._("NodeHealthReport", + .__("NodeHealthReport", info.getHealthReport()) - ._("NodeManager started on", new Date( + .__("NodeManager started on", new Date( info.getNMStartupTime())) - ._("NodeManager Version:", info.getNMBuildVersion() + + .__("NodeManager Version:", info.getNMBuildVersion() + " on " + info.getNMVersionBuiltOn()) - ._("Hadoop Version:", info.getHadoopBuildVersion() + + .__("Hadoop Version:", info.getHadoopBuildVersion() + " on " + info.getHadoopVersionBuiltOn()); - html._(InfoBlock.class); + html.__(InfoBlock.class); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c index a6d7a9ce338..12dbc4c5e4a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c @@ -20,35 +20,55 @@ #include #include "configuration.h" -#include "container-executor.h" +#include "util.h" +#define __STDC_FORMAT_MACROS #include #include #include -#include #include #include #include -#include -#include -#include #define MAX_SIZE 10 +static const char COMMENT_BEGIN_CHAR = '#'; +static const char SECTION_LINE_BEGIN_CHAR = '['; +static const char SECTION_LINE_END_CHAR = ']'; + +//clean up method for freeing section +void free_section(struct section *section) { + int i = 0; + for (i = 0; i < section->size; i++) { + if (section->kv_pairs[i]->key != NULL) { + free((void *) section->kv_pairs[i]->key); + } + if (section->kv_pairs[i]->value != NULL) { + free((void *) section->kv_pairs[i]->value); + } + free(section->kv_pairs[i]); + } + if (section->kv_pairs) { + free(section->kv_pairs); + section->kv_pairs = NULL; + } + if (section->name) { + free(section->name); + section->name = NULL; + } + section->size = 0; +} + //clean up method for freeing configuration -void free_configurations(struct configuration *cfg) { +void free_configuration(struct configuration *cfg) { int i = 0; for (i = 0; i < 
cfg->size; i++) { - if (cfg->confdetails[i]->key != NULL) { - free((void *)cfg->confdetails[i]->key); + if (cfg->sections[i] != NULL) { + free_section(cfg->sections[i]); } - if (cfg->confdetails[i]->value != NULL) { - free((void *)cfg->confdetails[i]->value); - } - free(cfg->confdetails[i]); } - if (cfg->size > 0) { - free(cfg->confdetails); + if (cfg->sections) { + free(cfg->sections); } cfg->size = 0; } @@ -65,13 +85,13 @@ static int is_only_root_writable(const char *file) { } if (file_stat.st_uid != 0) { fprintf(ERRORFILE, "File %s must be owned by root, but is owned by %" PRId64 "\n", - file, (int64_t)file_stat.st_uid); + file, (int64_t) file_stat.st_uid); return 0; } if ((file_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0) { fprintf(ERRORFILE, - "File %s must not be world or group writable, but is %03lo\n", - file, (unsigned long)file_stat.st_mode & (~S_IFMT)); + "File %s must not be world or group writable, but is %03lo\n", + file, (unsigned long) file_stat.st_mode & (~S_IFMT)); return 0; } return 1; @@ -82,9 +102,9 @@ static int is_only_root_writable(const char *file) { * * NOTE: relative path names are resolved relative to the second argument not getwd(3) */ -char *resolve_config_path(const char* file_name, const char *root) { +char *resolve_config_path(const char *file_name, const char *root) { const char *real_fname = NULL; - char buffer[EXECUTOR_PATH_MAX*2 + 1]; + char buffer[EXECUTOR_PATH_MAX * 2 + 1]; if (file_name[0] == '/') { real_fname = file_name; @@ -96,7 +116,7 @@ char *resolve_config_path(const char* file_name, const char *root) { #ifdef HAVE_CANONICALIZE_FILE_NAME char * ret = (real_fname == NULL) ? NULL : canonicalize_file_name(real_fname); #else - char * ret = (real_fname == NULL) ? NULL : realpath(real_fname, NULL); + char *ret = (real_fname == NULL) ? NULL : realpath(real_fname, NULL); #endif #ifdef DEBUG fprintf(stderr,"ret = %s\n", ret); @@ -112,10 +132,19 @@ char *resolve_config_path(const char* file_name, const char *root) { * configuration and potentially cause damage. * returns 0 if permissions are ok */ -int check_configuration_permissions(const char* file_name) { +int check_configuration_permissions(const char *file_name) { + if (!file_name) { + return -1; + } + // copy the input so that we can modify it with dirname - char* dir = strdup(file_name); - char* buffer = dir; + char *dir = strdup(file_name); + if (!dir) { + fprintf(stderr, "Failed to make a copy of filename in %s.\n", __func__); + return -1; + } + + char *buffer = dir; do { if (!is_only_root_writable(dir)) { free(buffer); @@ -128,167 +157,396 @@ int check_configuration_permissions(const char* file_name) { } /** - * Trim whitespace from beginning and end. -*/ -char* trim(char* input) -{ - char *val_begin; - char *val_end; - char *ret; + * Read a line from the the config file and return it without the newline. + * The caller must free the memory allocated. + */ +static char *read_config_line(FILE *conf_file) { + char *line = NULL; + size_t linesize = 100000; + ssize_t size_read = 0; + size_t eol = 0; - if (input == NULL) { - return NULL; + line = (char *) malloc(linesize); + if (line == NULL) { + fprintf(ERRORFILE, "malloc failed while reading configuration file.\n"); + exit(OUT_OF_MEMORY); + } + size_read = getline(&line, &linesize, conf_file); + + //feof returns true only after we read past EOF. 
+ //so a file with no new line, at last can reach this place + //if size_read returns negative check for eof condition + if (size_read == -1) { + free(line); + line = NULL; + if (!feof(conf_file)) { + fprintf(ERRORFILE, "Line read returned -1 without eof\n"); + exit(INVALID_CONFIG_FILE); } - - val_begin = input; - val_end = input + strlen(input); - - while (val_begin < val_end && isspace(*val_begin)) - val_begin++; - while (val_end > val_begin && isspace(*(val_end - 1))) - val_end--; - - ret = (char *) malloc( - sizeof(char) * (val_end - val_begin + 1)); - if (ret == NULL) { - fprintf(ERRORFILE, "Allocation error\n"); - exit(OUT_OF_MEMORY); + } else { + eol = strlen(line) - 1; + if (line[eol] == '\n') { + //trim the ending new line + line[eol] = '\0'; } - - strncpy(ret, val_begin, val_end - val_begin); - ret[val_end - val_begin] = '\0'; - return ret; + } + return line; } -void read_config(const char* file_name, struct configuration *cfg) { - FILE *conf_file; - char *line; - char *equaltok; - char *temp_equaltok; - size_t linesize = 1000; - int size_read = 0; - - if (file_name == NULL) { - fprintf(ERRORFILE, "Null configuration filename passed in\n"); - exit(INVALID_CONFIG_FILE); +/** + * Return if the given line is a comment line. + * + * @param line the line to check + * + * @return 1 if the line is a comment line, 0 otherwise + */ +static int is_comment_line(const char *line) { + if (line != NULL) { + return (line[0] == COMMENT_BEGIN_CHAR); } + return 0; +} - #ifdef DEBUG - fprintf(LOGFILE, "read_config :Conf file name is : %s \n", file_name); - #endif - - //allocate space for ten configuration items. - cfg->confdetails = (struct confentry **) malloc(sizeof(struct confentry *) - * MAX_SIZE); - cfg->size = 0; - conf_file = fopen(file_name, "r"); - if (conf_file == NULL) { - fprintf(ERRORFILE, "Invalid conf file provided : %s \n", file_name); - exit(INVALID_CONFIG_FILE); +/** + * Return if the given line is a section start line. + * + * @param line the line to check + * + * @return 1 if the line is a section start line, 0 otherwise + */ +static int is_section_start_line(const char *line) { + size_t len = 0; + if (line != NULL) { + len = strlen(line) - 1; + return (line[0] == SECTION_LINE_BEGIN_CHAR + && line[len] == SECTION_LINE_END_CHAR); } - while(!feof(conf_file)) { - line = (char *) malloc(linesize); - if(line == NULL) { - fprintf(ERRORFILE, "malloc failed while reading configuration file.\n"); + return 0; +} + +/** + * Return the name of the section from the given section start line. The + * caller must free the memory used. + * + * @param line the line to extract the section name from + * + * @return string with the name of the section, NULL otherwise + */ +static char *get_section_name(const char *line) { + char *name = NULL; + size_t len; + + if (is_section_start_line(line)) { + // length of the name is the line - 2(to account for '[' and ']') + len = strlen(line) - 2; + name = (char *) malloc(len + 1); + if (name == NULL) { + fprintf(ERRORFILE, "malloc failed while reading section name.\n"); exit(OUT_OF_MEMORY); } - size_read = getline(&line,&linesize,conf_file); + strncpy(name, line + sizeof(char), len); + name[len] = '\0'; + } + return name; +} - //feof returns true only after we read past EOF. - //so a file with no new line, at last can reach this place - //if size_read returns negative check for eof condition - if (size_read == -1) { - free(line); - if(!feof(conf_file)){ - exit(INVALID_CONFIG_FILE); - } else { - break; - } +/** + * Read an entry for the section from the line. 
Function returns 0 if an entry + * was found, non-zero otherwise. Return values less than 0 indicate an error + * with the config file. + * + * @param line the line to read the entry from + * @param section the struct to read the entry into + * + * @return 0 if an entry was found + * <0 for config file errors + * >0 for issues such as empty line + * + */ +static int read_section_entry(const char *line, struct section *section) { + char *equaltok; + char *temp_equaltok; + const char *splitter = "="; + char *buffer; + size_t len = 0; + if (line == NULL || section == NULL) { + fprintf(ERRORFILE, "NULL params passed to read_section_entry"); + return -1; + } + len = strlen(line); + if (len == 0) { + return 1; + } + if ((section->size) % MAX_SIZE == 0) { + section->kv_pairs = (struct kv_pair **) realloc( + section->kv_pairs, + sizeof(struct kv_pair *) * (MAX_SIZE + section->size)); + if (section->kv_pairs == NULL) { + fprintf(ERRORFILE, + "Failed re-allocating memory for configuration items\n"); + exit(OUT_OF_MEMORY); + } + } + + buffer = strdup(line); + if (!buffer) { + fprintf(ERRORFILE, "Failed to allocating memory for line, %s\n", __func__); + exit(OUT_OF_MEMORY); + } + + //tokenize first to get key and list of values. + //if no equals is found ignore this line, can be an empty line also + equaltok = strtok_r(buffer, splitter, &temp_equaltok); + if (equaltok == NULL) { + fprintf(ERRORFILE, "Error with line '%s', no '=' found\n", buffer); + exit(INVALID_CONFIG_FILE); + } + section->kv_pairs[section->size] = (struct kv_pair *) malloc( + sizeof(struct kv_pair)); + if (section->kv_pairs[section->size] == NULL) { + fprintf(ERRORFILE, "Failed allocating memory for single section item\n"); + exit(OUT_OF_MEMORY); + } + memset(section->kv_pairs[section->size], 0, + sizeof(struct kv_pair)); + section->kv_pairs[section->size]->key = trim(equaltok); + + equaltok = strtok_r(NULL, splitter, &temp_equaltok); + if (equaltok == NULL) { + // this can happen because no value was set + // e.g. banned.users=#this is a comment + int has_values = 1; + if (strstr(line, splitter) == NULL) { + fprintf(ERRORFILE, "configuration tokenization failed, error with line %s\n", line); + has_values = 0; + } + + // It is not a valid line, free memory. + free((void *) section->kv_pairs[section->size]->key); + free((void *) section->kv_pairs[section->size]); + section->kv_pairs[section->size] = NULL; + free(buffer); + + // Return -1 when no values + if (!has_values) { + return -1; + } + + // Return 2 for comments + return 2; + } + +#ifdef DEBUG + fprintf(LOGFILE, "read_config : Adding conf value : %s \n", equaltok); +#endif + + section->kv_pairs[section->size]->value = trim(equaltok); + section->size++; + free(buffer); + return 0; +} + +/** + * Remove any trailing comment from the supplied line. Function modifies the + * argument provided. + * + * @param line the line from which to remove the comment + */ +static void trim_comment(char *line) { + char *begin_comment = NULL; + if (line != NULL) { + begin_comment = strchr(line, COMMENT_BEGIN_CHAR); + if (begin_comment != NULL) { + *begin_comment = '\0'; + } + } +} + +/** + * Allocate a section struct and initialize it. The memory must be freed by + * the caller. Function calls exit if any error occurs. 
+ * + * @return pointer to the allocated section struct + * + */ +static struct section *allocate_section() { + struct section *section = (struct section *) malloc(sizeof(struct section)); + if (section == NULL) { + fprintf(ERRORFILE, "malloc failed while allocating section.\n"); + exit(OUT_OF_MEMORY); + } + section->name = NULL; + section->kv_pairs = NULL; + section->size = 0; + return section; +} + +/** + * Populate the given section struct with fields from the config file. + * + * @param conf_file the file to read from + * @param section pointer to the section struct to populate + * + */ +static void populate_section_fields(FILE *conf_file, struct section *section) { + char *line; + long int offset = 0; + while (!feof(conf_file)) { + offset = ftell(conf_file); + line = read_config_line(conf_file); + if (line != NULL) { + if (!is_comment_line(line)) { + trim_comment(line); + if (!is_section_start_line(line)) { + if (section->name != NULL) { + if (read_section_entry(line, section) < 0) { + fprintf(ERRORFILE, "Error parsing line %s", line); + exit(INVALID_CONFIG_FILE); + } + } else { + fprintf(ERRORFILE, "Line '%s' doesn't belong to a section\n", + line); + exit(INVALID_CONFIG_FILE); + } + } else { + if (section->name == NULL) { + section->name = get_section_name(line); + if (strlen(section->name) == 0) { + fprintf(ERRORFILE, "Empty section name"); + exit(INVALID_CONFIG_FILE); + } + } else { + // we've reached the next section + fseek(conf_file, offset, SEEK_SET); + free(line); + return; + } + } + } + free(line); + } + } +} + +/** + * Read the section current section from the conf file. Section start is + * marked by lines of the form '[section-name]' and continue till the next + * section. + */ +static struct section *read_section(FILE *conf_file) { + struct section *section = allocate_section(); + populate_section_fields(conf_file, section); + if (section->name == NULL) { + free_section(section); + section = NULL; + } + return section; +} + +/** + * Merge two sections and free the second one after the merge, if desired. 
+ * @param section1 the first section + * @param section2 the second section + * @param free_second_section free the second section if set + */ +static void merge_sections(struct section *section1, struct section *section2, const int free_second_section) { + int i = 0; + section1->kv_pairs = (struct kv_pair **) realloc( + section1->kv_pairs, + sizeof(struct kv_pair *) * (section1->size + section2->size)); + if (section1->kv_pairs == NULL) { + fprintf(ERRORFILE, + "Failed re-allocating memory for configuration items\n"); + exit(OUT_OF_MEMORY); + } + for (i = 0; i < section2->size; ++i) { + section1->kv_pairs[section1->size + i] = section2->kv_pairs[i]; + } + section1->size += section2->size; + if (free_second_section) { + free(section2->name); + memset(section2, 0, sizeof(*section2)); + free(section2); + } +} + +int read_config(const char *file_path, struct configuration *cfg) { + FILE *conf_file; + + if (file_path == NULL) { + fprintf(ERRORFILE, "Null configuration filename passed in\n"); + return INVALID_CONFIG_FILE; + } + +#ifdef DEBUG + fprintf(LOGFILE, "read_config :Conf file name is : %s \n", file_path); +#endif + + cfg->size = 0; + conf_file = fopen(file_path, "r"); + if (conf_file == NULL) { + fprintf(ERRORFILE, "Invalid conf file provided, unable to open file" + " : %s \n", file_path); + return (INVALID_CONFIG_FILE); + } + + cfg->sections = (struct section **) malloc( + sizeof(struct section *) * MAX_SIZE); + if (!cfg->sections) { + fprintf(ERRORFILE, + "Failed to allocate memory for configuration sections\n"); + exit(OUT_OF_MEMORY); + } + + // populate any entries in the older format(no sections) + cfg->sections[cfg->size] = allocate_section(); + cfg->sections[cfg->size]->name = strdup(""); + populate_section_fields(conf_file, cfg->sections[cfg->size]); + if (cfg->sections[cfg->size]) { + if (cfg->sections[cfg->size]->size) { + cfg->size++; + } else { + free_section(cfg->sections[cfg->size]); + } + } + + // populate entries in the sections format + while (!feof(conf_file)) { + cfg->sections[cfg->size] = NULL; + struct section *new_section = read_section(conf_file); + if (new_section != NULL) { + struct section *existing_section = + get_configuration_section(new_section->name, cfg); + if (existing_section != NULL) { + merge_sections((struct section *) existing_section, new_section, 1); + } else { + cfg->sections[cfg->size] = new_section; + } + } + + // Check if we need to expand memory for sections. + if (cfg->sections[cfg->size]) { + if ((cfg->size + 1) % MAX_SIZE == 0) { + cfg->sections = (struct section **) realloc(cfg->sections, + sizeof(struct sections *) * (MAX_SIZE + cfg->size)); + if (cfg->sections == NULL) { + fprintf(ERRORFILE, + "Failed re-allocating memory for configuration items\n"); + exit(OUT_OF_MEMORY); + } + } + cfg->size++; } - int eol = strlen(line) - 1; - if(line[eol] == '\n') { - //trim the ending new line - line[eol] = '\0'; - } - //comment line - if(line[0] == '#') { - free(line); - continue; - } - //tokenize first to get key and list of values. 
- //if no equals is found ignore this line, can be an empty line also - equaltok = strtok_r(line, "=", &temp_equaltok); - if(equaltok == NULL) { - free(line); - continue; - } - cfg->confdetails[cfg->size] = (struct confentry *) malloc( - sizeof(struct confentry)); - if(cfg->confdetails[cfg->size] == NULL) { - fprintf(LOGFILE, - "Failed allocating memory for single configuration item\n"); - goto cleanup; - } - - #ifdef DEBUG - fprintf(LOGFILE, "read_config : Adding conf key : %s \n", equaltok); - #endif - - memset(cfg->confdetails[cfg->size], 0, sizeof(struct confentry)); - cfg->confdetails[cfg->size]->key = trim(equaltok); - - equaltok = strtok_r(NULL, "=", &temp_equaltok); - if (equaltok == NULL) { - fprintf(LOGFILE, "configuration tokenization failed \n"); - goto cleanup; - } - //means value is commented so don't store the key - if(equaltok[0] == '#') { - free(line); - free((void *)cfg->confdetails[cfg->size]->key); - free(cfg->confdetails[cfg->size]); - continue; - } - - #ifdef DEBUG - fprintf(LOGFILE, "read_config : Adding conf value : %s \n", equaltok); - #endif - - cfg->confdetails[cfg->size]->value = trim(equaltok); - if((cfg->size + 1) % MAX_SIZE == 0) { - cfg->confdetails = (struct confentry **) realloc(cfg->confdetails, - sizeof(struct confentry **) * (MAX_SIZE + cfg->size)); - if (cfg->confdetails == NULL) { - fprintf(LOGFILE, - "Failed re-allocating memory for configuration items\n"); - goto cleanup; - } - } - if(cfg->confdetails[cfg->size]) { - cfg->size++; - } - - free(line); } - //close the file fclose(conf_file); if (cfg->size == 0) { - fprintf(ERRORFILE, "Invalid configuration provided in %s\n", file_name); - exit(INVALID_CONFIG_FILE); + free_configuration(cfg); + fprintf(ERRORFILE, "Invalid configuration provided in %s\n", file_path); + return INVALID_CONFIG_FILE; } - - //clean up allocated file name - return; - //free spaces alloced. - cleanup: - if (line != NULL) { - free(line); - } - fclose(conf_file); - free_configurations(cfg); - return; + return 0; } /* @@ -297,11 +555,14 @@ void read_config(const char* file_name, struct configuration *cfg) { * array, next time onwards used the populated array. * */ -char * get_value(const char* key, struct configuration *cfg) { +char *get_section_value(const char *key, const struct section *section) { int count; - for (count = 0; count < cfg->size; count++) { - if (strcmp(cfg->confdetails[count]->key, key) == 0) { - return strdup(cfg->confdetails[count]->value); + if (key == NULL || section == NULL) { + return NULL; + } + for (count = 0; count < section->size; count++) { + if (strcmp(section->kv_pairs[count]->key, key) == 0) { + return strdup(section->kv_pairs[count]->value); } } return NULL; @@ -311,61 +572,80 @@ char * get_value(const char* key, struct configuration *cfg) { * Function to return an array of values for a key. * Value delimiter is assumed to be a ','. */ -char ** get_values(const char * key, struct configuration *cfg) { - char *value = get_value(key, cfg); - return extract_values_delim(value, ","); +char **get_section_values(const char *key, const struct section *cfg) { + return get_section_values_delimiter(key, cfg, ","); } /** * Function to return an array of values for a key, using the specified delimiter. 
*/ -char ** get_values_delim(const char * key, struct configuration *cfg, - const char *delim) { - char *value = get_value(key, cfg); - return extract_values_delim(value, delim); +char **get_section_values_delimiter(const char *key, const struct section *cfg, + const char *delim) { + if (key == NULL || cfg == NULL || delim == NULL) { + return NULL; + } + char *value = get_section_value(key, cfg); + char **split_values = split_delimiter(value, delim); + + if (value) { + free(value); + } + + return split_values; } -char ** extract_values_delim(char *value, const char *delim) { - char ** toPass = NULL; - char *tempTok = NULL; - char *tempstr = NULL; - int size = 0; - int toPassSize = MAX_SIZE; - //first allocate any array of 10 - if(value != NULL) { - toPass = (char **) malloc(sizeof(char *) * toPassSize); - tempTok = strtok_r((char *)value, delim, &tempstr); - while (tempTok != NULL) { - toPass[size++] = tempTok; - if(size == toPassSize) { - toPassSize += MAX_SIZE; - toPass = (char **) realloc(toPass,(sizeof(char *) * toPassSize)); - } - tempTok = strtok_r(NULL, delim, &tempstr); +char *get_configuration_value(const char *key, const char *section, + const struct configuration *cfg) { + const struct section *section_ptr; + if (key == NULL || section == NULL || cfg == NULL) { + return NULL; + } + section_ptr = get_configuration_section(section, cfg); + if (section_ptr != NULL) { + return get_section_value(key, section_ptr); + } + return NULL; +} + +char **get_configuration_values(const char *key, const char *section, + const struct configuration *cfg) { + const struct section *section_ptr; + if (key == NULL || section == NULL || cfg == NULL) { + return NULL; + } + section_ptr = get_configuration_section(section, cfg); + if (section_ptr != NULL) { + return get_section_values(key, section_ptr); + } + return NULL; +} + +char **get_configuration_values_delimiter(const char *key, const char *section, + const struct configuration *cfg, const char *delim) { + const struct section *section_ptr; + if (key == NULL || section == NULL || cfg == NULL || delim == NULL) { + return NULL; + } + section_ptr = get_configuration_section(section, cfg); + if (section_ptr != NULL) { + return get_section_values_delimiter(key, section_ptr, delim); + } + return NULL; +} + +struct section *get_configuration_section(const char *section, + const struct configuration *cfg) { + int i = 0; + if (cfg == NULL || section == NULL) { + return NULL; + } + for (i = 0; i < cfg->size; ++i) { + if (strcmp(cfg->sections[i]->name, section) == 0) { + return cfg->sections[i]; } } - if (toPass != NULL) { - toPass[size] = NULL; - } - return toPass; -} - -/** - * Extracts array of values from the '%' separated list of values. 
- */ -char ** extract_values(char *value) { - return extract_values_delim(value, "%"); -} - -// free an entry set of values -void free_values(char** values) { - if (*values != NULL) { - free(*values); - } - if (values != NULL) { - free(values); - } + return NULL; } /** @@ -376,12 +656,12 @@ int get_kv_key(const char *input, char *out, size_t out_len) { if (input == NULL) return -EINVAL; - char *split = strchr(input, '='); + const char *split = strchr(input, '='); if (split == NULL) return -EINVAL; - int key_len = split - input; + unsigned long key_len = split - input; if (out_len < (key_len + 1) || out == NULL) return -ENAMETOOLONG; @@ -400,13 +680,13 @@ int get_kv_value(const char *input, char *out, size_t out_len) { if (input == NULL) return -EINVAL; - char *split = strchr(input, '='); + const char *split = strchr(input, '='); if (split == NULL) return -EINVAL; split++; // advance past '=' to the value - int val_len = (input + strlen(input)) - split; + unsigned long val_len = (input + strlen(input)) - split; if (out_len < (val_len + 1) || out == NULL) return -ENAMETOOLONG; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h index 2d14867a0c5..1ea5561bc7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h @@ -16,6 +16,9 @@ * limitations under the License. */ +#ifndef __YARN_CONTAINER_EXECUTOR_CONFIG_H__ +#define __YARN_CONTAINER_EXECUTOR_CONFIG_H__ + #ifdef __FreeBSD__ #define _WITH_GETLINE #endif @@ -23,62 +26,160 @@ #include /** Define a platform-independent constant instead of using PATH_MAX */ - #define EXECUTOR_PATH_MAX 4096 -/** - * Ensure that the configuration file and all of the containing directories - * are only writable by root. Otherwise, an attacker can change the - * configuration and potentially cause damage. - * returns 0 if permissions are ok - */ -int check_configuration_permissions(const char* file_name); - -/** - * Return a string with the configuration file path name resolved via realpath(3) - * - * NOTE: relative path names are resolved relative to the second argument not getwd(3) - */ -char *resolve_config_path(const char* file_name, const char *root); - -// Config data structures. -struct confentry { +// Configuration data structures. +struct kv_pair { const char *key; const char *value; }; -struct configuration { +struct section { int size; - struct confentry **confdetails; + char *name; + struct kv_pair **kv_pairs; }; -// read the given configuration file into the specified config struct. -void read_config(const char* config_file, struct configuration *cfg); - -//method exposed to get the configurations -char *get_value(const char* key, struct configuration *cfg); - -//function to return array of values pointing to the key. Values are -//comma seperated strings. -char ** get_values(const char* key, struct configuration *cfg); +struct configuration { + int size; + struct section **sections; +}; /** - * Function to return an array of values for a key, using the specified - delimiter. 
+ * Function to ensure that the configuration file and all of the containing + * directories are only writable by root. Otherwise, an attacker can change + * the configuration and potentially cause damage. + * + * @param file_name name of the config file + * + * @returns 0 if permissions are correct, non-zero on error */ -char ** get_values_delim(const char * key, struct configuration *cfg, +int check_configuration_permissions(const char *file_name); + +/** + * Return a string with the configuration file path name resolved via + * realpath(3). Relative path names are resolved relative to the second + * argument and not getwd(3). It's up to the caller to free the returned + * value. + * + * @param file_name name of the config file + * @param root the path against which relative path names are to be resolved + * + * @returns the resolved configuration file path + */ +char* resolve_config_path(const char *file_name, const char *root); + +/** + * Read the given configuration file into the specified configuration struct. + * It's the responsibility of the caller to call free_configurations to free + * the allocated memory. The function will check to ensure that the + * configuration file has the appropriate owner and permissions. + * + * @param file_path name of the configuration file to be read + * @param cfg the configuration structure to be filled. + * + * @return 0 on success, non-zero if there was an error + */ +int read_config(const char *file_path, struct configuration *cfg); + +/** + * Get the value for a key in the specified section. It's up to the caller to + * free the memory used for storing the return value. + * + * @param key key the name of the key + * @param section the section to be looked up + * + * @return pointer to the value if the key was found, null otherwise + */ +char* get_section_value(const char *key, const struct section *section); + +/** + * Function to get the values for a given key in the specified section. + * The value is split by ",". It's up to the caller to free the memory used + * for storing the return values. + * + * @param key the key to be looked up + * @param section the section to be looked up + * + * @return array of values, null if the key was not found + */ +char** get_section_values(const char *key, const struct section *section); + +/** + * Function to get the values for a given key in the specified section. + * The value is split by the specified delimiter. It's up to the caller to + * free the memory used for storing the return values. + * + * @param key the key to be looked up + * @param section the section to be looked up + * @param delimiter the delimiter to be used to split the value + * + * @return array of values, null if the key was not found + */ +char** get_section_values_delimiter(const char *key, const struct section *section, const char *delim); -// Extracts array of values from the comma separated list of values. -char ** extract_values(char *value); +/** + * Get the value for a key in the specified section in the specified + * configuration. It's up to the caller to free the memory used for storing + * the return value. 
+ * + * @param key key the name of the key + * @param section the name section to be looked up + * @param cfg the configuration to be used + * + * @return pointer to the value if the key was found, null otherwise + */ +char* get_configuration_value(const char *key, const char* section, + const struct configuration *cfg); -char ** extract_values_delim(char *value, const char *delim); +/** + * Function to get the values for a given key in the specified section in the + * specified configuration. The value is split by ",". It's up to the caller to + * free the memory used for storing the return values. + * + * @param key the key to be looked up + * @param section the name of the section to be looked up + * @param cfg the configuration to be looked up + * + * @return array of values, null if the key was not found + */ +char** get_configuration_values(const char *key, const char* section, + const struct configuration *cfg); -// free the memory returned by get_values -void free_values(char** values); +/** + * Function to get the values for a given key in the specified section in the + * specified configuration. The value is split by the specified delimiter. + * It's up to the caller to free the memory used for storing the return values. + * + * @param key the key to be looked up + * @param section the name of the section to be looked up + * @param cfg the section to be looked up + * @param delimiter the delimiter to be used to split the value + * + * @return array of values, null if the key was not found + */ +char** get_configuration_values_delimiter(const char *key, const char* section, + const struct configuration *cfg, const char *delimiter); -//method to free allocated configuration -void free_configurations(struct configuration *cfg); +/** + * Function to retrieve the specified section from the configuration. + * + * @param section the name of the section to retrieve + * @param cfg the configuration structure to use + * + * @return pointer to section struct containing details of the section + * null on error + */ +struct section* get_configuration_section(const char *section, + const struct configuration *cfg); + +/** + * Method to free an allocated config struct. + * + * @param cfg pointer to the structure to free + */ +void free_configuration(struct configuration *cfg); /** * If str is a string of the form key=val, find 'key' @@ -106,11 +207,4 @@ int get_kv_key(const char *input, char *out, size_t out_len); */ int get_kv_value(const char *input, char *out, size_t out_len); -/** - * Trim whitespace from beginning and end. - * - * @param input Input string that needs to be trimmed - * - * @return the trimmed string allocated with malloc. 
I has to be freed by the caller -*/ -char* trim(char* input); +#endif diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c index 5070d62a945..560ec1823fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c @@ -18,6 +18,9 @@ #include "configuration.h" #include "container-executor.h" +#include "utils/string-utils.h" +#include "util.h" +#include "config.h" #include #include @@ -40,8 +43,7 @@ #include #include #include - -#include "config.h" +#include #ifndef HAVE_FCHMODAT #include "compat/fchmodat.h" @@ -79,13 +81,19 @@ static const char* TC_READ_STATS_OPTS [] = { "-s", "-b", NULL}; //struct to store the user details struct passwd *user_detail = NULL; +//Docker container related constants. +static const char* DOCKER_CONTAINER_NAME_PREFIX = "container_"; +static const char* DOCKER_CLIENT_CONFIG_ARG = "--config="; +static const char* DOCKER_PULL_COMMAND = "pull"; + FILE* LOGFILE = NULL; FILE* ERRORFILE = NULL; static uid_t nm_uid = -1; static gid_t nm_gid = -1; -struct configuration executor_cfg = {.size=0, .confdetails=NULL}; +struct configuration CFG = {.size=0, .sections=NULL}; +struct section executor_cfg = {.size=0, .kv_pairs=NULL}; char *concatenate(char *concat_pattern, char *return_path_name, int numArgs, ...); @@ -96,18 +104,25 @@ void set_nm_uid(uid_t user, gid_t group) { } //function used to load the configurations present in the secure config -void read_executor_config(const char* file_name) { - read_config(file_name, &executor_cfg); +void read_executor_config(const char *file_name) { + const struct section *tmp = NULL; + int ret = read_config(file_name, &CFG); + if (ret == 0) { + tmp = get_configuration_section("", &CFG); + if (tmp != NULL) { + executor_cfg = *tmp; + } + } } //function used to free executor configuration data void free_executor_configurations() { - free_configurations(&executor_cfg); + free_configuration(&CFG); } //Lookup nodemanager group from container executor configuration. 
char *get_nodemanager_group() { - return get_value(NM_GROUP_KEY, &executor_cfg); + return get_section_value(NM_GROUP_KEY, &executor_cfg); } int check_executor_permissions(char *executable_file) { @@ -424,8 +439,8 @@ int change_user(uid_t user, gid_t group) { } int is_feature_enabled(const char* feature_key, int default_value, - struct configuration *cfg) { - char *enabled_str = get_value(feature_key, cfg); + struct section *cfg) { + char *enabled_str = get_section_value(feature_key, cfg); int enabled = default_value; if (enabled_str != NULL) { @@ -746,7 +761,7 @@ static struct passwd* get_user_info(const char* user) { } int is_whitelisted(const char *user) { - char **whitelist = get_values(ALLOWED_SYSTEM_USERS_KEY, &executor_cfg); + char **whitelist = get_section_values(ALLOWED_SYSTEM_USERS_KEY, &executor_cfg); char **users = whitelist; if (whitelist != NULL) { for(; *users; ++users) { @@ -774,7 +789,7 @@ struct passwd* check_user(const char *user) { fflush(LOGFILE); return NULL; } - char *min_uid_str = get_value(MIN_USERID_KEY, &executor_cfg); + char *min_uid_str = get_section_value(MIN_USERID_KEY, &executor_cfg); int min_uid = DEFAULT_MIN_USERID; if (min_uid_str != NULL) { char *end_ptr = NULL; @@ -801,7 +816,7 @@ struct passwd* check_user(const char *user) { free(user_info); return NULL; } - char **banned_users = get_values(BANNED_USERS_KEY, &executor_cfg); + char **banned_users = get_section_values(BANNED_USERS_KEY, &executor_cfg); banned_users = banned_users == NULL ? (char**) DEFAULT_BANNED_USERS : banned_users; char **banned_user = banned_users; @@ -1187,7 +1202,6 @@ char** tokenize_docker_command(const char *input, int *split_counter) { char *line = (char *)calloc(strlen(input) + 1, sizeof(char)); char **linesplit = (char **) malloc(sizeof(char *)); char *p = NULL; - int c = 0; *split_counter = 0; strncpy(line, input, strlen(input)); @@ -1208,6 +1222,27 @@ char** tokenize_docker_command(const char *input, int *split_counter) { return linesplit; } +int execute_regex_match(const char *regex_str, const char *input) { + regex_t regex; + int regex_match; + if (0 != regcomp(&regex, regex_str, REG_EXTENDED|REG_NOSUB)) { + fprintf(LOGFILE, "Unable to compile regex."); + fflush(LOGFILE); + exit(ERROR_COMPILING_REGEX); + } + regex_match = regexec(&regex, input, (size_t) 0, NULL, 0); + regfree(&regex); + if(0 == regex_match) { + return 0; + } + return 1; +} + +int validate_docker_image_name(const char *image_name) { + char *regex_str = "^(([a-zA-Z0-9.-]+)(:[0-9]+)?/)?([a-z0-9_./-]+)(:[a-zA-Z0-9_.-]+)?$"; + return execute_regex_match(regex_str, image_name); +} + char* sanitize_docker_command(const char *line) { static struct option long_options[] = { {"name", required_argument, 0, 'n' }, @@ -1222,6 +1257,7 @@ char* sanitize_docker_command(const char *line) { {"cap-drop", required_argument, 0, 'o' }, {"device", required_argument, 0, 'i' }, {"detach", required_argument, 0, 't' }, + {"format", required_argument, 0, 'f' }, {0, 0, 0, 0} }; @@ -1240,6 +1276,35 @@ char* sanitize_docker_command(const char *line) { if(output == NULL) { exit(OUT_OF_MEMORY); } + + // Handle docker client config option. + if(0 == strncmp(linesplit[0], DOCKER_CLIENT_CONFIG_ARG, strlen(DOCKER_CLIENT_CONFIG_ARG))) { + strcat(output, linesplit[0]); + strcat(output, " "); + long index = 0; + while(index < split_counter) { + linesplit[index] = linesplit[index + 1]; + if (linesplit[index] == NULL) { + split_counter--; + break; + } + index++; + } + } + + // Handle docker pull and image name validation.
+ if (0 == strncmp(linesplit[0], DOCKER_PULL_COMMAND, strlen(DOCKER_PULL_COMMAND))) { + if (0 != validate_docker_image_name(linesplit[1])) { + fprintf(ERRORFILE, "Invalid Docker image name, exiting."); + fflush(ERRORFILE); + exit(DOCKER_IMAGE_INVALID); + } + strcat(output, linesplit[0]); + strcat(output, " "); + strcat(output, linesplit[1]); + return output; + } + strcat(output, linesplit[0]); strcat(output, " "); optind = 1; @@ -1287,6 +1352,11 @@ char* sanitize_docker_command(const char *line) { case 't': quote_and_append_arg(&output, &output_size, "--detach=", optarg); break; + case 'f': + strcat(output, "--format="); + strcat(output, optarg); + strcat(output, " "); + break; default: fprintf(LOGFILE, "Unknown option in docker command, character %d %c, optionindex = %d\n", c, c, optind); fflush(LOGFILE); @@ -1297,7 +1367,16 @@ char* sanitize_docker_command(const char *line) { if(optind < split_counter) { while(optind < split_counter) { - quote_and_append_arg(&output, &output_size, "", linesplit[optind++]); + if (0 == strncmp(linesplit[optind], DOCKER_CONTAINER_NAME_PREFIX, strlen(DOCKER_CONTAINER_NAME_PREFIX))) { + if (1 != validate_container_id(linesplit[optind])) { + fprintf(ERRORFILE, "Specified container_id=%s is invalid\n", linesplit[optind]); + fflush(ERRORFILE); + exit(DOCKER_CONTAINER_NAME_INVALID); + } + strcat(output, linesplit[optind++]); + } else { + quote_and_append_arg(&output, &output_size, "", linesplit[optind++]); + } } } @@ -1305,7 +1384,6 @@ char* sanitize_docker_command(const char *line) { } char* parse_docker_command_file(const char* command_file) { - size_t len = 0; char *line = NULL; ssize_t read; @@ -1328,20 +1406,21 @@ char* parse_docker_command_file(const char* command_file) { if(ret == NULL) { exit(ERROR_SANITIZING_DOCKER_COMMAND); } - fprintf(LOGFILE, "Using command %s\n", ret); - fflush(LOGFILE); + fprintf(ERRORFILE, "Using command %s\n", ret); + fflush(ERRORFILE); return ret; } int run_docker(const char *command_file) { char* docker_command = parse_docker_command_file(command_file); - char* docker_binary = get_value(DOCKER_BINARY_KEY, &executor_cfg); + char* docker_binary = get_section_value(DOCKER_BINARY_KEY, &executor_cfg); docker_binary = check_docker_binary(docker_binary); + size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024); - char* docker_command_with_binary = calloc(sizeof(char), EXECUTOR_PATH_MAX); - snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", docker_binary, docker_command); - char **args = extract_values_delim(docker_command_with_binary, " "); + char* docker_command_with_binary = calloc(sizeof(char), command_size); + snprintf(docker_command_with_binary, command_size, "%s %s", docker_binary, docker_command); + char **args = split_delimiter(docker_command_with_binary, " "); int exit_code = -1; if (execvp(docker_binary, args) != 0) { @@ -1488,21 +1567,29 @@ int launch_docker_container_as_user(const char * user, const char *app_id, char *script_file_dest = NULL; char *cred_file_dest = NULL; char *exit_code_file = NULL; - char docker_command_with_binary[EXECUTOR_PATH_MAX]; - char docker_wait_command[EXECUTOR_PATH_MAX]; - char docker_logs_command[EXECUTOR_PATH_MAX]; - char docker_inspect_command[EXECUTOR_PATH_MAX]; - char docker_rm_command[EXECUTOR_PATH_MAX]; + char *docker_command_with_binary = NULL; + char *docker_wait_command = NULL; + char *docker_logs_command = NULL; + char *docker_inspect_command = NULL; + char *docker_rm_command = NULL; int container_file_source =-1; int cred_file_source = -1; int BUFFER_SIZE = 4096; 
char buffer[BUFFER_SIZE]; + size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024); + + docker_command_with_binary = calloc(sizeof(char), command_size); + docker_wait_command = calloc(sizeof(char), command_size); + docker_logs_command = calloc(sizeof(char), command_size); + docker_inspect_command = calloc(sizeof(char), command_size); + docker_rm_command = calloc(sizeof(char), command_size); + gid_t user_gid = getegid(); uid_t prev_uid = geteuid(); char *docker_command = parse_docker_command_file(command_file); - char *docker_binary = get_value(DOCKER_BINARY_KEY, &executor_cfg); + char *docker_binary = get_section_value(DOCKER_BINARY_KEY, &executor_cfg); docker_binary = check_docker_binary(docker_binary); fprintf(LOGFILE, "Creating script paths...\n"); @@ -1542,7 +1629,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id, goto cleanup; } - snprintf(docker_command_with_binary, EXECUTOR_PATH_MAX, "%s %s", docker_binary, docker_command); + snprintf(docker_command_with_binary, command_size, "%s %s", docker_binary, docker_command); fprintf(LOGFILE, "Launching docker container...\n"); FILE* start_docker = popen(docker_command_with_binary, "r"); @@ -1555,7 +1642,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id, goto cleanup; } - snprintf(docker_inspect_command, EXECUTOR_PATH_MAX, + snprintf(docker_inspect_command, command_size, "%s inspect --format {{.State.Pid}} %s", docker_binary, container_id); @@ -1600,7 +1687,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id, goto cleanup; } - snprintf(docker_wait_command, EXECUTOR_PATH_MAX, + snprintf(docker_wait_command, command_size, "%s wait %s", docker_binary, container_id); fprintf(LOGFILE, "Waiting for docker container to finish...\n"); @@ -1614,7 +1701,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id, if(exit_code != 0) { fprintf(ERRORFILE, "Docker container exit code was not zero: %d\n", exit_code); - snprintf(docker_logs_command, EXECUTOR_PATH_MAX, "%s logs --tail=250 %s", + snprintf(docker_logs_command, command_size, "%s logs --tail=250 %s", docker_binary, container_id); FILE* logs = popen(docker_logs_command, "r"); if(logs != NULL) { @@ -1644,7 +1731,7 @@ int launch_docker_container_as_user(const char * user, const char *app_id, } fprintf(LOGFILE, "Removing docker container post-exit...\n"); - snprintf(docker_rm_command, EXECUTOR_PATH_MAX, + snprintf(docker_rm_command, command_size, "%s rm %s", docker_binary, container_id); FILE* rm_docker = popen(docker_rm_command, "w"); if (pclose (rm_docker) != 0) @@ -1684,6 +1771,11 @@ cleanup: free(exit_code_file); free(script_file_dest); free(cred_file_dest); + free(docker_command_with_binary); + free(docker_wait_command); + free(docker_logs_command); + free(docker_inspect_command); + free(docker_rm_command); return exit_code; } @@ -1837,7 +1929,7 @@ static int rmdir_as_nm(const char* path) { int user_gid = getegid(); int ret = change_effective_user(nm_uid, nm_gid); if (ret == 0) { - if (rmdir(path) != 0) { + if (rmdir(path) != 0 && errno != ENOENT) { fprintf(LOGFILE, "rmdir of %s failed - %s\n", path, strerror(errno)); ret = -1; } @@ -1882,7 +1974,7 @@ static int unlink_helper(int dirfd, const char *name, int flags) { } else { ret = unlink(name); } - if (ret >= 0) { + if (ret >= 0 || errno == ENOENT) { return 0; } return errno; @@ -1919,7 +2011,7 @@ static int is_symlink_helper(int dirfd, const char *name) static int recursive_unlink_helper(int dirfd, const char *name, const char* fullpath) 
{ - int fd = -1, ret = 0; + int fd = -1, ret = 0, unlink_err = 0; DIR *dfd = NULL; struct stat stat; @@ -1928,6 +2020,10 @@ static int recursive_unlink_helper(int dirfd, const char *name, ret = is_symlink_helper(dirfd, name); if (ret < 0) { // is_symlink_helper failed. + if (ret == -ENOENT) { + ret = 0; + goto done; + } ret = -ret; fprintf(LOGFILE, "is_symlink_helper(%s) failed: %s\n", fullpath, strerror(ret)); @@ -1949,6 +2045,10 @@ static int recursive_unlink_helper(int dirfd, const char *name, if (fd == -EACCES) { ret = chmod_helper(dirfd, name, 0700); if (ret) { + if (ret == ENOENT) { + ret = 0; + goto done; + } fprintf(LOGFILE, "chmod(%s) failed: %s\n", fullpath, strerror(ret)); goto done; } @@ -1956,11 +2056,19 @@ static int recursive_unlink_helper(int dirfd, const char *name, } if (fd < 0) { ret = -fd; + if (ret == ENOENT) { + ret = 0; + goto done; + } fprintf(LOGFILE, "error opening %s: %s\n", fullpath, strerror(ret)); goto done; } if (fstat(fd, &stat) < 0) { ret = errno; + if (ret == ENOENT) { + ret = 0; + goto done; + } fprintf(LOGFILE, "failed to stat %s: %s\n", fullpath, strerror(ret)); goto done; } @@ -1974,6 +2082,10 @@ static int recursive_unlink_helper(int dirfd, const char *name, dfd = fdopendir(fd); if (!dfd) { ret = errno; + if (ret == ENOENT) { + ret = 0; + goto done; + } fprintf(LOGFILE, "fopendir(%s) failed: %s\n", fullpath, strerror(ret)); goto done; } @@ -1985,7 +2097,7 @@ static int recursive_unlink_helper(int dirfd, const char *name, de = readdir(dfd); if (!de) { ret = errno; - if (ret) { + if (ret && ret != ENOENT) { fprintf(LOGFILE, "readdir(%s) failed: %s\n", fullpath, strerror(ret)); goto done; } @@ -2003,10 +2115,10 @@ static int recursive_unlink_helper(int dirfd, const char *name, ret = ENOMEM; goto done; } - ret = recursive_unlink_helper(fd, de->d_name, new_fullpath); + int rc = recursive_unlink_helper(fd, de->d_name, new_fullpath); free(new_fullpath); - if (ret) { - goto done; + if (rc && !unlink_err) { + unlink_err = rc; } } if (dirfd != -1) { @@ -2017,7 +2129,7 @@ static int recursive_unlink_helper(int dirfd, const char *name, } } } - ret = 0; + ret = unlink_err; done: if (fd >= 0) { close(fd); @@ -2048,9 +2160,6 @@ static int delete_path(const char *full_path, return PATH_TO_DELETE_IS_NULL; } ret = recursive_unlink_children(full_path); - if (ret == ENOENT) { - return 0; - } if (ret != 0) { fprintf(LOGFILE, "Error while deleting %s: %d (%s)\n", full_path, ret, strerror(ret)); @@ -2333,3 +2442,12 @@ int traffic_control_read_state(char *command_file) { int traffic_control_read_stats(char *command_file) { return run_traffic_control(TC_READ_STATS_OPTS, command_file); } + +/** + * FIXME: (wangda) it's better to move executor_cfg out of container-executor.c + * Now initialize of executor_cfg and data structures are stored inside + * container-executor which is not a good design. 
+ */ +struct configuration* get_cfg() { + return &CFG; +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h index e40bd90dccb..aa38abfee19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h @@ -35,48 +35,6 @@ enum command { LIST_AS_USER = 5 }; -enum errorcodes { - INVALID_ARGUMENT_NUMBER = 1, - //INVALID_USER_NAME 2 - INVALID_COMMAND_PROVIDED = 3, - // SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS (NOT USED) 4 - INVALID_NM_ROOT_DIRS = 5, - SETUID_OPER_FAILED, //6 - UNABLE_TO_EXECUTE_CONTAINER_SCRIPT, //7 - UNABLE_TO_SIGNAL_CONTAINER, //8 - INVALID_CONTAINER_PID, //9 - // ERROR_RESOLVING_FILE_PATH (NOT_USED) 10 - // RELATIVE_PATH_COMPONENTS_IN_FILE_PATH (NOT USED) 11 - // UNABLE_TO_STAT_FILE (NOT USED) 12 - // FILE_NOT_OWNED_BY_ROOT (NOT USED) 13 - // PREPARE_CONTAINER_DIRECTORIES_FAILED (NOT USED) 14 - // INITIALIZE_CONTAINER_FAILED (NOT USED) 15 - // PREPARE_CONTAINER_LOGS_FAILED (NOT USED) 16 - // INVALID_LOG_DIR (NOT USED) 17 - OUT_OF_MEMORY = 18, - // INITIALIZE_DISTCACHEFILE_FAILED (NOT USED) 19 - INITIALIZE_USER_FAILED = 20, - PATH_TO_DELETE_IS_NULL, //21 - INVALID_CONTAINER_EXEC_PERMISSIONS, //22 - // PREPARE_JOB_LOGS_FAILED (NOT USED) 23 - INVALID_CONFIG_FILE = 24, - SETSID_OPER_FAILED = 25, - WRITE_PIDFILE_FAILED = 26, - WRITE_CGROUP_FAILED = 27, - TRAFFIC_CONTROL_EXECUTION_FAILED = 28, - DOCKER_RUN_FAILED = 29, - ERROR_OPENING_DOCKER_FILE = 30, - ERROR_READING_DOCKER_FILE = 31, - FEATURE_DISABLED = 32, - COULD_NOT_CREATE_SCRIPT_COPY = 33, - COULD_NOT_CREATE_CREDENTIALS_FILE = 34, - COULD_NOT_CREATE_WORK_DIRECTORIES = 35, - COULD_NOT_CREATE_APP_LOG_DIRECTORIES = 36, - COULD_NOT_CREATE_TMP_DIRECTORIES = 37, - ERROR_CREATE_CONTAINER_DIRECTORIES_ARGUMENTS = 38, - ERROR_SANITIZING_DOCKER_COMMAND = 39 -}; - enum operations { CHECK_SETUP = 1, MOUNT_CGROUPS = 2, @@ -108,11 +66,6 @@ enum operations { extern struct passwd *user_detail; -// the log file for messages -extern FILE *LOGFILE; -// the log file for error messages -extern FILE *ERRORFILE; - // get the executable's filename char* get_executable(char *argv0); @@ -273,7 +226,7 @@ int create_validate_dir(const char* npath, mode_t perm, const char* path, /** Check if a feature is enabled in the specified configuration. */ int is_feature_enabled(const char* feature_key, int default_value, - struct configuration *cfg); + struct section *cfg); /** Check if tc (traffic control) support is enabled in configuration. */ int is_tc_support_enabled(); @@ -309,3 +262,17 @@ int run_docker(const char *command_file); * Sanitize docker commands. Returns NULL if there was any failure. */ char* sanitize_docker_command(const char *line); + +/* + * Compile the regex_str and determine if the input string matches. + * Return 0 on match, 1 of non-match. + */ +int execute_regex_match(const char *regex_str, const char *input); + +/** + * Validate the docker image name matches the expected input. + * Return 0 on success. 
+ */ +int validate_docker_image_name(const char *image_name); + +struct configuration* get_cfg(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/get_executable.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/get_executable.c index ce46b776fd8..55973a2db57 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/get_executable.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/get_executable.c @@ -31,6 +31,7 @@ #include "config.h" #include "configuration.h" #include "container-executor.h" +#include "util.h" #include #include diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c index fdc0496986d..a05dc787005 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c @@ -19,6 +19,9 @@ #include "config.h" #include "configuration.h" #include "container-executor.h" +#include "util.h" +#include "modules/gpu/gpu-module.h" +#include "modules/cgroups/cgroups-operations.h" #include #include @@ -252,6 +255,14 @@ static int validate_arguments(int argc, char **argv , int *operation) { return INVALID_ARGUMENT_NUMBER; } + /* + * Check if it is a known module, if yes, redirect to module + */ + if (strcmp("--module-gpu", argv[1]) == 0) { + return handle_gpu_request(&update_cgroups_parameters, "gpu", argc - 1, + &argv[1]); + } + if (strcmp("--checksetup", argv[1]) == 0) { *operation = CHECK_SETUP; return 0; @@ -331,6 +342,7 @@ static int validate_arguments(int argc, char **argv , int *operation) { return FEATURE_DISABLED; } } + /* Now we have to validate 'run as user' operations that don't use a 'long option' - we should fix this at some point. 
The validation/argument parsing here is extensive enough that it done in a separate function */ @@ -420,7 +432,7 @@ static int validate_run_as_user_commands(int argc, char **argv, int *operation) cmd_input.resources_key = resources_key; cmd_input.resources_value = resources_value; - cmd_input.resources_values = extract_values(resources_value); + cmd_input.resources_values = split(resources_value); *operation = RUN_AS_USER_LAUNCH_DOCKER_CONTAINER; return 0; } else { @@ -471,7 +483,7 @@ static int validate_run_as_user_commands(int argc, char **argv, int *operation) cmd_input.resources_key = resources_key; cmd_input.resources_value = resources_value; - cmd_input.resources_values = extract_values(resources_value); + cmd_input.resources_values = split(resources_value); *operation = RUN_AS_USER_LAUNCH_CONTAINER; return 0; @@ -521,7 +533,7 @@ int main(int argc, char **argv) { open_log_files(); assert_valid_setup(argv[0]); - int operation; + int operation = -1; int ret = validate_arguments(argc, argv, &operation); if (ret != 0) { @@ -565,8 +577,8 @@ int main(int argc, char **argv) { exit_code = initialize_app(cmd_input.yarn_user_name, cmd_input.app_id, cmd_input.cred_file, - extract_values(cmd_input.local_dirs), - extract_values(cmd_input.log_dirs), + split(cmd_input.local_dirs), + split(cmd_input.log_dirs), argv + optind); break; case RUN_AS_USER_LAUNCH_DOCKER_CONTAINER: @@ -591,8 +603,8 @@ int main(int argc, char **argv) { cmd_input.script_file, cmd_input.cred_file, cmd_input.pid_file, - extract_values(cmd_input.local_dirs), - extract_values(cmd_input.log_dirs), + split(cmd_input.local_dirs), + split(cmd_input.log_dirs), cmd_input.docker_command_file, cmd_input.resources_key, cmd_input.resources_values); @@ -619,8 +631,8 @@ int main(int argc, char **argv) { cmd_input.script_file, cmd_input.cred_file, cmd_input.pid_file, - extract_values(cmd_input.local_dirs), - extract_values(cmd_input.log_dirs), + split(cmd_input.local_dirs), + split(cmd_input.log_dirs), cmd_input.resources_key, cmd_input.resources_values); free(cmd_input.resources_key); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.c new file mode 100644 index 00000000000..b23410928bf --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.c @@ -0,0 +1,161 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "configuration.h" +#include "container-executor.h" +#include "utils/string-utils.h" +#include "utils/path-utils.h" +#include "modules/common/module-configs.h" +#include "modules/common/constants.h" +#include "modules/cgroups/cgroups-operations.h" +#include "util.h" + +#include +#include +#include +#include + +#define MAX_PATH_LEN 4096 + +static const struct section* cgroup_cfg_section = NULL; + +void reload_cgroups_configuration() { + cgroup_cfg_section = get_configuration_section(CGROUPS_SECTION_NAME, get_cfg()); +} + +char* get_cgroups_path_to_write( + const char* hierarchy_name, + const char* param_name, + const char* group_id) { + int failed = 0; + char* buffer = NULL; + const char* cgroups_root = get_section_value(CGROUPS_ROOT_KEY, + cgroup_cfg_section); + const char* yarn_hierarchy_name = get_section_value( + CGROUPS_YARN_HIERARCHY_KEY, cgroup_cfg_section); + + // Make sure it is defined. + if (!cgroups_root || cgroups_root[0] == 0) { + fprintf(ERRORFILE, "%s is not defined in container-executor.cfg\n", + CGROUPS_ROOT_KEY); + failed = 1; + goto cleanup; + } + + // Make sure it is defined. + if (!yarn_hierarchy_name || yarn_hierarchy_name[0] == 0) { + fprintf(ERRORFILE, "%s is not defined in container-executor.cfg\n", + CGROUPS_YARN_HIERARCHY_KEY); + failed = 1; + goto cleanup; + } + + buffer = malloc(MAX_PATH_LEN + 1); + if (!buffer) { + fprintf(ERRORFILE, "Failed to allocate memory for output path.\n"); + failed = 1; + goto cleanup; + } + + // Make a path. + // CGroups path should not be too long. + if (snprintf(buffer, MAX_PATH_LEN, "%s/%s/%s/%s/%s.%s", + cgroups_root, hierarchy_name, yarn_hierarchy_name, + group_id, hierarchy_name, param_name) < 0) { + fprintf(ERRORFILE, "Failed to print output path.\n"); + failed = 1; + goto cleanup; + } + +cleanup: + if (failed) { + if (buffer) { + free(buffer); + } + return NULL; + } + return buffer; +} + +int update_cgroups_parameters( + const char* hierarchy_name, + const char* param_name, + const char* group_id, + const char* value) { +#ifndef __linux + fprintf(ERRORFILE, "Failed to update cgroups parameters, not supported\n"); + return -1; +#endif + int failure = 0; + + if (!cgroup_cfg_section) { + reload_cgroups_configuration(); + } + + char* full_path = get_cgroups_path_to_write(hierarchy_name, param_name, + group_id); + + if (!full_path) { + fprintf(ERRORFILE, + "Failed to get cgroups path to write, it should be a configuration issue"); + failure = 1; + goto cleanup; + } + + if (!verify_path_safety(full_path)) { + failure = 1; + goto cleanup; + } + + // Make sure file exists + struct stat sb; + if (stat(full_path, &sb) != 0) { + fprintf(ERRORFILE, "CGroups: Could not find file to write, %s", full_path); + failure = 1; + goto cleanup; + } + + fprintf(ERRORFILE, "CGroups: Updating cgroups, path=%s, value=%s", + full_path, value); + + // Write values to file + FILE *f; + f = fopen(full_path, "a"); + if (!f) { + fprintf(ERRORFILE, "CGroups: Failed to open cgroups file, %s", full_path); + failure = 1; + goto cleanup; + } + if (fprintf(f, "%s", value) < 0) { + fprintf(ERRORFILE, "CGroups: Failed to write cgroups file, %s", full_path); + fclose(f); + failure = 1; + goto cleanup; + } + if (fclose(f) != 0) { + fprintf(ERRORFILE, "CGroups: Failed to close cgroups file, %s", full_path); + failure = 1; + goto cleanup; + } + +cleanup: + if (full_path) { + free(full_path); + } + return -failure; +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.h new file mode 100644 index 00000000000..cf80bcf6059 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.h @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _CGROUPS_OPERATIONS_H_ +#define _CGROUPS_OPERATIONS_H_ + +#define CGROUPS_SECTION_NAME "cgroups" +#define CGROUPS_ROOT_KEY "root" +#define CGROUPS_YARN_HIERARCHY_KEY "yarn-hierarchy" + +/** + * Handle update CGroups parameter update requests: + * - hierarchy_name: e.g. devices / cpu,cpuacct + * - param_name: e.g. deny + * - group_id: e.g. container_x_y + * - value: e.g. "a *:* rwm" + * + * return 0 if succeeded + */ +int update_cgroups_parameters( + const char* hierarchy_name, + const char* param_name, + const char* group_id, + const char* value); + + /** + * Get CGroups path to update. Visible for testing. + * Return 0 if succeeded + */ + char* get_cgroups_path_to_write( + const char* hierarchy_name, + const char* param_name, + const char* group_id); + + /** + * Reload config from filesystem, visible for testing. + */ + void reload_cgroups_configuration(); + +#endif \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/constants.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/constants.h new file mode 100644 index 00000000000..5c8c4e939ee --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/constants.h @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
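/*
 * Usage sketch for the cgroups API declared above, with example configuration
 * values. Assuming container-executor.cfg contains
 *
 *   [cgroups]
 *   root=/sys/fs/cgroup
 *   yarn-hierarchy=yarn
 *
 * get_cgroups_path_to_write() builds paths of the form
 * <root>/<hierarchy>/<yarn-hierarchy>/<group_id>/<hierarchy>.<param>, so the
 * call below appends its value to
 * /sys/fs/cgroup/devices/yarn/container_1498064906505_0001_01_000001/devices.deny
 * (the file must already exist; on any failure the function returns -1 and
 * logs to ERRORFILE):
 */
void cgroups_update_sketch() {
  int rc = update_cgroups_parameters(
      "devices", "deny",
      "container_1498064906505_0001_01_000001",
      "c 195:0 rwm");
  if (rc != 0) {
    /* configuration or I/O problem; see ERRORFILE for details */
  }
}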
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* FreeBSD protects the getline() prototype. See getline(3) for more */ +#ifdef __FreeBSD__ +#define _WITH_GETLINE +#endif + +#ifndef _MODULES_COMMON_CONSTANTS_H_ +#define _MODULES_COMMON_CONSTANTS_H_ + +#define CONFIGS_MODULES_PREFIX "yarn.container-executor.modules." + +#endif \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.c new file mode 100644 index 00000000000..f0c6d16fbb2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.c @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "util.h" +#include "configuration.h" +#include "container-executor.h" +#include "modules/common/constants.h" + +#include +#include +#include + +#define ENABLED_CONFIG_KEY "module.enabled" + +int module_enabled(const struct section* section_cfg, const char* module_name) { + char* enabled_str = get_section_value(ENABLED_CONFIG_KEY, section_cfg); + int enabled = 0; + if (enabled_str && 0 == strcmp(enabled_str, "true")) { + enabled = 1; + } else { + fprintf(LOGFILE, "Module %s is disabled\n", module_name); + } + + free(enabled_str); + return enabled; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.h new file mode 100644 index 00000000000..d58c618d517 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/common/module-configs.h @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
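/*
 * Usage sketch: module_enabled() above looks for the key "module.enabled"
 * inside a module's own section of container-executor.cfg. With a section
 * such as
 *
 *   [gpu]
 *   module.enabled=true
 *
 * the call below returns 1; a missing key or any value other than "true"
 * returns 0 and logs that the module is disabled. The section name "gpu" is
 * only an example; any module section is handled the same way.
 */
void module_enabled_sketch() {
  const struct section *gpu_section =
      get_configuration_section("gpu", get_cfg());
  int enabled = module_enabled(gpu_section, "gpu");
  (void) enabled;
}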
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifdef __FreeBSD__ +#define _WITH_GETLINE +#endif + +#ifndef _MODULES_COMMON_MODULE_CONFIGS_H_ +#define _MODULES_COMMON_MODULE_CONFIGS_H_ + + +/** + * check if module enabled given name of module. + * return 0 if disabled + */ +int module_enabled(const struct section* section_cfg, const char* module_name); + +#endif \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c new file mode 100644 index 00000000000..f96645d3970 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c @@ -0,0 +1,229 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "configuration.h" +#include "container-executor.h" +#include "utils/string-utils.h" +#include "modules/gpu/gpu-module.h" +#include "modules/cgroups/cgroups-operations.h" +#include "modules/common/module-configs.h" +#include "modules/common/constants.h" +#include "util.h" + +#include +#include +#include +#include +#include + +#define EXCLUDED_GPUS_OPTION "excluded_gpus" +#define CONTAINER_ID_OPTION "container_id" +#define DEFAULT_NVIDIA_MAJOR_NUMBER 195 +#define MAX_CONTAINER_ID_LEN 128 + +static const struct section* cfg_section; + +static int internal_handle_gpu_request( + update_cgroups_parameters_func update_cgroups_parameters_func_p, + size_t n_minor_devices_to_block, int minor_devices[], + const char* container_id) { + char* allowed_minor_numbers_str = NULL; + int* allowed_minor_numbers = NULL; + size_t n_allowed_minor_numbers = 0; + int return_code = 0; + + if (n_minor_devices_to_block == 0) { + // no device to block, just return; + return 0; + } + + // Get major device number from cfg, if not set, major number of (Nvidia) + // will be the default value. 
+ int major_device_number; + char* major_number_str = get_section_value(GPU_MAJOR_NUMBER_CONFIG_KEY, + cfg_section); + if (!major_number_str || 0 == major_number_str[0]) { + // Default major number of Nvidia devices + major_device_number = DEFAULT_NVIDIA_MAJOR_NUMBER; + } else { + major_device_number = strtol(major_number_str, NULL, 0); + } + + // Get allowed minor device numbers from cfg, if not set, means all minor + // devices can be used by YARN + allowed_minor_numbers_str = get_section_value( + GPU_ALLOWED_DEVICES_MINOR_NUMBERS, + cfg_section); + if (!allowed_minor_numbers_str || 0 == allowed_minor_numbers_str[0]) { + allowed_minor_numbers = NULL; + } else { + int rc = get_numbers_split_by_comma(allowed_minor_numbers_str, + &allowed_minor_numbers, + &n_allowed_minor_numbers); + if (0 != rc) { + fprintf(ERRORFILE, + "Failed to get allowed minor device numbers from cfg, value=%s\n", + allowed_minor_numbers_str); + return_code = -1; + goto cleanup; + } + + // Make sure we're trying to black devices allowed in config + for (int i = 0; i < n_minor_devices_to_block; i++) { + int found = 0; + for (int j = 0; j < n_allowed_minor_numbers; j++) { + if (minor_devices[i] == allowed_minor_numbers[j]) { + found = 1; + break; + } + } + + if (!found) { + fprintf(ERRORFILE, + "Trying to blacklist device with minor-number=%d which is not on allowed list\n", + minor_devices[i]); + return_code = -1; + goto cleanup; + } + } + } + + // Use cgroup helpers to blacklist devices + for (int i = 0; i < n_minor_devices_to_block; i++) { + char param_value[128]; + memset(param_value, 0, sizeof(param_value)); + snprintf(param_value, sizeof(param_value), "c %d:%d rwm", + major_device_number, i); + + int rc = update_cgroups_parameters_func_p("devices", "deny", + container_id, param_value); + + if (0 != rc) { + fprintf(ERRORFILE, "CGroups: Failed to update cgroups\n"); + return_code = -1; + goto cleanup; + } + } + +cleanup: + if (major_number_str) { + free(major_number_str); + } + if (allowed_minor_numbers) { + free(allowed_minor_numbers); + } + if (allowed_minor_numbers_str) { + free(allowed_minor_numbers_str); + } + + return return_code; +} + +void reload_gpu_configuration() { + cfg_section = get_configuration_section(GPU_MODULE_SECTION_NAME, get_cfg()); +} + +/* + * Format of GPU request commandline: + * + * c-e gpu --excluded_gpus 0,1,3 --container_id container_x_y + */ +int handle_gpu_request(update_cgroups_parameters_func func, + const char* module_name, int module_argc, char** module_argv) { + if (!cfg_section) { + reload_gpu_configuration(); + } + + if (!module_enabled(cfg_section, GPU_MODULE_SECTION_NAME)) { + fprintf(ERRORFILE, + "Please make sure gpu module is enabled before using it.\n"); + return -1; + } + + static struct option long_options[] = { + {EXCLUDED_GPUS_OPTION, required_argument, 0, 'e' }, + {CONTAINER_ID_OPTION, required_argument, 0, 'c' }, + {0, 0, 0, 0} + }; + + int rc = 0; + int c = 0; + int option_index = 0; + + int* minor_devices = NULL; + char container_id[MAX_CONTAINER_ID_LEN]; + memset(container_id, 0, sizeof(container_id)); + size_t n_minor_devices_to_block = 0; + int failed = 0; + + optind = 1; + while((c = getopt_long(module_argc, module_argv, "e:c:", + long_options, &option_index)) != -1) { + switch(c) { + case 'e': + rc = get_numbers_split_by_comma(optarg, &minor_devices, + &n_minor_devices_to_block); + if (0 != rc) { + fprintf(ERRORFILE, + "Failed to get minor devices number from command line, value=%s\n", + optarg); + failed = 1; + goto cleanup; + } + break; + case 'c': + if 
(!validate_container_id(optarg)) { + fprintf(ERRORFILE, + "Specified container_id=%s is invalid\n", optarg); + failed = 1; + goto cleanup; + } + strncpy(container_id, optarg, MAX_CONTAINER_ID_LEN); + break; + default: + fprintf(ERRORFILE, + "Unknown option in gpu command character %d %c, optionindex = %d\n", + c, c, optind); + failed = 1; + goto cleanup; + } + } + + if (0 == container_id[0]) { + fprintf(ERRORFILE, + "[%s] --container_id must be specified.\n", __func__); + failed = 1; + goto cleanup; + } + + if (!minor_devices) { + // Minor devices is null, skip following call. + fprintf(ERRORFILE, "is not specified, skip cgroups call.\n"); + goto cleanup; + } + + failed = internal_handle_gpu_request(func, n_minor_devices_to_block, + minor_devices, + container_id); + +cleanup: + if (minor_devices) { + free(minor_devices); + } + return failed; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.h new file mode 100644 index 00000000000..59d4c7e9cb1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.h @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifdef __FreeBSD__ +#define _WITH_GETLINE +#endif + +#ifndef _MODULES_GPU_GPU_MUDULE_H_ +#define _MODULES_GPU_GPU_MUDULE_H_ + +#define GPU_MAJOR_NUMBER_CONFIG_KEY "gpu.major-device-number" +#define GPU_ALLOWED_DEVICES_MINOR_NUMBERS "gpu.allowed-device-minor-numbers" +#define GPU_MODULE_SECTION_NAME "gpu" + +// For unit test stubbing +typedef int (*update_cgroups_parameters_func)(const char*, const char*, + const char*, const char*); + +/** + * Handle gpu requests + */ +int handle_gpu_request(update_cgroups_parameters_func func, + const char* module_name, int module_argc, char** module_argv); + +/** + * Reload config from filesystem, visible for testing. 
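/*
 * Usage sketch for the GPU module declared above. main.c reaches
 * handle_gpu_request() through the "--module-gpu" switch, e.g.
 *
 *   container-executor --module-gpu \
 *       --excluded_gpus 0,1 \
 *       --container_id container_1498064906505_0001_01_000001
 *
 * For each excluded GPU the module asks the supplied
 * update_cgroups_parameters_func to append a device deny rule of the form
 * "c 195:0 rwm" to the container's devices cgroup, where 195 is the default
 * Nvidia major number unless gpu.major-device-number is set in the [gpu]
 * section. The function pointer exists so tests can substitute a mock instead
 * of writing to real cgroups; the names below are illustrative:
 */
static int noop_cgroups_update(const char *controller, const char *param,
                               const char *group, const char *value) {
  (void) controller; (void) param; (void) group; (void) value;
  return 0;                                /* pretend the cgroup write worked */
}

void gpu_request_sketch(int module_argc, char **module_argv) {
  /* module_argv[0] is "--module-gpu", exactly as main.c forwards it */
  int rc = handle_gpu_request(&noop_cgroups_update, "gpu",
                              module_argc, module_argv);
  (void) rc;                               /* 0 on success, -1 on failure */
}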
+ */ +void reload_gpu_configuration(); + +#endif \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c new file mode 100644 index 00000000000..8e39ca85760 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.c @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "util.h" +#include +#include +#include +#include + +char** split_delimiter(char *value, const char *delim) { + char **return_values = NULL; + char *temp_tok = NULL; + char *tempstr = NULL; + int size = 0; + int per_alloc_size = 10; + int return_values_size = per_alloc_size; + int failed = 0; + + //first allocate any array of 10 + if(value != NULL) { + return_values = (char **) malloc(sizeof(char *) * return_values_size); + if (!return_values) { + fprintf(ERRORFILE, "Allocation error for return_values in %s.\n", + __func__); + failed = 1; + goto cleanup; + } + memset(return_values, 0, sizeof(char *) * return_values_size); + + temp_tok = strtok_r(value, delim, &tempstr); + while (temp_tok != NULL) { + temp_tok = strdup(temp_tok); + if (NULL == temp_tok) { + fprintf(ERRORFILE, "Allocation error in %s.\n", __func__); + failed = 1; + goto cleanup; + } + + return_values[size++] = temp_tok; + + // Make sure returned values has enough space for the trailing NULL. + if (size >= return_values_size - 1) { + return_values_size += per_alloc_size; + return_values = (char **) realloc(return_values,(sizeof(char *) * + return_values_size)); + + // Make sure new added memory are filled with NULL + for (int i = size; i < return_values_size; i++) { + return_values[i] = NULL; + } + } + temp_tok = strtok_r(NULL, delim, &tempstr); + } + } + + // Put trailing NULL to indicate values terminates. + if (return_values != NULL) { + return_values[size] = NULL; + } + +cleanup: + if (failed) { + free_values(return_values); + return NULL; + } + + return return_values; +} + +/** + * Extracts array of values from the '%' separated list of values. + */ +char** split(char *value) { + return split_delimiter(value, "%"); +} + +// free an entry set of values +void free_values(char** values) { + if (values != NULL) { + int idx = 0; + while (values[idx]) { + free(values[idx]); + idx++; + } + free(values); + } +} + +/** + * Trim whitespace from beginning and end. 
+*/ +char* trim(const char* input) { + const char *val_begin; + const char *val_end; + char *ret; + + if (input == NULL) { + return NULL; + } + + val_begin = input; + val_end = input + strlen(input); + + while (val_begin < val_end && isspace(*val_begin)) + val_begin++; + while (val_end > val_begin && isspace(*(val_end - 1))) + val_end--; + + ret = (char *) malloc( + sizeof(char) * (val_end - val_begin + 1)); + if (ret == NULL) { + fprintf(ERRORFILE, "Allocation error\n"); + exit(OUT_OF_MEMORY); + } + + strncpy(ret, val_begin, val_end - val_begin); + ret[val_end - val_begin] = '\0'; + return ret; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h new file mode 100644 index 00000000000..fa21def5339 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __YARN_POSIX_CONTAINER_EXECUTOR_UTIL_H__ +#define __YARN_POSIX_CONTAINER_EXECUTOR_UTIL_H__ + +#include + +enum errorcodes { + INVALID_ARGUMENT_NUMBER = 1, + //INVALID_USER_NAME 2 + INVALID_COMMAND_PROVIDED = 3, + // SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS (NOT USED) 4 + INVALID_NM_ROOT_DIRS = 5, + SETUID_OPER_FAILED, //6 + UNABLE_TO_EXECUTE_CONTAINER_SCRIPT, //7 + UNABLE_TO_SIGNAL_CONTAINER, //8 + INVALID_CONTAINER_PID, //9 + // ERROR_RESOLVING_FILE_PATH (NOT_USED) 10 + // RELATIVE_PATH_COMPONENTS_IN_FILE_PATH (NOT USED) 11 + // UNABLE_TO_STAT_FILE (NOT USED) 12 + // FILE_NOT_OWNED_BY_ROOT (NOT USED) 13 + // PREPARE_CONTAINER_DIRECTORIES_FAILED (NOT USED) 14 + // INITIALIZE_CONTAINER_FAILED (NOT USED) 15 + // PREPARE_CONTAINER_LOGS_FAILED (NOT USED) 16 + // INVALID_LOG_DIR (NOT USED) 17 + OUT_OF_MEMORY = 18, + // INITIALIZE_DISTCACHEFILE_FAILED (NOT USED) 19 + INITIALIZE_USER_FAILED = 20, + PATH_TO_DELETE_IS_NULL, //21 + INVALID_CONTAINER_EXEC_PERMISSIONS, //22 + // PREPARE_JOB_LOGS_FAILED (NOT USED) 23 + INVALID_CONFIG_FILE = 24, + SETSID_OPER_FAILED = 25, + WRITE_PIDFILE_FAILED = 26, + WRITE_CGROUP_FAILED = 27, + TRAFFIC_CONTROL_EXECUTION_FAILED = 28, + DOCKER_RUN_FAILED = 29, + ERROR_OPENING_DOCKER_FILE = 30, + ERROR_READING_DOCKER_FILE = 31, + FEATURE_DISABLED = 32, + COULD_NOT_CREATE_SCRIPT_COPY = 33, + COULD_NOT_CREATE_CREDENTIALS_FILE = 34, + COULD_NOT_CREATE_WORK_DIRECTORIES = 35, + COULD_NOT_CREATE_APP_LOG_DIRECTORIES = 36, + COULD_NOT_CREATE_TMP_DIRECTORIES = 37, + ERROR_CREATE_CONTAINER_DIRECTORIES_ARGUMENTS = 38, + ERROR_SANITIZING_DOCKER_COMMAND = 39, + DOCKER_IMAGE_INVALID = 40, + DOCKER_CONTAINER_NAME_INVALID = 41, + ERROR_COMPILING_REGEX = 42 +}; + +/* Macros for min/max. */ +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif /* MIN */ +#ifndef MAX +#define MAX(a,b) (((a)>(b))?(a):(b)) +#endif /* MAX */ + +// the log file for messages +extern FILE *LOGFILE; +// the log file for error messages +extern FILE *ERRORFILE; +/** + * Function to split the given string using '%' as the separator. It's + * up to the caller to free the memory for the returned array. Use the + * free_values function to free the allocated memory. + * + * @param str the string to split + * + * @return an array of strings + */ +char** split(char *str); + +/** + * Function to split the given string using the delimiter specified. It's + * up to the caller to free the memory for the returned array. Use the + * free_values function to free the allocated memory. + * + * @param str the string to split + * @param delimiter the delimiter to use + * + * @return an array of strings + */ +char** split_delimiter(char *value, const char *delimiter); + +/** + * Function to free an array of strings. + * + * @param values the array to free + * + */ +void free_values(char **values); + +/** + * Trim whitespace from beginning and end. The returned string has to be freed + * by the caller. 
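/*
 * Usage sketch for the helpers declared above: split() tokenizes a
 * '%'-separated list in place (the NodeManager passes local and log dirs in
 * that form), returning a NULL-terminated array that must be released with
 * free_values(); trim() returns a fresh, malloc'd copy that the caller frees.
 */
#include <stdlib.h>

void util_usage_sketch() {
  char dirs[] = "/var/run/yarn%/tmp/mydir";    /* writable: strtok_r is used */
  char **values = split(dirs);
  /* values[0] == "/var/run/yarn", values[1] == "/tmp/mydir", values[2] == NULL */
  free_values(values);

  char *clean = trim("  container_1  ");
  /* clean == "container_1" */
  free(clean);
}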
+ * + * @param input Input string that needs to be trimmed + * + * @return the trimmed string allocated with malloc +*/ +char* trim(const char *input); + +#endif diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.c new file mode 100644 index 00000000000..dea656b9aea --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.c @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "util.h" + +#include +#include +#include +#include + +int verify_path_safety(const char* path) { + if (!path || path[0] == 0) { + return 1; + } + + char* dup = strdup(path); + if (!dup) { + fprintf(ERRORFILE, "%s: Failed to allocate memory for path.\n", __func__); + return 0; + } + + char* p = strtok(dup, "/"); + int succeeded = 1; + + while (p != NULL) { + if (0 == strcmp(p, "..")) { + fprintf(ERRORFILE, "%s: Path included \"..\", path=%s.\n", __func__, path); + succeeded = 0; + break; + } + + p = strtok(NULL, "/"); + } + free(dup); + + return succeeded; +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.h new file mode 100644 index 00000000000..a42f9366866 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/path-utils.h @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
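/*
 * Usage sketch: verify_path_safety() above rejects any path containing a ".."
 * component, which is what keeps the cgroups writer from being steered to
 * paths like "/sys/fs/cgroup/devices/../../../etc/passwd". As written, a NULL
 * or empty path is treated as safe (returns 1).
 */
void path_safety_sketch() {
  int ok  = verify_path_safety(
      "/sys/fs/cgroup/devices/yarn/container_1/devices.deny");   /* 1 */
  int bad = verify_path_safety(
      "/sys/fs/cgroup/devices/../../../etc/passwd");             /* 0 */
  (void) ok; (void) bad;
}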
+ */ + +#ifdef __FreeBSD__ +#define _WITH_GETLINE +#endif + +#ifndef _UTILS_PATH_UTILS_H_ +#define _UTILS_PATH_UTILS_H_ + +/* + * Verify if a given path is safe or not. For example, we don't want a path + * include ".." which can do things like: + * - "/cgroups/cpu,cpuacct/container/../../../etc/passwd" + * + * return false/true + */ +int verify_path_safety(const char* path); + +#endif diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c new file mode 100644 index 00000000000..d19c08493a4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "util.h" + +#include +#include +#include +#include +#include +#include + +/* + * if all chars in the input str are numbers + * return true/false + */ +static int all_numbers(char* input) { + for (; input[0] != 0; input++) { + if (input[0] < '0' || input[0] > '9') { + return 0; + } + } + return 1; +} + +int get_numbers_split_by_comma(const char* input, int** numbers, + size_t* ret_n_numbers) { + size_t allocation_size = 1; + int i = 0; + while (input[i] != 0) { + if (input[i] == ',') { + allocation_size++; + } + i++; + } + + (*numbers) = malloc(sizeof(int) * allocation_size); + if (!(*numbers)) { + fprintf(ERRORFILE, "Failed to allocating memory for *numbers: %s\n", + __func__); + exit(OUT_OF_MEMORY); + } + memset(*numbers, 0, sizeof(int) * allocation_size); + + char* input_cpy = strdup(input); + if (!input_cpy) { + fprintf(ERRORFILE, "Failed to allocating memory for input_cpy: %s\n", + __func__); + exit(OUT_OF_MEMORY); + } + + char* p = strtok(input_cpy, ","); + int idx = 0; + size_t n_numbers = 0; + while (p != NULL) { + char *temp; + long n = strtol(p, &temp, 0); + // According to answer: + // https://stackoverflow.com/questions/14176123/correct-usage-of-strtol + // We need to properly check errno and overflows + if (temp == p || *temp != '\0' || + ((n == LONG_MIN || n == LONG_MAX) && errno == ERANGE)) { + fprintf(stderr, + "Could not convert '%s' to long and leftover string is: '%s'\n", + p, temp); + free(input_cpy); + return -1; + } + + n_numbers++; + (*numbers)[idx] = n; + p = strtok(NULL, ","); + idx++; + } + + free(input_cpy); + *ret_n_numbers = n_numbers; + + return 0; +} + +int validate_container_id(const char* input) { + int is_container_id = 1; + + /* + * Two different forms of container_id + * container_e17_1410901177871_0001_01_000005 + * 
container_1410901177871_0001_01_000005 + */ + if (!input) { + return 0; + } + + char* input_cpy = strdup(input); + if (!input_cpy) { + return 0; + } + + char* p = strtok(input_cpy, "_"); + int idx = 0; + while (p != NULL) { + if (0 == idx) { + if (0 != strcmp("container", p)) { + is_container_id = 0; + goto cleanup; + } + } else if (1 == idx) { + // this could be e[n][n], or [n][n]... + if (!all_numbers(p)) { + if (p[0] == 0) { + is_container_id = 0; + goto cleanup; + } + if (p[0] != 'e') { + is_container_id = 0; + goto cleanup; + } + if (!all_numbers(p + 1)) { + is_container_id = 0; + goto cleanup; + } + } + } else { + // otherwise, should be all numbers + if (!all_numbers(p)) { + is_container_id = 0; + goto cleanup; + } + } + + p = strtok(NULL, "_"); + idx++; + } + +cleanup: + if (input_cpy) { + free(input_cpy); + } + + // We should have [5,6] elements split by '_' + if (idx > 6 || idx < 5) { + is_container_id = 0; + } + return is_container_id; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h new file mode 100644 index 00000000000..c095eb6bbc8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifdef __FreeBSD__ +#define _WITH_GETLINE +#endif + +#ifndef _UTILS_STRING_UTILS_H_ +#define _UTILS_STRING_UTILS_H_ + +/* + * Get numbers split by comma from a input string + * return false/true + */ +int validate_container_id(const char* input); + +/* + * return 0 if succeeded + */ +int get_numbers_split_by_comma(const char* input, int** numbers, size_t* n_numbers); + +#endif diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/resources/test/test-configurations/configuration-1.cfg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/resources/test/test-configurations/configuration-1.cfg new file mode 100644 index 00000000000..4d0b90d0cec --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/resources/test/test-configurations/configuration-1.cfg @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
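/*
 * Usage sketch for the string helpers above: validate_container_id() accepts
 * both container id forms (with and without the epoch component), and
 * get_numbers_split_by_comma() backs the --excluded_gpus parsing in the GPU
 * module.
 */
#include <stdlib.h>

void string_utils_sketch() {
  /* both return 1; malformed ids such as "container_1" return 0 */
  int plain = validate_container_id("container_1410901177871_0001_01_000005");
  int epoch = validate_container_id("container_e17_1410901177871_0001_01_000005");

  int *gpus = NULL;
  size_t n = 0;
  if (get_numbers_split_by_comma("0,1,3", &gpus, &n) == 0) {
    /* n == 3 and gpus == {0, 1, 3}; the caller frees the array */
    free(gpus);
  }
  (void) plain; (void) epoch;
}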
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +[section-1] +key1=value1 +split-key=val1,val2,val3 +perc-key=perc-val1%perc-val2 +# some comment + +[split-section] +key3=value3 + +[section-2] +key1=value2 + +key2=value2 + +[split-section] +key4=value4 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/resources/test/test-configurations/configuration-2.cfg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/resources/test/test-configurations/configuration-2.cfg new file mode 100644 index 00000000000..aa02db84a44 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/resources/test/test-configurations/configuration-2.cfg @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Test mixed mode config file +# Initial few lines are in the key=value format +# and then the sections start + +key1=value1 +key2=value2 + + +[section-1] +key3=value3 +key1=value4 + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/resources/test/test-configurations/old-config.cfg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/resources/test/test-configurations/old-config.cfg new file mode 100644 index 00000000000..947a3fae605 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/resources/test/test-configurations/old-config.cfg @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +yarn.nodemanager.linux-container-executor.group=yarn +banned.users=root,testuser1,testuser2#comma separated list of users who can not run applications +min.user.id=1000 +allowed.system.users=nobody,daemon +feature.docker.enabled=1 +feature.tc.enabled=0 +docker.binary=/usr/bin/docker +yarn.local.dirs=/var/run/yarn%/tmp/mydir +test.key=#no value for this key +# test.key2=0 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc new file mode 100644 index 00000000000..8ffbe884a64 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/cgroups/test-cgroups-module.cc @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +extern "C" { +#include "configuration.h" +#include "container-executor.h" +#include "modules/cgroups/cgroups-operations.h" +#include "test/test-container-executor-common.h" +#include "util.h" +} + +namespace ContainerExecutor { + +class TestCGroupsModule : public ::testing::Test { +protected: + virtual void SetUp() { + if (mkdirs(TEST_ROOT, 0755) != 0) { + fprintf(ERRORFILE, "Failed to mkdir TEST_ROOT: %s\n", TEST_ROOT); + exit(1); + } + LOGFILE = stdout; + ERRORFILE = stderr; + } + + virtual void TearDown() {} +}; + +TEST_F(TestCGroupsModule, test_cgroups_get_path_without_define_root) { + // Write config file. 
+ const char *filename = TEST_ROOT "/test_cgroups_get_path_without_root.cfg"; + FILE *file = fopen(filename, "w"); + if (file == NULL) { + printf("FAIL: Could not open configuration file: %s\n", filename); + exit(1); + } + fprintf(file, "[cgroups]\n"); + fprintf(file, "yarn-hierarchy=yarn\n"); + fclose(file); + + // Read config file + read_executor_config(filename); + reload_cgroups_configuration(); + + char* path = get_cgroups_path_to_write("devices", "deny", "container_1"); + + ASSERT_TRUE(NULL == path) << "Should fail.\n"; +} + +TEST_F(TestCGroupsModule, test_cgroups_get_path_without_define_yarn_hierarchy) { + // Write config file. + const char *filename = TEST_ROOT "/test_cgroups_get_path_without_root.cfg"; + FILE *file = fopen(filename, "w"); + + ASSERT_TRUE(file) << "FAIL: Could not open configuration file: " << filename + << "\n"; + fprintf(file, "[cgroups]\n"); + fprintf(file, "root=/sys/fs/cgroups\n"); + fclose(file); + + // Read config file + read_executor_config(filename); + reload_cgroups_configuration(); + char* path = get_cgroups_path_to_write("devices", "deny", "container_1"); + + ASSERT_TRUE(NULL == path) << "Should fail.\n"; +} + +TEST_F(TestCGroupsModule, test_cgroups_get_path_succeeded) { + // Write config file. + const char *filename = TEST_ROOT "/test_cgroups_get_path.cfg"; + FILE *file = fopen(filename, "w"); + + ASSERT_TRUE(file) << "FAIL: Could not open configuration file\n"; + fprintf(file, "[cgroups]\n"); + fprintf(file, "root=/sys/fs/cgroups \n"); + fprintf(file, "yarn-hierarchy=yarn \n"); + fclose(file); + + // Read config file + read_executor_config(filename); + reload_cgroups_configuration(); + + char* path = get_cgroups_path_to_write("devices", "deny", "container_1"); + ASSERT_TRUE(NULL != path) << "Should success.\n"; + + const char *EXPECTED = + "/sys/fs/cgroups/devices/yarn/container_1/devices.deny"; + + ASSERT_STREQ(EXPECTED, path) + << "Return cgroup-path-to-write is not expected\n"; +} +} // namespace ContainerExecutor \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc new file mode 100644 index 00000000000..7e41fb43d1c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/modules/gpu/test-gpu-module.cc @@ -0,0 +1,203 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +extern "C" { +#include "configuration.h" +#include "container-executor.h" +#include "modules/cgroups/cgroups-operations.h" +#include "modules/gpu/gpu-module.h" +#include "test/test-container-executor-common.h" +#include "util.h" +} + +namespace ContainerExecutor { + +class TestGpuModule : public ::testing::Test { +protected: + virtual void SetUp() { + if (mkdirs(TEST_ROOT, 0755) != 0) { + fprintf(ERRORFILE, "Failed to mkdir TEST_ROOT: %s\n", TEST_ROOT); + exit(1); + } + LOGFILE = stdout; + ERRORFILE = stderr; + } + + virtual void TearDown() { + + } +}; + +static std::vector cgroups_parameters_invoked; + +static int mock_update_cgroups_parameters( + const char* controller_name, + const char* param_name, + const char* group_id, + const char* value) { + char* buf = (char*) malloc(128); + strcpy(buf, controller_name); + cgroups_parameters_invoked.push_back(buf); + + buf = (char*) malloc(128); + strcpy(buf, param_name); + cgroups_parameters_invoked.push_back(buf); + + buf = (char*) malloc(128); + strcpy(buf, group_id); + cgroups_parameters_invoked.push_back(buf); + + buf = (char*) malloc(128); + strcpy(buf, value); + cgroups_parameters_invoked.push_back(buf); + return 0; +} + +static void verify_param_updated_to_cgroups( + int argc, const char** argv) { + ASSERT_EQ(argc, cgroups_parameters_invoked.size()); + + int offset = 0; + while (offset < argc) { + ASSERT_STREQ(argv[offset], cgroups_parameters_invoked[offset]); + offset++; + } +} + +static void write_and_load_gpu_module_to_cfg(const char* cfg_filepath, int enabled) { + FILE *file = fopen(cfg_filepath, "w"); + if (file == NULL) { + printf("FAIL: Could not open configuration file: %s\n", cfg_filepath); + exit(1); + } + fprintf(file, "[gpu]\n"); + if (enabled) { + fprintf(file, "module.enabled=true\n"); + } else { + fprintf(file, "module.enabled=false\n"); + } + fclose(file); + + // Read config file + read_executor_config(cfg_filepath); + reload_gpu_configuration(); +} + +static void test_gpu_module_enabled_disabled(int enabled) { + // Write config file. + const char *filename = TEST_ROOT "/test_cgroups_module_enabled_disabled.cfg"; + write_and_load_gpu_module_to_cfg(filename, enabled); + + char* argv[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1", + (char*) "--container_id", + (char*) "container_1498064906505_0001_01_000001" }; + + int rc = handle_gpu_request(&mock_update_cgroups_parameters, + "gpu", 5, argv); + + int EXPECTED_RC; + if (enabled) { + EXPECTED_RC = 0; + } else { + EXPECTED_RC = -1; + } + ASSERT_EQ(EXPECTED_RC, rc); +} + +TEST_F(TestGpuModule, test_verify_gpu_module_calls_cgroup_parameter) { + // Write config file. 
+  const char *filename = TEST_ROOT "/test_verify_gpu_module_calls_cgroup_parameter.cfg";
+  write_and_load_gpu_module_to_cfg(filename, 1);
+
+  char* container_id = (char*) "container_1498064906505_0001_01_000001";
+  char* argv[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1",
+                   (char*) "--container_id",
+                   container_id };
+
+  /* Test case 1: block 2 devices */
+  cgroups_parameters_invoked.clear();
+  int rc = handle_gpu_request(&mock_update_cgroups_parameters,
+                              "gpu", 5, argv);
+  ASSERT_EQ(0, rc) << "Should succeed.\n";
+
+  // Verify cgroups parameters
+  const char* expected_cgroups_argv[] = { "devices", "deny", container_id, "c 195:0 rwm",
+                                          "devices", "deny", container_id, "c 195:1 rwm"};
+  verify_param_updated_to_cgroups(8, expected_cgroups_argv);
+
+  /* Test case 2: block 0 devices */
+  cgroups_parameters_invoked.clear();
+  char* argv_1[] = { (char*) "--module-gpu", (char*) "--container_id", container_id };
+  rc = handle_gpu_request(&mock_update_cgroups_parameters,
+                          "gpu", 3, argv_1);
+  ASSERT_EQ(0, rc) << "Should succeed.\n";
+
+  // Verify cgroups parameters
+  verify_param_updated_to_cgroups(0, NULL);
+}
+
+TEST_F(TestGpuModule, test_illegal_cli_parameters) {
+  // Write config file.
+  const char *filename = TEST_ROOT "/test_illegal_cli_parameters.cfg";
+  write_and_load_gpu_module_to_cfg(filename, 1);
+
+  // Illegal container id - 1
+  char* argv[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1",
+                   (char*) "--container_id", (char*) "xxxx" };
+  int rc = handle_gpu_request(&mock_update_cgroups_parameters,
+                              "gpu", 5, argv);
+  ASSERT_NE(0, rc) << "Should fail.\n";
+
+  // Illegal container id - 2
+  char* argv_1[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1",
+                     (char*) "--container_id", (char*) "container_1" };
+  rc = handle_gpu_request(&mock_update_cgroups_parameters,
+                          "gpu", 5, argv_1);
+  ASSERT_NE(0, rc) << "Should fail.\n";
+
+  // Illegal container id - 3
+  char* argv_2[] = { (char*) "--module-gpu", (char*) "--excluded_gpus", (char*) "0,1" };
+  rc = handle_gpu_request(&mock_update_cgroups_parameters,
+                          "gpu", 3, argv_2);
+  ASSERT_NE(0, rc) << "Should fail.\n";
+}
+
+TEST_F(TestGpuModule, test_gpu_module_disabled) {
+  test_gpu_module_enabled_disabled(0);
+}
+
+TEST_F(TestGpuModule, test_gpu_module_enabled) {
+  test_gpu_module_enabled_disabled(1);
+}
+} // namespace ContainerExecutor
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor-common.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor-common.h
new file mode 100644
index 00000000000..d3536252025
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor-common.h
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + #ifdef __APPLE__ + #include + #include + + #define TMPDIR "/private/tmp" + #define RELTMPDIR "../.." + #else + #define RELTMPDIR ".." + #define TMPDIR "/tmp" + #endif + + #define TEST_ROOT TMPDIR "/test-container-executor" + + #define DONT_TOUCH_FILE "dont-touch-me" + #define NM_LOCAL_DIRS TEST_ROOT "/local-1%" TEST_ROOT "/local-2%" \ + TEST_ROOT "/local-3%" TEST_ROOT "/local-4%" TEST_ROOT "/local-5" + #define NM_LOG_DIRS TEST_ROOT "/logs/userlogs" + #define ARRAY_SIZE 1000 \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c index b7d0e442f03..64ee717afdc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c @@ -17,6 +17,9 @@ */ #include "configuration.h" #include "container-executor.h" +#include "utils/string-utils.h" +#include "util.h" +#include "test/test-container-executor-common.h" #include #include @@ -29,25 +32,6 @@ #include #include -#ifdef __APPLE__ -#include -#include - -#define TMPDIR "/private/tmp" -#define RELTMPDIR "../.." -#else -#define RELTMPDIR ".." 
-#define TMPDIR "/tmp" -#endif - -#define TEST_ROOT TMPDIR "/test-container-executor" - -#define DONT_TOUCH_FILE "dont-touch-me" -#define NM_LOCAL_DIRS TEST_ROOT "/local-1%" TEST_ROOT "/local-2%" \ - TEST_ROOT "/local-3%" TEST_ROOT "/local-4%" TEST_ROOT "/local-5" -#define NM_LOG_DIRS TEST_ROOT "/logs/userlogs" -#define ARRAY_SIZE 1000 - static char* username = NULL; static char* yarn_username = NULL; static char** local_dirs = NULL; @@ -368,7 +352,7 @@ void test_delete_app() { sprintf(buffer, "chmod 000 %s/who/let", container_dir); run(buffer); - // delete container directory + // delete application directory int ret = delete_as_user(yarn_username, app_dir, NULL); if (ret != 0) { printf("FAIL: return code from delete_as_user is %d\n", ret); @@ -390,13 +374,20 @@ void test_delete_app() { printf("FAIL: accidently deleted file %s\n", dont_touch); exit(1); } + // verify attempt to delete a nonexistent directory does not fail + ret = delete_as_user(yarn_username, app_dir, NULL); + if (ret != 0) { + printf("FAIL: return code from delete_as_user is %d\n", ret); + exit(1); + } + free(app_dir); free(container_dir); free(dont_touch); } void validate_feature_enabled_value(int expected_value, const char* key, - int default_value, struct configuration *cfg) { + int default_value, struct section *cfg) { int value = is_feature_enabled(key, default_value, cfg); if (value != expected_value) { @@ -411,7 +402,8 @@ void test_is_feature_enabled() { FILE *file = fopen(filename, "w"); int disabled = 0; int enabled = 1; - struct configuration cfg = {.size=0, .confdetails=NULL}; + struct configuration exec_cfg = {.size=0, .sections=NULL}; + struct section cfg = {.size=0, .kv_pairs=NULL}; if (file == NULL) { printf("FAIL: Could not open configuration file: %s\n", filename); @@ -425,7 +417,8 @@ void test_is_feature_enabled() { fprintf(file, "feature.name5.enabled=-1\n"); fprintf(file, "feature.name6.enabled=2\n"); fclose(file); - read_config(filename, &cfg); + read_config(filename, &exec_cfg); + cfg = *(get_configuration_section("", &exec_cfg)); validate_feature_enabled_value(disabled, "feature.name1.enabled", disabled, &cfg); @@ -441,7 +434,7 @@ void test_is_feature_enabled() { disabled, &cfg); - free_configurations(&cfg); + free_configuration(&exec_cfg); } void test_delete_user() { @@ -975,6 +968,83 @@ static void expect_type(const char *path, int mode) { } } +static void test_delete_race_internal() { + char* app_dir = get_app_directory(TEST_ROOT "/local-2", yarn_username, "app_1"); + char* container_dir = get_container_work_directory(TEST_ROOT "/local-2", + yarn_username, "app_1", "container_1"); + char buffer[100000]; + + sprintf(buffer, "mkdir -p %s/a/b/c/d", container_dir); + run(buffer); + int i; + for (i = 0; i < 100; ++i) { + sprintf(buffer, "%s/a/f%d", container_dir, i); + touch_or_die(buffer); + sprintf(buffer, "%s/a/b/f%d", container_dir, i); + touch_or_die(buffer); + sprintf(buffer, "%s/a/b/c/f%d", container_dir, i); + touch_or_die(buffer); + sprintf(buffer, "%s/a/b/c/d/f%d", container_dir, i); + touch_or_die(buffer); + } + + pid_t child = fork(); + if (child == -1) { + printf("FAIL: fork failed\n"); + exit(1); + } else if (child == 0) { + // delete container directory + char * dirs[] = {app_dir, 0}; + int ret = delete_as_user(yarn_username, "container_1" , dirs); + if (ret != 0) { + printf("FAIL: return code from delete_as_user is %d\n", ret); + exit(1); + } + exit(0); + } else { + // delete application directory + int ret = delete_as_user(yarn_username, app_dir, NULL); + int status = 0; + if 
(waitpid(child, &status, 0) == -1) { + printf("FAIL: waitpid %" PRId64 " failed - %s\n", (int64_t)child, strerror(errno)); + exit(1); + } + if (!WIFEXITED(status)) { + printf("FAIL: child %" PRId64 " didn't exit - %d\n", (int64_t)child, status); + exit(1); + } + if (WEXITSTATUS(status) != 0) { + printf("FAIL: child %" PRId64 " exited with bad status %d\n", + (int64_t)child, WEXITSTATUS(status)); + exit(1); + } + if (ret != 0) { + printf("FAIL: return code from delete_as_user is %d\n", ret); + exit(1); + } + } + + // check to make sure the app directory is gone + if (access(app_dir, R_OK) == 0) { + printf("FAIL: didn't delete the directory - %s\n", app_dir); + exit(1); + } + + free(app_dir); + free(container_dir); +} + +void test_delete_race() { + if (initialize_user(yarn_username, local_dirs)) { + printf("FAIL: failed to initialize user %s\n", yarn_username); + exit(1); + } + int i; + for (i = 0; i < 100; ++i) { + test_delete_race_internal(); + } +} + int recursive_unlink_children(const char *name); void test_recursive_unlink_children() { @@ -1092,7 +1162,13 @@ void test_sanitize_docker_command() { "run --name=$CID --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh", "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh", "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu' || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh", - "run ''''''''" + "run ''''''''", + "inspect --format='{{range(.NetworkSettings.Networks)}}{{.IPAddress}},{{end}}{{.Config.Hostname}}' container_e111_1111111111111_1111_01_111111", + "rm container_e111_1111111111111_1111_01_111111", + "stop 
container_e111_1111111111111_1111_01_111111", + "pull ubuntu", + "pull registry.com/user/ubuntu", + "--config=/yarn/local/cdir/ pull registry.com/user/ubuntu" }; char *expected_output[] = { "run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ", @@ -1100,12 +1176,18 @@ void test_sanitize_docker_command() { "run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ", "run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu'\"'\"'' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ", "run ''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"'' ", + "inspect --format='{{range(.NetworkSettings.Networks)}}{{.IPAddress}},{{end}}{{.Config.Hostname}}' container_e111_1111111111111_1111_01_111111", + "rm container_e111_1111111111111_1111_01_111111", + "stop container_e111_1111111111111_1111_01_111111", + "pull ubuntu", + "pull registry.com/user/ubuntu", + "--config=/yarn/local/cdir/ pull registry.com/user/ubuntu" }; int input_size = sizeof(input) / sizeof(char *); int i = 0; for(i = 0; i < input_size; i++) { - char *command = (char *) calloc(strlen(input[i]), sizeof(char)); + char *command = (char *) calloc(strlen(input[i]) + 1 , sizeof(char)); strncpy(command, input[i], strlen(input[i])); char *op = sanitize_docker_command(command); if(strncmp(expected_output[i], op, strlen(expected_output[i])) != 0) { @@ -1116,6 +1198,102 @@ void test_sanitize_docker_command() { } } +void 
test_validate_docker_image_name() {
+
+  char *good_input[] = {
+    "ubuntu",
+    "ubuntu:latest",
+    "ubuntu:14.04",
+    "ubuntu:LATEST",
+    "registry.com:5000/user/ubuntu",
+    "registry.com:5000/user/ubuntu:latest",
+    "registry.com:5000/user/ubuntu:0.1.2.3",
+    "registry.com/user/ubuntu",
+    "registry.com/user/ubuntu:latest",
+    "registry.com/user/ubuntu:0.1.2.3",
+    "registry.com/user/ubuntu:test-image",
+    "registry.com/user/ubuntu:test_image",
+    "registry.com/ubuntu",
+    "user/ubuntu",
+    "user/ubuntu:0.1.2.3",
+    "user/ubuntu:latest",
+    "user/ubuntu:test_image",
+    "user/ubuntu.test:test_image",
+    "user/ubuntu-test:test-image",
+    "registry.com/ubuntu/ubuntu/ubuntu"
+  };
+
+  char *bad_input[] = {
+    "UBUNTU",
+    "registry.com|5000/user/ubuntu",
+    "registry.com | 5000/user/ubuntu",
+    "ubuntu' || touch /tmp/file #",
+    "ubuntu || touch /tmp/file #",
+    "''''''''",
+    "bad_host_name:5000/user/ubuntu",
+    "registry.com:foo/ubuntu/ubuntu/ubuntu",
+    "registry.com/ubuntu:foo/ubuntu/ubuntu"
+  };
+
+  int good_input_size = sizeof(good_input) / sizeof(char *);
+  int i = 0;
+  for(i = 0; i < good_input_size; i++) {
+    int op = validate_docker_image_name(good_input[i]);
+    if(0 != op) {
+      printf("\nFAIL: docker image name %s is invalid", good_input[i]);
+      exit(1);
+    }
+  }
+
+  int bad_input_size = sizeof(bad_input) / sizeof(char *);
+  int j = 0;
+  for(j = 0; j < bad_input_size; j++) {
+    int op = validate_docker_image_name(bad_input[j]);
+    if(1 != op) {
+      printf("\nFAIL: docker image name %s is valid, expected invalid", bad_input[j]);
+      exit(1);
+    }
+  }
+}
+
+void test_validate_container_id() {
+  char *good_input[] = {
+    "container_e134_1499953498516_50875_01_000007",
+    "container_1499953498516_50875_01_000007",
+    "container_e1_12312_11111_02_000001"
+  };
+
+  char *bad_input[] = {
+    "CONTAINER",
+    "container_e1_12312_11111_02_000001 | /tmp/file",
+    "container_e1_12312_11111_02_000001 || # /tmp/file",
+    "container_e1_12312_11111_02_000001 # /tmp/file",
+    "container_e1_12312_11111_02_000001' || touch /tmp/file #",
+    "ubuntu || touch /tmp/file #",
+    "''''''''"
+  };
+
+  int good_input_size = sizeof(good_input) / sizeof(char *);
+  int i = 0;
+  for(i = 0; i < good_input_size; i++) {
+    int op = validate_container_id(good_input[i]);
+    if(1 != op) {
+      printf("FAIL: docker container name %s is invalid\n", good_input[i]);
+      exit(1);
+    }
+  }
+
+  int bad_input_size = sizeof(bad_input) / sizeof(char *);
+  int j = 0;
+  for(j = 0; j < bad_input_size; j++) {
+    int op = validate_container_id(bad_input[j]);
+    if(0 != op) {
+      printf("FAIL: docker container name %s is valid, expected invalid\n", bad_input[j]);
+      exit(1);
+    }
+  }
+}
+
+// This test is expected to be executed either by a regular
+// user or by root.
If executed by a regular user it doesn't // test all the functions that would depend on changing the @@ -1152,8 +1330,8 @@ int main(int argc, char **argv) { read_executor_config(TEST_ROOT "/test.cfg"); - local_dirs = extract_values(strdup(NM_LOCAL_DIRS)); - log_dirs = extract_values(strdup(NM_LOG_DIRS)); + local_dirs = split(strdup(NM_LOCAL_DIRS)); + log_dirs = split(strdup(NM_LOG_DIRS)); create_nm_roots(local_dirs); @@ -1204,12 +1382,21 @@ int main(int argc, char **argv) { printf("\nTesting delete_app()\n"); test_delete_app(); + printf("\nTesting delete race\n"); + test_delete_race(); + printf("\nTesting is_feature_enabled()\n"); test_is_feature_enabled(); printf("\nTesting sanitize docker commands()\n"); test_sanitize_docker_command(); + printf("\nTesting validate_docker_image_name()\n"); + test_validate_docker_image_name(); + + printf("\nTesting validate_container_id()\n"); + test_validate_container_id(); + test_check_user(0); #ifdef __APPLE__ @@ -1281,10 +1468,7 @@ int main(int argc, char **argv) { test_check_user(1); #endif - run("rm -fr " TEST_ROOT); - test_trim_function(); - printf("\nFinished tests\n"); free(current_username); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc new file mode 100644 index 00000000000..6ee0ab2be1c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc @@ -0,0 +1,432 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +extern "C" { +#include "util.h" +#include "configuration.h" +#include "configuration.c" +} + + +namespace ContainerExecutor { + class TestConfiguration : public ::testing::Test { + protected: + virtual void SetUp() { + new_config_format_file = "test-configurations/configuration-1.cfg"; + old_config_format_file = "test-configurations/old-config.cfg"; + mixed_config_format_file = "test-configurations/configuration-2.cfg"; + loadConfigurations(); + return; + } + + void loadConfigurations() { + int ret = 0; + ret = read_config(new_config_format_file.c_str(), &new_config_format); + ASSERT_EQ(0, ret); + ret = read_config(old_config_format_file.c_str(), &old_config_format); + ASSERT_EQ(0, ret); + ret = read_config(mixed_config_format_file.c_str(), + &mixed_config_format); + ASSERT_EQ(0, ret); + } + + virtual void TearDown() { + free_configuration(&new_config_format); + free_configuration(&old_config_format); + return; + } + + std::string new_config_format_file; + std::string old_config_format_file; + std::string mixed_config_format_file; + struct configuration new_config_format; + struct configuration old_config_format; + struct configuration mixed_config_format; + }; + + + TEST_F(TestConfiguration, test_get_configuration_values_delimiter) { + char **split_values; + split_values = get_configuration_values_delimiter(NULL, "", &old_config_format, "%"); + ASSERT_EQ(NULL, split_values); + split_values = get_configuration_values_delimiter("yarn.local.dirs", NULL, + &old_config_format, "%"); + ASSERT_EQ(NULL, split_values); + split_values = get_configuration_values_delimiter("yarn.local.dirs", "", + NULL, "%"); + ASSERT_EQ(NULL, split_values); + split_values = get_configuration_values_delimiter("yarn.local.dirs", "", + &old_config_format, NULL); + ASSERT_EQ(NULL, split_values); + split_values = get_configuration_values_delimiter("yarn.local.dirs", "abcd", + &old_config_format, "%"); + ASSERT_EQ(NULL, split_values); + split_values = get_configuration_values_delimiter("yarn.local.dirs", "", + &old_config_format, "%"); + ASSERT_STREQ("/var/run/yarn", split_values[0]); + ASSERT_STREQ("/tmp/mydir", split_values[1]); + ASSERT_EQ(NULL, split_values[2]); + free(split_values); + split_values = get_configuration_values_delimiter("allowed.system.users", + "", &old_config_format, "%"); + ASSERT_STREQ("nobody,daemon", split_values[0]); + ASSERT_EQ(NULL, split_values[1]); + free(split_values); + } + + TEST_F(TestConfiguration, test_get_configuration_values) { + char **split_values; + split_values = get_configuration_values(NULL, "", &old_config_format); + ASSERT_EQ(NULL, split_values); + split_values = get_configuration_values("yarn.local.dirs", NULL, &old_config_format); + ASSERT_EQ(NULL, split_values); + split_values = get_configuration_values("yarn.local.dirs", "", NULL); + ASSERT_EQ(NULL, split_values); + split_values = get_configuration_values("yarn.local.dirs", "abcd", &old_config_format); + ASSERT_EQ(NULL, split_values); + split_values = get_configuration_values("yarn.local.dirs", "", &old_config_format); + ASSERT_STREQ("/var/run/yarn%/tmp/mydir", split_values[0]); + ASSERT_EQ(NULL, split_values[1]); + free(split_values); + split_values = get_configuration_values("allowed.system.users", "", + &old_config_format); + ASSERT_STREQ("nobody", split_values[0]); + ASSERT_STREQ("daemon", split_values[1]); + ASSERT_EQ(NULL, split_values[2]); + free(split_values); + } + + TEST_F(TestConfiguration, test_get_configuration_value) { + std::string key_value_array[5][2] = { + 
{"yarn.nodemanager.linux-container-executor.group", "yarn"}, + {"min.user.id", "1000"}, + {"allowed.system.users", "nobody,daemon"}, + {"feature.docker.enabled", "1"}, + {"yarn.local.dirs", "/var/run/yarn%/tmp/mydir"} + }; + char *value; + value = get_configuration_value(NULL, "", &old_config_format); + ASSERT_EQ(NULL, value); + value = get_configuration_value("yarn.local.dirs", NULL, &old_config_format); + ASSERT_EQ(NULL, value); + value = get_configuration_value("yarn.local.dirs", "", NULL); + ASSERT_EQ(NULL, value); + + for (int i = 0; i < 5; ++i) { + value = get_configuration_value(key_value_array[i][0].c_str(), + "", &old_config_format); + ASSERT_STREQ(key_value_array[i][1].c_str(), value); + free(value); + } + value = get_configuration_value("test.key", "", &old_config_format); + ASSERT_EQ(NULL, value); + value = get_configuration_value("test.key2", "", &old_config_format); + ASSERT_EQ(NULL, value); + value = get_configuration_value("feature.tc.enabled", "abcd", &old_config_format); + ASSERT_EQ(NULL, value); + } + + TEST_F(TestConfiguration, test_no_sections_format) { + const struct section *executor_cfg = get_configuration_section("", &old_config_format); + char *value = NULL; + value = get_section_value("yarn.nodemanager.linux-container-executor.group", executor_cfg); + ASSERT_STREQ("yarn", value); + value = get_section_value("feature.docker.enabled", executor_cfg); + ASSERT_STREQ("1", value); + value = get_section_value("feature.tc.enabled", executor_cfg); + ASSERT_STREQ("0", value); + value = get_section_value("min.user.id", executor_cfg); + ASSERT_STREQ("1000", value); + value = get_section_value("docker.binary", executor_cfg); + ASSERT_STREQ("/usr/bin/docker", value); + char **list = get_section_values("allowed.system.users", executor_cfg); + ASSERT_STREQ("nobody", list[0]); + ASSERT_STREQ("daemon", list[1]); + list = get_section_values("banned.users", executor_cfg); + ASSERT_STREQ("root", list[0]); + ASSERT_STREQ("testuser1", list[1]); + ASSERT_STREQ("testuser2", list[2]); + } + + TEST_F(TestConfiguration, test_get_section_values_delimiter) { + const struct section *section; + char *value; + char **split_values; + section = get_configuration_section("section-1", &new_config_format); + value = get_section_value("key1", section); + ASSERT_STREQ("value1", value); + free(value); + value = get_section_value("key2", section); + ASSERT_EQ(NULL, value); + split_values = get_section_values_delimiter(NULL, section, "%"); + ASSERT_EQ(NULL, split_values); + split_values = get_section_values_delimiter("split-key", NULL, "%"); + ASSERT_EQ(NULL, split_values); + split_values = get_section_values_delimiter("split-key", section, NULL); + ASSERT_EQ(NULL, split_values); + split_values = get_section_values_delimiter("split-key", section, "%"); + ASSERT_FALSE(split_values == NULL); + ASSERT_STREQ("val1,val2,val3", split_values[0]); + ASSERT_TRUE(split_values[1] == NULL); + free_values(split_values); + split_values = get_section_values_delimiter("perc-key", section, "%"); + ASSERT_FALSE(split_values == NULL); + ASSERT_STREQ("perc-val1", split_values[0]); + ASSERT_STREQ("perc-val2", split_values[1]); + ASSERT_TRUE(split_values[2] == NULL); + } + + TEST_F(TestConfiguration, test_get_section_values) { + const struct section *section; + char *value; + char **split_values; + section = get_configuration_section("section-1", &new_config_format); + value = get_section_value(NULL, section); + ASSERT_EQ(NULL, value); + value = get_section_value("key1", NULL); + ASSERT_EQ(NULL, value); + value = 
get_section_value("key1", section); + ASSERT_STREQ("value1", value); + free(value); + value = get_section_value("key2", section); + ASSERT_EQ(NULL, value); + split_values = get_section_values("split-key", section); + ASSERT_FALSE(split_values == NULL); + ASSERT_STREQ("val1", split_values[0]); + ASSERT_STREQ("val2", split_values[1]); + ASSERT_STREQ("val3", split_values[2]); + ASSERT_TRUE(split_values[3] == NULL); + free_values(split_values); + split_values = get_section_values("perc-key", section); + ASSERT_FALSE(split_values == NULL); + ASSERT_STREQ("perc-val1%perc-val2", split_values[0]); + ASSERT_TRUE(split_values[1] == NULL); + free_values(split_values); + section = get_configuration_section("section-2", &new_config_format); + value = get_section_value("key1", section); + ASSERT_STREQ("value2", value); + free(value); + value = get_section_value("key2", section); + ASSERT_STREQ("value2", value); + free(value); + } + + TEST_F(TestConfiguration, test_split_section) { + const struct section *section; + char *value; + section = get_configuration_section("split-section", &new_config_format); + value = get_section_value(NULL, section); + ASSERT_EQ(NULL, value); + value = get_section_value("key3", NULL); + ASSERT_EQ(NULL, value); + value = get_section_value("key3", section); + ASSERT_STREQ("value3", value); + free(value); + value = get_section_value("key4", section); + ASSERT_STREQ("value4", value); + + } + + TEST_F(TestConfiguration, test_get_configuration_section) { + const struct section *section; + ASSERT_EQ(3, new_config_format.size); + section = get_configuration_section(NULL, &new_config_format); + ASSERT_EQ(NULL, section); + section = get_configuration_section("section-1", NULL); + ASSERT_EQ(NULL, section); + section = get_configuration_section("section-1", &new_config_format); + ASSERT_FALSE(section == NULL); + ASSERT_STREQ("section-1", section->name); + ASSERT_EQ(3, section->size); + ASSERT_FALSE(NULL == section->kv_pairs); + section = get_configuration_section("section-2", &new_config_format); + ASSERT_FALSE(section == NULL); + ASSERT_STREQ("section-2", section->name); + ASSERT_EQ(2, section->size); + ASSERT_FALSE(NULL == section->kv_pairs); + section = get_configuration_section("section-3", &new_config_format); + ASSERT_TRUE(section == NULL); + } + + TEST_F(TestConfiguration, test_read_config) { + struct configuration config; + int ret = 0; + + ret = read_config(NULL, &config); + ASSERT_EQ(INVALID_CONFIG_FILE, ret); + ret = read_config("bad-config-file", &config); + ASSERT_EQ(INVALID_CONFIG_FILE, ret); + ret = read_config(new_config_format_file.c_str(), &config); + ASSERT_EQ(0, ret); + ASSERT_EQ(3, config.size); + ASSERT_STREQ("section-1", config.sections[0]->name); + ASSERT_STREQ("split-section", config.sections[1]->name); + ASSERT_STREQ("section-2", config.sections[2]->name); + free_configuration(&config); + ret = read_config(old_config_format_file.c_str(), &config); + ASSERT_EQ(0, ret); + ASSERT_EQ(1, config.size); + ASSERT_STREQ("", config.sections[0]->name); + free_configuration(&config); + } + + TEST_F(TestConfiguration, test_get_kv_key) { + int ret = 0; + char buff[1024]; + ret = get_kv_key(NULL, buff, 1024); + ASSERT_EQ(-EINVAL, ret); + ret = get_kv_key("key1234", buff, 1024); + ASSERT_EQ(-EINVAL, ret); + ret = get_kv_key("key=abcd", NULL, 1024); + ASSERT_EQ(-ENAMETOOLONG, ret); + ret = get_kv_key("key=abcd", buff, 1); + ASSERT_EQ(-ENAMETOOLONG, ret); + ret = get_kv_key("key=abcd", buff, 1024); + ASSERT_EQ(0, ret); + ASSERT_STREQ("key", buff); + } + + 
TEST_F(TestConfiguration, test_get_kv_value) { + int ret = 0; + char buff[1024]; + ret = get_kv_value(NULL, buff, 1024); + ASSERT_EQ(-EINVAL, ret); + ret = get_kv_value("key1234", buff, 1024); + ASSERT_EQ(-EINVAL, ret); + ret = get_kv_value("key=abcd", NULL, 1024); + ASSERT_EQ(-ENAMETOOLONG, ret); + ret = get_kv_value("key=abcd", buff, 1); + ASSERT_EQ(-ENAMETOOLONG, ret); + ret = get_kv_value("key=abcd", buff, 1024); + ASSERT_EQ(0, ret); + ASSERT_STREQ("abcd", buff); + } + + TEST_F(TestConfiguration, test_single_section_high_key_count) { + std::string section_name = "section-1"; + std::string sample_file_name = "large-section.cfg"; + std::ofstream sample_file; + sample_file.open(sample_file_name.c_str()); + sample_file << "[" << section_name << "]" << std::endl; + for(int i = 0; i < MAX_SIZE + 2; ++i) { + sample_file << "key" << i << "=" << "value" << i << std::endl; + } + struct configuration cfg; + int ret = read_config(sample_file_name.c_str(), &cfg); + ASSERT_EQ(0, ret); + ASSERT_EQ(1, cfg.size); + const struct section *section1 = get_configuration_section(section_name.c_str(), &cfg); + ASSERT_EQ(MAX_SIZE + 2, section1->size); + ASSERT_STREQ(section_name.c_str(), section1->name); + for(int i = 0; i < MAX_SIZE + 2; ++i) { + std::ostringstream oss; + oss << "key" << i; + const char *value = get_section_value(oss.str().c_str(), section1); + oss.str(""); + oss << "value" << i; + ASSERT_STREQ(oss.str().c_str(), value); + } + remove(sample_file_name.c_str()); + free_configuration(&cfg); + } + + TEST_F(TestConfiguration, test_multiple_sections) { + std::string sample_file_name = "multiple-sections.cfg"; + std::ofstream sample_file; + sample_file.open(sample_file_name.c_str()); + for(int i = 0; i < MAX_SIZE + 2; ++i) { + sample_file << "[section-" << i << "]" << std::endl; + sample_file << "key" << i << "=" << "value" << i << std::endl; + } + struct configuration cfg; + int ret = read_config(sample_file_name.c_str(), &cfg); + ASSERT_EQ(0, ret); + ASSERT_EQ(MAX_SIZE + 2, cfg.size); + for(int i = 0; i < MAX_SIZE + 2; ++i) { + std::ostringstream oss; + oss << "section-" << i; + const struct section *section = get_configuration_section(oss.str().c_str(), &cfg); + ASSERT_EQ(1, section->size); + ASSERT_STREQ(oss.str().c_str(), section->name); + oss.str(""); + oss << "key" << i; + const char *value = get_section_value(oss.str().c_str(), section); + oss.str(""); + oss << "value" << i; + ASSERT_STREQ(oss.str().c_str(), value); + } + remove(sample_file_name.c_str()); + free_configuration(&cfg); + } + + TEST_F(TestConfiguration, test_section_start_line) { + const char *section_start_line = "[abcd]"; + const char *non_section_lines[] = { + "[abcd", "abcd]", "key=value", "#abcd" + }; + int ret = is_section_start_line(section_start_line); + ASSERT_EQ(1, ret); + int length = sizeof(non_section_lines) / sizeof(*non_section_lines); + for( int i = 0; i < length; ++i) { + ret = is_section_start_line(non_section_lines[i]); + ASSERT_EQ(0, ret); + } + ret = is_section_start_line(NULL); + ASSERT_EQ(0, ret); + } + + TEST_F(TestConfiguration, test_comment_line) { + const char *comment_line = "#[abcd]"; + const char *non_comment_lines[] = { + "[abcd", "abcd]", "key=value", "[abcd]" + }; + int ret = is_comment_line(comment_line); + ASSERT_EQ(1, ret); + int length = sizeof(non_comment_lines) / sizeof(*non_comment_lines); + for( int i = 0; i < length; ++i) { + ret = is_comment_line(non_comment_lines[i]); + ASSERT_EQ(0, ret); + } + ret = is_comment_line(NULL); + ASSERT_EQ(0, ret); + } + + TEST_F(TestConfiguration, 
test_mixed_config_format) { + const struct section *executor_cfg = + get_configuration_section("", &mixed_config_format); + char *value = NULL; + value = get_section_value("key1", executor_cfg); + ASSERT_STREQ("value1", value); + value = get_section_value("key2", executor_cfg); + ASSERT_STREQ("value2", value); + ASSERT_EQ(2, executor_cfg->size); + executor_cfg = get_configuration_section("section-1", + &mixed_config_format); + value = get_section_value("key3", executor_cfg); + ASSERT_STREQ("value3", value); + value = get_section_value("key1", executor_cfg); + ASSERT_STREQ("value4", value); + ASSERT_EQ(2, executor_cfg->size); + ASSERT_EQ(2, mixed_config_format.size); + ASSERT_STREQ("", mixed_config_format.sections[0]->name); + ASSERT_STREQ("section-1", mixed_config_format.sections[1]->name); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc new file mode 100644 index 00000000000..44c9b1bc5c0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_main.cc @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include
+#include + +extern "C" { +#include "util.h" +} + +int main(int argc, char **argv) { + ERRORFILE = stderr; + LOGFILE = stdout; + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_util.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_util.cc new file mode 100644 index 00000000000..2ec7b2a09c7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_util.cc @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +extern "C" { +#include "util.h" +} + +namespace ContainerExecutor { + + class TestUtil : public ::testing::Test { + protected: + virtual void SetUp() { + } + + virtual void TearDown() { + } + }; + + TEST_F(TestUtil, test_split_delimiter) { + std::string str = "1,2,3,4,5,6,7,8,9,10,11"; + char *split_string = (char *) calloc(str.length() + 1, sizeof(char)); + strncpy(split_string, str.c_str(), str.length()); + char **splits = split_delimiter(split_string, ","); + ASSERT_TRUE(splits != NULL); + int count = 0; + while(splits[count] != NULL) { + ++count; + } + ASSERT_EQ(11, count); + for(int i = 1; i < count; ++i) { + std::ostringstream oss; + oss << i; + ASSERT_STREQ(oss.str().c_str(), splits[i-1]); + } + ASSERT_EQ(NULL, splits[count]); + free_values(splits); + + split_string = (char *) calloc(str.length() + 1, sizeof(char)); + strncpy(split_string, str.c_str(), str.length()); + splits = split_delimiter(split_string, "%"); + ASSERT_TRUE(splits != NULL); + ASSERT_TRUE(splits[1] == NULL); + ASSERT_STREQ(str.c_str(), splits[0]); + free_values(splits); + + splits = split_delimiter(NULL, ","); + ASSERT_EQ(NULL, splits); + return; + } + + TEST_F(TestUtil, test_split) { + std::string str = "1%2%3%4%5%6%7%8%9%10%11"; + char *split_string = (char *) calloc(str.length() + 1, sizeof(char)); + strncpy(split_string, str.c_str(), str.length()); + char **splits = split(split_string); + int count = 0; + while(splits[count] != NULL) { + ++count; + } + ASSERT_EQ(11, count); + for(int i = 1; i < count; ++i) { + std::ostringstream oss; + oss << i; + ASSERT_STREQ(oss.str().c_str(), splits[i-1]); + } + ASSERT_EQ(NULL, splits[count]); + free_values(splits); + + str = "1,2,3,4,5,6,7,8,9,10,11"; + split_string = (char *) calloc(str.length() + 1, sizeof(char)); + strncpy(split_string, str.c_str(), str.length()); + splits = split(split_string); + ASSERT_TRUE(splits != NULL); + ASSERT_TRUE(splits[1] == NULL); + ASSERT_STREQ(str.c_str(), splits[0]); + return; + } + + TEST_F(TestUtil, test_trim) { + 
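+    // Note: trim() is expected to return a newly allocated string with
+    // surrounding whitespace removed (and NULL for NULL input), so each
+    // non-NULL result below is freed after the assertion.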
char* trimmed = NULL; + + // Check NULL input + ASSERT_EQ(NULL, trim(NULL)); + + // Check empty input + trimmed = trim(""); + ASSERT_STREQ("", trimmed); + free(trimmed); + + // Check single space input + trimmed = trim(" "); + ASSERT_STREQ("", trimmed); + free(trimmed); + + // Check multi space input + trimmed = trim(" "); + ASSERT_STREQ("", trimmed); + free(trimmed); + + // Check both side trim input + trimmed = trim(" foo "); + ASSERT_STREQ("foo", trimmed); + free(trimmed); + + // Check left side trim input + trimmed = trim("foo "); + ASSERT_STREQ("foo", trimmed); + free(trimmed); + + // Check right side trim input + trimmed = trim(" foo"); + ASSERT_STREQ("foo", trimmed); + free(trimmed); + + // Check no trim input + trimmed = trim("foo"); + ASSERT_STREQ("foo", trimmed); + free(trimmed); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-path-utils.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-path-utils.cc new file mode 100644 index 00000000000..a24c0c70f7a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-path-utils.cc @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+
+ #include
+ #include
+
+ extern "C" {
+ #include "utils/path-utils.h"
+ }
+
+ namespace ContainerExecutor {
+
+ class TestPathUtils : public ::testing::Test {
+ protected:
+   virtual void SetUp() {
+
+   }
+
+   virtual void TearDown() {
+
+   }
+ };
+
+ TEST_F(TestPathUtils, test_path_safety) {
+   const char* input = "./../abc/";
+   int flag = verify_path_safety(input);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_FALSE(flag) << "Should fail\n";
+
+   input = "abc/./cde";
+   flag = verify_path_safety(input);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_TRUE(flag) << "Should succeed\n";
+
+   input = "/etc/abc/cde/./x/./y";
+   flag = verify_path_safety(input);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_TRUE(flag) << "Should succeed\n";
+}
+
+} // namespace ContainerExecutor
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc
new file mode 100644
index 00000000000..037816a1546
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/utils/test-string-utils.cc
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+ #include
+
+ #include
+ #include
+
+ extern "C" {
+ #include "utils/string-utils.h"
+ }
+
+ namespace ContainerExecutor {
+
+ class TestStringUtils : public ::testing::Test {
+ protected:
+   virtual void SetUp() {
+
+   }
+
+   virtual void TearDown() {
+
+   }
+ };
+
+ TEST_F(TestStringUtils, test_get_numbers_split_by_comma) {
+   const char* input = ",1,2,3,-1,,1,,0,";
+   int* numbers;
+   size_t n_numbers;
+   int rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_EQ(0, rc) << "Should succeed\n";
+   ASSERT_EQ(6, n_numbers);
+   ASSERT_EQ(1, numbers[0]);
+   ASSERT_EQ(-1, numbers[3]);
+   ASSERT_EQ(0, numbers[5]);
+
+   input = "3";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_EQ(0, rc) << "Should succeed\n";
+   ASSERT_EQ(1, n_numbers);
+   ASSERT_EQ(3, numbers[0]);
+
+   input = "";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_EQ(0, rc) << "Should succeed\n";
+   ASSERT_EQ(0, n_numbers);
+
+   input = ",,";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_EQ(0, rc) << "Should succeed\n";
+   ASSERT_EQ(0, n_numbers);
+
+   input = "1,2,aa,bb";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_TRUE(0 != rc) << "Should fail\n";
+
+   input = "1,2,3,-12312312312312312312321311231231231";
+   rc = get_numbers_split_by_comma(input, &numbers, &n_numbers);
+   std::cout << "Testing input=" << input << "\n";
+   ASSERT_TRUE(0 != rc) << "Should fail\n";
+}
+
+} // namespace ContainerExecutor
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
index e529628b710..095f21a4f4b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
@@ -128,8 +128,12 @@ public class TestDirectoryCollection {
     DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F);
     dc.checkDirs();
     Assert.assertEquals(0, dc.getGoodDirs().size());
+    Assert.assertEquals(0, dc.getErroredDirs().size());
     Assert.assertEquals(1, dc.getFailedDirs().size());
     Assert.assertEquals(1, dc.getFullDirs().size());
+    Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA));
+    Assert.assertEquals(DirectoryCollection.DiskErrorCause.DISK_FULL, dc.getDirectoryErrorInfo(dirA).cause);
+
     // no good dirs
     Assert.assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage());
@@ -139,16 +143,21 @@
         testDir.getTotalSpace());
     dc.checkDirs();
     Assert.assertEquals(1, dc.getGoodDirs().size());
+    Assert.assertEquals(0, dc.getErroredDirs().size());
     Assert.assertEquals(0, dc.getFailedDirs().size());
     Assert.assertEquals(0, dc.getFullDirs().size());
+
Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + Assert.assertEquals(utilizedSpacePerc, dc.getGoodDirsDiskUtilizationPercentage()); dc = new DirectoryCollection(dirs, testDir.getTotalSpace() / (1024 * 1024)); dc.checkDirs(); Assert.assertEquals(0, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); Assert.assertEquals(1, dc.getFailedDirs().size()); Assert.assertEquals(1, dc.getFullDirs().size()); + Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA)); // no good dirs Assert.assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage()); @@ -158,8 +167,11 @@ public class TestDirectoryCollection { testDir.getTotalSpace()); dc.checkDirs(); Assert.assertEquals(1, dc.getGoodDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); Assert.assertEquals(0, dc.getFailedDirs().size()); Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); + Assert.assertEquals(utilizedSpacePerc, dc.getGoodDirsDiskUtilizationPercentage()); } @@ -209,12 +221,17 @@ public class TestDirectoryCollection { Assert.assertEquals(0, dc.getGoodDirs().size()); Assert.assertEquals(1, dc.getFailedDirs().size()); Assert.assertEquals(1, dc.getFullDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertNotNull(dc.getDirectoryErrorInfo(dirA)); + Assert.assertEquals(DirectoryCollection.DiskErrorCause.DISK_FULL, dc.getDirectoryErrorInfo(dirA).cause); dc.setDiskUtilizationPercentageCutoff(100.0F, 100.0F); dc.checkDirs(); Assert.assertEquals(1, dc.getGoodDirs().size()); Assert.assertEquals(0, dc.getFailedDirs().size()); Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077"); @@ -232,12 +249,18 @@ public class TestDirectoryCollection { Assert.assertEquals(0, dc.getGoodDirs().size()); Assert.assertEquals(1, dc.getFailedDirs().size()); Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertEquals(1, dc.getErroredDirs().size()); + Assert.assertNotNull(dc.getDirectoryErrorInfo(dirB)); + Assert.assertEquals(DirectoryCollection.DiskErrorCause.OTHER, dc.getDirectoryErrorInfo(dirB).cause); + permDirB = new FsPermission((short) 0700); localFs.setPermission(pathB, permDirB); dc.checkDirs(); Assert.assertEquals(1, dc.getGoodDirs().size()); Assert.assertEquals(0, dc.getFailedDirs().size()); Assert.assertEquals(0, dc.getFullDirs().size()); + Assert.assertEquals(0, dc.getErroredDirs().size()); + Assert.assertNull(dc.getDirectoryErrorInfo(dirA)); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java index c5c74d1dd95..b8cd7ddf9bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java @@ -45,9 +45,9 @@ import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import 
org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; -import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; -import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -224,7 +224,7 @@ public class TestNodeManagerResync { // Start a container and make sure it is in RUNNING state ((TestNodeManager4)nm).startContainer(); // Simulate a container resource increase in a separate thread - ((TestNodeManager4)nm).increaseContainersResource(); + ((TestNodeManager4)nm).updateContainerResource(); // Simulate RM restart by sending a RESYNC event LOG.info("Sending out RESYNC event"); nm.getNMDispatcher().getEventHandler().handle( @@ -505,7 +505,7 @@ public class TestNodeManagerResync { class TestNodeManager4 extends NodeManager { - private Thread increaseContainerResourceThread = null; + private Thread containerUpdateResourceThread = null; @Override protected NodeStatusUpdater createNodeStatusUpdater(Context context, @@ -621,11 +621,11 @@ public class TestNodeManagerResync { } // Increase container resource in a thread - public void increaseContainersResource() + public void updateContainerResource() throws InterruptedException { LOG.info("Increase a container resource in a separate thread"); - increaseContainerResourceThread = new IncreaseContainersResourceThread(); - increaseContainerResourceThread.start(); + containerUpdateResourceThread = new ContainerUpdateResourceThread(); + containerUpdateResourceThread.start(); } class TestNodeStatusUpdaterImpl4 extends MockNodeStatusUpdater { @@ -652,7 +652,7 @@ public class TestNodeManagerResync { updateBarrier.await(); // Call the actual rebootNodeStatusUpdaterAndRegisterWithRM(). // This function should be synchronized with - // increaseContainersResource(). + // updateContainer(). 
updateBarrier.await(); super.rebootNodeStatusUpdaterAndRegisterWithRM(); // Check status after registerWithRM @@ -672,7 +672,7 @@ public class TestNodeManagerResync { } } - class IncreaseContainersResourceThread extends Thread { + class ContainerUpdateResourceThread extends Thread { @Override public void run() { // Construct container resource increase request @@ -682,16 +682,16 @@ public class TestNodeManagerResync { try{ try { updateBarrier.await(); - increaseTokens.add(getContainerToken(targetResource)); - IncreaseContainersResourceRequest increaseRequest = - IncreaseContainersResourceRequest.newInstance(increaseTokens); - IncreaseContainersResourceResponse increaseResponse = + increaseTokens.add(getContainerToken(targetResource, 1)); + ContainerUpdateRequest updateRequest = + ContainerUpdateRequest.newInstance(increaseTokens); + ContainerUpdateResponse updateResponse = getContainerManager() - .increaseContainersResource(increaseRequest); + .updateContainer(updateRequest); Assert.assertEquals( - 1, increaseResponse.getSuccessfullyIncreasedContainers() + 1, updateResponse.getSuccessfullyUpdatedContainers() .size()); - Assert.assertTrue(increaseResponse.getFailedRequests().isEmpty()); + Assert.assertTrue(updateResponse.getFailedRequests().isEmpty()); } catch (Exception e) { e.printStackTrace(); } finally { @@ -710,6 +710,15 @@ public class TestNodeManagerResync { getNMContext().getNodeId(), user, resource, getNMContext().getContainerTokenSecretManager(), null); } + + private Token getContainerToken(Resource resource, int version) + throws IOException { + ContainerId cId = TestContainerManager.createContainerId(0); + return TestContainerManager.createContainerToken( + cId, version, DUMMY_RM_IDENTIFIER, + getNMContext().getNodeId(), user, resource, + getNMContext().getContainerTokenSecretManager(), null); + } } public static NMContainerStatus createNMContainerStatus(int id, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockRequestInterceptor.java index c962f97a020..1cbb2378f45 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/MockRequestInterceptor.java @@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterReque import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.MockResourceManagerFacade; public class MockRequestInterceptor extends AbstractRequestInterceptor { @@ -38,22 +39,21 @@ public class MockRequestInterceptor extends AbstractRequestInterceptor { public void init(AMRMProxyApplicationContext appContext) { super.init(appContext); - mockRM = - new MockResourceManagerFacade(new YarnConfiguration( - super.getConf()), 0); + mockRM = new MockResourceManagerFacade( + new YarnConfiguration(super.getConf()), 0); } @Override public RegisterApplicationMasterResponse registerApplicationMaster( - 
RegisterApplicationMasterRequest request) throws YarnException, - IOException { + RegisterApplicationMasterRequest request) + throws YarnException, IOException { return mockRM.registerApplicationMaster(request); } @Override public FinishApplicationMasterResponse finishApplicationMaster( - FinishApplicationMasterRequest request) throws YarnException, - IOException { + FinishApplicationMasterRequest request) + throws YarnException, IOException { return mockRM.finishApplicationMaster(request); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java index e734bdd63d1..72e5f53b551 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java @@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; +import org.apache.hadoop.yarn.server.MockResourceManagerFacade; import org.apache.hadoop.yarn.server.nodemanager.amrmproxy.AMRMProxyService.RequestInterceptorChainWrapper; import org.apache.hadoop.yarn.util.Records; import org.junit.Assert; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestFederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestFederationInterceptor.java new file mode 100644 index 00000000000..34b07416a21 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestFederationInterceptor.java @@ -0,0 +1,496 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.MockResourceManagerFacade;
+import org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Extends the TestAMRMProxyService and overrides methods in order to use the
+ * AMRMProxyService's pipeline test cases for testing the FederationInterceptor
+ * class. The tests for AMRMProxyService have been written cleverly so that they
+ * can be reused to validate different request interceptor chains.
+ */
+public class TestFederationInterceptor extends BaseAMRMProxyTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestFederationInterceptor.class);
+
+  public static final String HOME_SC_ID = "SC-home";
+
+  private TestableFederationInterceptor interceptor;
+  private MemoryFederationStateStore stateStore;
+
+  private int testAppId;
+  private ApplicationAttemptId attemptId;
+
+  @Override
+  public void setUp() throws IOException {
+    super.setUp();
+    interceptor = new TestableFederationInterceptor();
+
+    stateStore = new MemoryFederationStateStore();
+    stateStore.init(getConf());
+    FederationStateStoreFacade.getInstance().reinitialize(stateStore,
+        getConf());
+
+    testAppId = 1;
+    attemptId = getApplicationAttemptId(testAppId);
+    interceptor.init(new AMRMProxyApplicationContextImpl(null, getConf(),
+        attemptId, "test-user", null, null));
+  }
+
+  @Override
+  public void tearDown() {
+    interceptor.shutdown();
+    super.tearDown();
+  }
+
+  @Override
+  protected YarnConfiguration createConfiguration() {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true);
+    String mockPassThroughInterceptorClass =
+        PassThroughRequestInterceptor.class.getName();
+
+    // Create a request interceptor pipeline for testing. The last one in the
+    // chain is the federation interceptor that calls the mock resource manager.
+    // The others in the chain will simply forward it to the next one in the
+    // chain
+    conf.set(YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE,
+        mockPassThroughInterceptorClass + "," + mockPassThroughInterceptorClass
+            + "," + TestableFederationInterceptor.class.getName());
+
+    conf.set(YarnConfiguration.FEDERATION_POLICY_MANAGER,
+        UniformBroadcastPolicyManager.class.getName());
+
+    conf.set(YarnConfiguration.RM_CLUSTER_ID, HOME_SC_ID);
+
+    // Disable StateStoreFacade cache
+    conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0);
+
+    return conf;
+  }
+
+  private void registerSubCluster(SubClusterId subClusterId)
+      throws YarnException {
+    stateStore
+        .registerSubCluster(SubClusterRegisterRequest.newInstance(SubClusterInfo
+            .newInstance(subClusterId, "1.2.3.4:1", "1.2.3.4:2", "1.2.3.4:3",
+                "1.2.3.4:4", SubClusterState.SC_RUNNING, 0, "capacity")));
+  }
+
+  private void deRegisterSubCluster(SubClusterId subClusterId)
+      throws YarnException {
+    stateStore.deregisterSubCluster(SubClusterDeregisterRequest
+        .newInstance(subClusterId, SubClusterState.SC_UNREGISTERED));
+  }
+
+  private List<Container> getContainersAndAssert(int numberOfResourceRequests,
+      int numberOfAllocationExcepted) throws Exception {
+    AllocateRequest allocateRequest = Records.newRecord(AllocateRequest.class);
+    allocateRequest.setResponseId(1);
+
+    List<Container> containers =
+        new ArrayList<Container>(numberOfResourceRequests);
+    List<ResourceRequest> askList =
+        new ArrayList<ResourceRequest>(numberOfResourceRequests);
+    for (int id = 0; id < numberOfResourceRequests; id++) {
+      askList.add(createResourceRequest("test-node-" + Integer.toString(id),
+          6000, 2, id % 5, 1));
+    }
+
+    allocateRequest.setAskList(askList);
+
+    AllocateResponse allocateResponse = interceptor.allocate(allocateRequest);
+    Assert.assertNotNull("allocate() returned null response", allocateResponse);
+
+    containers.addAll(allocateResponse.getAllocatedContainers());
+    LOG.info("Number of allocated containers in the original request: "
+        + Integer.toString(allocateResponse.getAllocatedContainers().size()));
+
+    // Send max 10 heart beats to receive all the
containers. If not, we will + // fail the test + int numHeartbeat = 0; + while (containers.size() < numberOfAllocationExcepted + && numHeartbeat++ < 10) { + allocateResponse = + interceptor.allocate(Records.newRecord(AllocateRequest.class)); + Assert.assertNotNull("allocate() returned null response", + allocateResponse); + + containers.addAll(allocateResponse.getAllocatedContainers()); + + LOG.info("Number of allocated containers in this request: " + + Integer.toString(allocateResponse.getAllocatedContainers().size())); + LOG.info("Total number of allocated containers: " + + Integer.toString(containers.size())); + Thread.sleep(10); + } + Assert.assertEquals(numberOfAllocationExcepted, containers.size()); + return containers; + } + + private void releaseContainersAndAssert(List containers) + throws Exception { + Assert.assertTrue(containers.size() > 0); + AllocateRequest allocateRequest = Records.newRecord(AllocateRequest.class); + allocateRequest.setResponseId(1); + + List relList = new ArrayList(containers.size()); + for (Container container : containers) { + relList.add(container.getId()); + } + + allocateRequest.setReleaseList(relList); + + AllocateResponse allocateResponse = interceptor.allocate(allocateRequest); + Assert.assertNotNull(allocateResponse); + + // The way the mock resource manager is setup, it will return the containers + // that were released in the allocated containers. The release request will + // be split and handled by the corresponding UAM. The release containers + // returned by the mock resource managers will be aggregated and returned + // back to us and we can check if total request size and returned size are + // the same + List containersForReleasedContainerIds = + new ArrayList(); + containersForReleasedContainerIds + .addAll(allocateResponse.getAllocatedContainers()); + LOG.info("Number of containers received in the original request: " + + Integer.toString(allocateResponse.getAllocatedContainers().size())); + + // Send max 10 heart beats to receive all the containers. 
If not, we will + // fail the test + int numHeartbeat = 0; + while (containersForReleasedContainerIds.size() < relList.size() + && numHeartbeat++ < 10) { + allocateResponse = + interceptor.allocate(Records.newRecord(AllocateRequest.class)); + Assert.assertNotNull(allocateResponse); + containersForReleasedContainerIds + .addAll(allocateResponse.getAllocatedContainers()); + + LOG.info("Number of containers received in this request: " + + Integer.toString(allocateResponse.getAllocatedContainers().size())); + LOG.info("Total number of containers received: " + + Integer.toString(containersForReleasedContainerIds.size())); + Thread.sleep(10); + } + + Assert.assertEquals(relList.size(), + containersForReleasedContainerIds.size()); + } + + @Test + public void testMultipleSubClusters() throws Exception { + + // Register the application + RegisterApplicationMasterRequest registerReq = + Records.newRecord(RegisterApplicationMasterRequest.class); + registerReq.setHost(Integer.toString(testAppId)); + registerReq.setRpcPort(0); + registerReq.setTrackingUrl(""); + + RegisterApplicationMasterResponse registerResponse = + interceptor.registerApplicationMaster(registerReq); + Assert.assertNotNull(registerResponse); + + Assert.assertEquals(0, interceptor.getUnmanagedAMPoolSize()); + + // Allocate the first batch of containers, with sc1 and sc2 active + registerSubCluster(SubClusterId.newInstance("SC-1")); + registerSubCluster(SubClusterId.newInstance("SC-2")); + + int numberOfContainers = 3; + List containers = + getContainersAndAssert(numberOfContainers, numberOfContainers * 2); + Assert.assertEquals(2, interceptor.getUnmanagedAMPoolSize()); + + // Allocate the second batch of containers, with sc1 and sc3 active + deRegisterSubCluster(SubClusterId.newInstance("SC-2")); + registerSubCluster(SubClusterId.newInstance("SC-3")); + + numberOfContainers = 1; + containers.addAll( + getContainersAndAssert(numberOfContainers, numberOfContainers * 2)); + Assert.assertEquals(3, interceptor.getUnmanagedAMPoolSize()); + + // Allocate the third batch of containers with only in home sub-cluster + // active + deRegisterSubCluster(SubClusterId.newInstance("SC-1")); + deRegisterSubCluster(SubClusterId.newInstance("SC-3")); + registerSubCluster(SubClusterId.newInstance(HOME_SC_ID)); + + numberOfContainers = 2; + containers.addAll( + getContainersAndAssert(numberOfContainers, numberOfContainers * 1)); + Assert.assertEquals(3, interceptor.getUnmanagedAMPoolSize()); + + // Release all containers + releaseContainersAndAssert(containers); + + // Finish the application + FinishApplicationMasterRequest finishReq = + Records.newRecord(FinishApplicationMasterRequest.class); + finishReq.setDiagnostics(""); + finishReq.setTrackingUrl(""); + finishReq.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED); + + FinishApplicationMasterResponse finshResponse = + interceptor.finishApplicationMaster(finishReq); + Assert.assertNotNull(finshResponse); + Assert.assertEquals(true, finshResponse.getIsUnregistered()); + } + + /* + * Test re-register when RM fails over. 
+ */ + @Test + public void testReregister() throws Exception { + + // Register the application + RegisterApplicationMasterRequest registerReq = + Records.newRecord(RegisterApplicationMasterRequest.class); + registerReq.setHost(Integer.toString(testAppId)); + registerReq.setRpcPort(0); + registerReq.setTrackingUrl(""); + + RegisterApplicationMasterResponse registerResponse = + interceptor.registerApplicationMaster(registerReq); + Assert.assertNotNull(registerResponse); + + Assert.assertEquals(0, interceptor.getUnmanagedAMPoolSize()); + + // Allocate the first batch of containers + registerSubCluster(SubClusterId.newInstance("SC-1")); + registerSubCluster(SubClusterId.newInstance(HOME_SC_ID)); + + interceptor.setShouldReRegisterNext(); + + int numberOfContainers = 3; + List containers = + getContainersAndAssert(numberOfContainers, numberOfContainers * 2); + Assert.assertEquals(1, interceptor.getUnmanagedAMPoolSize()); + + interceptor.setShouldReRegisterNext(); + + // Release all containers + releaseContainersAndAssert(containers); + + interceptor.setShouldReRegisterNext(); + + // Finish the application + FinishApplicationMasterRequest finishReq = + Records.newRecord(FinishApplicationMasterRequest.class); + finishReq.setDiagnostics(""); + finishReq.setTrackingUrl(""); + finishReq.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED); + + FinishApplicationMasterResponse finshResponse = + interceptor.finishApplicationMaster(finishReq); + Assert.assertNotNull(finshResponse); + Assert.assertEquals(true, finshResponse.getIsUnregistered()); + } + + /* + * Test concurrent register threads. This is possible because the timeout + * between AM and AMRMProxy is shorter than the timeout + failOver between + * FederationInterceptor (AMRMProxy) and RM. When first call is blocked due to + * RM failover and AM timeout, it will call us resulting in a second register + * thread. + */ + @Test(timeout = 5000) + public void testConcurrentRegister() + throws InterruptedException, ExecutionException { + ExecutorService threadpool = Executors.newCachedThreadPool(); + ExecutorCompletionService compSvc = + new ExecutorCompletionService<>(threadpool); + + Object syncObj = MockResourceManagerFacade.getSyncObj(); + + // Two register threads + synchronized (syncObj) { + // Make sure first thread will block within RM, before the second thread + // starts + LOG.info("Starting first register thread"); + compSvc.submit(new ConcurrentRegisterAMCallable()); + + try { + LOG.info("Test main starts waiting for the first thread to block"); + syncObj.wait(); + LOG.info("Test main wait finished"); + } catch (Exception e) { + LOG.info("Test main wait interrupted", e); + } + } + + // The second thread will get already registered exception from RM. + LOG.info("Starting second register thread"); + compSvc.submit(new ConcurrentRegisterAMCallable()); + + // Notify the first register thread to return + LOG.info("Let first blocked register thread move on"); + synchronized (syncObj) { + syncObj.notifyAll(); + } + + // Both thread should return without exception + RegisterApplicationMasterResponse response = compSvc.take().get(); + Assert.assertNotNull(response); + + response = compSvc.take().get(); + Assert.assertNotNull(response); + + threadpool.shutdown(); + } + + /** + * A callable that calls registerAM to RM with blocking. 
+ */
+  public class ConcurrentRegisterAMCallable
+      implements Callable<RegisterApplicationMasterResponse> {
+    @Override
+    public RegisterApplicationMasterResponse call() throws Exception {
+      RegisterApplicationMasterResponse response = null;
+      try {
+        // Use port number 1001 to let mock RM block in the register call
+        response = interceptor.registerApplicationMaster(
+            RegisterApplicationMasterRequest.newInstance(null, 1001, null));
+      } catch (Exception e) {
+        LOG.info("Register thread exception", e);
+        response = null;
+      }
+      return response;
+    }
+  }
+
+  @Test
+  public void testRequestInterceptorChainCreation() throws Exception {
+    RequestInterceptor root =
+        super.getAMRMProxyService().createRequestInterceptorChain();
+    int index = 0;
+    while (root != null) {
+      switch (index) {
+      case 0:
+      case 1:
+        Assert.assertEquals(PassThroughRequestInterceptor.class.getName(),
+            root.getClass().getName());
+        break;
+      case 2:
+        Assert.assertEquals(TestableFederationInterceptor.class.getName(),
+            root.getClass().getName());
+        break;
+      default:
+        Assert.fail();
+      }
+      root = root.getNextInterceptor();
+      index++;
+    }
+    Assert.assertEquals("The number of interceptors in chain does not match",
+        Integer.toString(3), Integer.toString(index));
+  }
+
+  /**
+   * Between AM and AMRMProxy, FederationInterceptor modifies the RM behavior,
+   * so that when the AM registers more than once, it returns the same register
+   * success response instead of throwing
+   * {@link InvalidApplicationMasterRequestException}.
+   *
+   * We did this because FederationInterceptor can receive concurrent register
+   * requests from the AM because of the timeout between AM and AMRMProxy. This
+   * is possible since the timeout between FederationInterceptor and RM is
+   * longer because of performFailover + timeout.
+   */
+  @Test
+  public void testTwoIdenticalRegisterRequest() throws Exception {
+    // Register the application twice
+    RegisterApplicationMasterRequest registerReq =
+        Records.newRecord(RegisterApplicationMasterRequest.class);
+    registerReq.setHost(Integer.toString(testAppId));
+    registerReq.setRpcPort(0);
+    registerReq.setTrackingUrl("");
+
+    for (int i = 0; i < 2; i++) {
+      RegisterApplicationMasterResponse registerResponse =
+          interceptor.registerApplicationMaster(registerReq);
+      Assert.assertNotNull(registerResponse);
+    }
+  }
+
+  @Test
+  public void testTwoDifferentRegisterRequest() throws Exception {
+    // Register the application first time
+    RegisterApplicationMasterRequest registerReq =
+        Records.newRecord(RegisterApplicationMasterRequest.class);
+    registerReq.setHost(Integer.toString(testAppId));
+    registerReq.setRpcPort(0);
+    registerReq.setTrackingUrl("");
+
+    RegisterApplicationMasterResponse registerResponse =
+        interceptor.registerApplicationMaster(registerReq);
+    Assert.assertNotNull(registerResponse);
+
+    // Register the application second time with a different request obj
+    registerReq = Records.newRecord(RegisterApplicationMasterRequest.class);
+    registerReq.setHost(Integer.toString(testAppId));
+    registerReq.setRpcPort(0);
+    registerReq.setTrackingUrl("different");
+    try {
+      registerResponse = interceptor.registerApplicationMaster(registerReq);
+      Assert.fail("Should throw if a different request obj is used");
+    } catch (YarnException e) {
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java new file mode 100644 index 00000000000..d4b8735d464 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.amrmproxy; + +import java.io.IOException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; +import org.apache.hadoop.yarn.server.MockResourceManagerFacade; +import org.apache.hadoop.yarn.server.uam.UnmanagedAMPoolManager; +import org.apache.hadoop.yarn.server.uam.UnmanagedApplicationManager; + +/** + * Extends the FederationInterceptor and overrides methods to provide a testable + * implementation of FederationInterceptor. + */ +public class TestableFederationInterceptor extends FederationInterceptor { + private ConcurrentHashMap + secondaryResourceManagers = new ConcurrentHashMap<>(); + private AtomicInteger runningIndex = new AtomicInteger(0); + private MockResourceManagerFacade mockRm; + + @Override + protected UnmanagedAMPoolManager createUnmanagedAMPoolManager( + ExecutorService threadPool) { + return new TestableUnmanagedAMPoolManager(threadPool); + } + + @Override + protected ApplicationMasterProtocol createHomeRMProxy( + AMRMProxyApplicationContext appContext) { + synchronized (this) { + if (mockRm == null) { + mockRm = new MockResourceManagerFacade( + new YarnConfiguration(super.getConf()), 0); + } + } + return mockRm; + } + + @SuppressWarnings("unchecked") + protected T createSecondaryRMProxy(Class proxyClass, + Configuration conf, String subClusterId) throws IOException { + // We create one instance of the mock resource manager per sub cluster. 
Keep + // track of the instances of the RMs in the map keyed by the sub cluster id + synchronized (this.secondaryResourceManagers) { + if (this.secondaryResourceManagers.contains(subClusterId)) { + return (T) this.secondaryResourceManagers.get(subClusterId); + } else { + // The running index here is used to simulate different RM_EPOCH to + // generate unique container identifiers in a federation environment + MockResourceManagerFacade rm = new MockResourceManagerFacade( + new Configuration(conf), runningIndex.addAndGet(10000)); + this.secondaryResourceManagers.put(subClusterId, rm); + return (T) rm; + } + } + } + + protected void setShouldReRegisterNext() { + if (mockRm != null) { + mockRm.setShouldReRegisterNext(); + } + for (MockResourceManagerFacade subCluster : secondaryResourceManagers + .values()) { + subCluster.setShouldReRegisterNext(); + } + } + + /** + * Extends the UnmanagedAMPoolManager and overrides methods to provide a + * testable implementation of UnmanagedAMPoolManager. + */ + protected class TestableUnmanagedAMPoolManager + extends UnmanagedAMPoolManager { + public TestableUnmanagedAMPoolManager(ExecutorService threadpool) { + super(threadpool); + } + + @Override + public UnmanagedApplicationManager createUAM(Configuration conf, + ApplicationId appId, String queueName, String submitter, + String appNameSuffix) { + return new TestableUnmanagedApplicationManager(conf, appId, queueName, + submitter, appNameSuffix); + } + } + + /** + * Extends the UnmanagedApplicationManager and overrides methods to provide a + * testable implementation. + */ + protected class TestableUnmanagedApplicationManager + extends UnmanagedApplicationManager { + + public TestableUnmanagedApplicationManager(Configuration conf, + ApplicationId appId, String queueName, String submitter, + String appNameSuffix) { + super(conf, appId, queueName, submitter, appNameSuffix); + } + + /** + * We override this method here to return a mock RM instances. The base + * class returns the proxy to the real RM which will not work in case of + * stand alone test cases. + */ + @Override + protected T createRMProxy(Class protocol, Configuration config, + UserGroupInformation user, Token token) + throws IOException { + return createSecondaryRMProxy(protocol, config, + YarnConfiguration.getClusterId(config)); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java index 7980a80a2cb..6c96a475876 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java @@ -310,13 +310,13 @@ public abstract class BaseContainerManagerTest { new HashSet<>(finalStates); int timeoutSecs = 0; do { - Thread.sleep(2000); + Thread.sleep(1000); containerStatus = containerManager.getContainerStatuses(request) .getContainerStatuses().get(0); LOG.info("Waiting for container to get into one of states " + fStates + ". 
Current state is " + containerStatus.getState()); - timeoutSecs += 2; + timeoutSecs += 1; } while (!fStates.contains(containerStatus.getState()) && timeoutSecs < timeOutMax); LOG.info("Container state is " + containerStatus.getState()); @@ -371,7 +371,7 @@ public abstract class BaseContainerManagerTest { .containermanager.container.ContainerState currentState = null; int timeoutSecs = 0; do { - Thread.sleep(2000); + Thread.sleep(1000); container = containerManager.getContext().getContainers().get(containerID); if (container != null) { @@ -381,9 +381,9 @@ public abstract class BaseContainerManagerTest { LOG.info("Waiting for NM container to get into one of the following " + "states: " + finalStates + ". Current state is " + currentState); } - timeoutSecs += 2; + timeoutSecs += 1; } while (!finalStates.contains(currentState) - && timeoutSecs++ < timeOutMax); + && timeoutSecs < timeOutMax); LOG.info("Container state is " + currentState); Assert.assertTrue("ContainerState is not correct (timedout)", finalStates.contains(currentState)); @@ -421,6 +421,20 @@ public abstract class BaseContainerManagerTest { containerTokenIdentifier); } + public static Token createContainerToken(ContainerId cId, int version, + long rmIdentifier, NodeId nodeId, String user, Resource resource, + NMContainerTokenSecretManager containerTokenSecretManager, + LogAggregationContext logAggregationContext) throws IOException { + ContainerTokenIdentifier containerTokenIdentifier = + new ContainerTokenIdentifier(cId, version, nodeId.toString(), user, + resource, System.currentTimeMillis() + 100000L, 123, rmIdentifier, + Priority.newInstance(0), 0, logAggregationContext, null, + ContainerType.TASK, ExecutionType.GUARANTEED); + return BuilderUtils.newContainerToken(nodeId, + containerTokenSecretManager.retrievePassword(containerTokenIdentifier), + containerTokenIdentifier); + } + public static Token createContainerToken(ContainerId cId, long rmIdentifier, NodeId nodeId, String user, Resource resource, NMContainerTokenSecretManager containerTokenSecretManager, @@ -431,8 +445,23 @@ public abstract class BaseContainerManagerTest { System.currentTimeMillis() + 100000L, 123, rmIdentifier, Priority.newInstance(0), 0, logAggregationContext, null, ContainerType.TASK, executionType); - return BuilderUtils.newContainerToken(nodeId, containerTokenSecretManager - .retrievePassword(containerTokenIdentifier), + return BuilderUtils.newContainerToken(nodeId, + containerTokenSecretManager.retrievePassword(containerTokenIdentifier), + containerTokenIdentifier); + } + + public static Token createContainerToken(ContainerId cId, int version, + long rmIdentifier, NodeId nodeId, String user, Resource resource, + NMContainerTokenSecretManager containerTokenSecretManager, + LogAggregationContext logAggregationContext, ExecutionType executionType) + throws IOException { + ContainerTokenIdentifier containerTokenIdentifier = + new ContainerTokenIdentifier(cId, version, nodeId.toString(), user, + resource, System.currentTimeMillis() + 100000L, 123, rmIdentifier, + Priority.newInstance(0), 0, logAggregationContext, null, + ContainerType.TASK, executionType); + return BuilderUtils.newContainerToken(nodeId, + containerTokenSecretManager.retrievePassword(containerTokenIdentifier), containerTokenIdentifier); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index 26a1003b9ac..f07571385ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -210,7 +210,7 @@ public class TestAuxServices { defaultAuxClassPath = new HashSet(Arrays.asList(StringUtils .getTrimmedStrings(auxClassPath))); } - Assert.assertTrue(auxName.equals("ServiceC")); + Assert.assertEquals("ServiceC", auxName); aux.serviceStop(); // create a new jar file, and configure it as customized class path diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index ba0ecce52df..9844225ff88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.verify; @@ -47,10 +48,10 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse; -import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; -import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; @@ -70,23 +71,26 @@ import org.apache.hadoop.yarn.api.records.ContainerRetryContext; import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceUtilization; import 
org.apache.hadoop.yarn.api.records.SerializedException; import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.ConfigurationException; import org.apache.hadoop.yarn.exceptions.InvalidContainerException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.security.NMTokenIdentifier; import org.apache.hadoop.yarn.server.api.ResourceManagerConstants; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; -import org.apache.hadoop.yarn.server.nodemanager.CMgrDecreaseContainersResourceEvent; import org.apache.hadoop.yarn.server.nodemanager.CMgrSignalContainersEvent; +import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal; import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; @@ -99,6 +103,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; +import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; import org.junit.Before; @@ -115,10 +120,34 @@ public class TestContainerManager extends BaseContainerManagerTest { static { LOG = LogFactory.getLog(TestContainerManager.class); } - + + private boolean delayContainers = false; + + @Override + protected ContainerExecutor createContainerExecutor() { + DefaultContainerExecutor exec = new DefaultContainerExecutor() { + @Override + public int launchContainer(ContainerStartContext ctx) + throws IOException, ConfigurationException { + if (delayContainers) { + try { + Thread.sleep(10000); + } catch (InterruptedException e) { + // Nothing.. 
+ } + } + return super.launchContainer(ctx); + } + }; + exec.setConf(conf); + return spy(exec); + } + @Override @Before public void setup() throws IOException { + conf.setInt( + YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 10); super.setup(); } @@ -437,7 +466,15 @@ public class TestContainerManager extends BaseContainerManagerTest { File newStartFile = new File(tmpDir, "start_file_n.txt").getAbsoluteFile(); + ResourceUtilization beforeUpgrade = + ResourceUtilization.newInstance( + containerManager.getContainerScheduler().getCurrentUtilization()); prepareContainerUpgrade(autoCommit, false, false, cId, newStartFile); + ResourceUtilization afterUpgrade = + ResourceUtilization.newInstance( + containerManager.getContainerScheduler().getCurrentUtilization()); + Assert.assertEquals("Possible resource leak detected !!", + beforeUpgrade, afterUpgrade); // Assert that the First process is not alive anymore Assert.assertFalse("Process is still alive!", @@ -1459,7 +1496,7 @@ public class TestContainerManager extends BaseContainerManagerTest { Assert.assertEquals(strExceptionMsg, ContainerManagerImpl.INVALID_NMTOKEN_MSG); - ContainerManagerImpl spyContainerMgr = Mockito.spy(cMgrImpl); + ContainerManagerImpl spyContainerMgr = spy(cMgrImpl); UserGroupInformation ugInfo = UserGroupInformation.createRemoteUser("a"); Mockito.when(spyContainerMgr.getRemoteUgi()).thenReturn(ugInfo); Mockito.when(spyContainerMgr. @@ -1534,7 +1571,7 @@ public class TestContainerManager extends BaseContainerManagerTest { // container will have exited, and won't be in RUNNING state ContainerId cId0 = createContainerId(0); Token containerToken = - createContainerToken(cId0, DUMMY_RM_IDENTIFIER, + createContainerToken(cId0, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, Resource.newInstance(1234, 3), context.getContainerTokenSecretManager(), null); @@ -1549,22 +1586,21 @@ public class TestContainerManager extends BaseContainerManagerTest { context.getContainerTokenSecretManager(), null); increaseTokens.add(containerToken); - IncreaseContainersResourceRequest increaseRequest = - IncreaseContainersResourceRequest - .newInstance(increaseTokens); - IncreaseContainersResourceResponse increaseResponse = - containerManager.increaseContainersResource(increaseRequest); + ContainerUpdateRequest updateRequest = + ContainerUpdateRequest.newInstance(increaseTokens); + ContainerUpdateResponse updateResponse = + containerManager.updateContainer(updateRequest); // Check response Assert.assertEquals( - 0, increaseResponse.getSuccessfullyIncreasedContainers().size()); - Assert.assertEquals(2, increaseResponse.getFailedRequests().size()); - for (Map.Entry entry : increaseResponse + 0, updateResponse.getSuccessfullyUpdatedContainers().size()); + Assert.assertEquals(2, updateResponse.getFailedRequests().size()); + for (Map.Entry entry : updateResponse .getFailedRequests().entrySet()) { Assert.assertNotNull("Failed message", entry.getValue().getMessage()); if (cId0.equals(entry.getKey())) { Assert.assertTrue(entry.getValue().getMessage() .contains("Resource can only be changed when a " - + "container is in RUNNING state")); + + "container is in RUNNING or SCHEDULED state")); } else if (cId7.equals(entry.getKey())) { Assert.assertTrue(entry.getValue().getMessage() .contains("Container " + cId7.toString() @@ -1576,90 +1612,6 @@ public class TestContainerManager extends BaseContainerManagerTest { } } - @Test - public void testIncreaseContainerResourceWithInvalidResource() throws Exception { - containerManager.start(); - File 
scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile"); - PrintWriter fileWriter = new PrintWriter(scriptFile); - // Construct the Container-id - ContainerId cId = createContainerId(0); - if (Shell.WINDOWS) { - fileWriter.println("@ping -n 100 127.0.0.1 >nul"); - } else { - fileWriter.write("\numask 0"); - fileWriter.write("\nexec sleep 100"); - } - fileWriter.close(); - ContainerLaunchContext containerLaunchContext = - recordFactory.newRecordInstance(ContainerLaunchContext.class); - URL resource_alpha = - URL.fromPath(localFS - .makeQualified(new Path(scriptFile.getAbsolutePath()))); - LocalResource rsrc_alpha = - recordFactory.newRecordInstance(LocalResource.class); - rsrc_alpha.setResource(resource_alpha); - rsrc_alpha.setSize(-1); - rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); - rsrc_alpha.setType(LocalResourceType.FILE); - rsrc_alpha.setTimestamp(scriptFile.lastModified()); - String destinationFile = "dest_file"; - Map localResources = - new HashMap(); - localResources.put(destinationFile, rsrc_alpha); - containerLaunchContext.setLocalResources(localResources); - List commands = - Arrays.asList(Shell.getRunScriptCommand(scriptFile)); - containerLaunchContext.setCommands(commands); - - StartContainerRequest scRequest = - StartContainerRequest.newInstance( - containerLaunchContext, - createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), - user, context.getContainerTokenSecretManager())); - List list = new ArrayList<>(); - list.add(scRequest); - StartContainersRequest allRequests = - StartContainersRequest.newInstance(list); - containerManager.startContainers(allRequests); - // Make sure the container reaches RUNNING state - BaseContainerManagerTest.waitForNMContainerState(containerManager, cId, - org.apache.hadoop.yarn.server.nodemanager. - containermanager.container.ContainerState.RUNNING); - // Construct container resource increase request, - List increaseTokens = new ArrayList<>(); - // Add increase request. The increase request should fail - // as the current resource does not fit in the target resource - Token containerToken = - createContainerToken(cId, DUMMY_RM_IDENTIFIER, - context.getNodeId(), user, - Resource.newInstance(512, 1), - context.getContainerTokenSecretManager(), null); - increaseTokens.add(containerToken); - IncreaseContainersResourceRequest increaseRequest = - IncreaseContainersResourceRequest - .newInstance(increaseTokens); - IncreaseContainersResourceResponse increaseResponse = - containerManager.increaseContainersResource(increaseRequest); - // Check response - Assert.assertEquals( - 0, increaseResponse.getSuccessfullyIncreasedContainers().size()); - Assert.assertEquals(1, increaseResponse.getFailedRequests().size()); - for (Map.Entry entry : increaseResponse - .getFailedRequests().entrySet()) { - if (cId.equals(entry.getKey())) { - Assert.assertNotNull("Failed message", entry.getValue().getMessage()); - Assert.assertTrue(entry.getValue().getMessage() - .contains("The target resource " - + Resource.newInstance(512, 1).toString() - + " is smaller than the current resource " - + Resource.newInstance(1024, 1))); - } else { - throw new YarnException("Received failed request from wrong" - + " container: " + entry.getKey().toString()); - } - } - } - @Test public void testChangeContainerResource() throws Exception { containerManager.start(); @@ -1713,17 +1665,17 @@ public class TestContainerManager extends BaseContainerManagerTest { List increaseTokens = new ArrayList<>(); // Add increase request. 
Resource targetResource = Resource.newInstance(4096, 2); - Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER, + Token containerToken = createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(), user, targetResource, context.getContainerTokenSecretManager(), null); increaseTokens.add(containerToken); - IncreaseContainersResourceRequest increaseRequest = - IncreaseContainersResourceRequest.newInstance(increaseTokens); - IncreaseContainersResourceResponse increaseResponse = - containerManager.increaseContainersResource(increaseRequest); + ContainerUpdateRequest updateRequest = + ContainerUpdateRequest.newInstance(increaseTokens); + ContainerUpdateResponse updateResponse = + containerManager.updateContainer(updateRequest); Assert.assertEquals( - 1, increaseResponse.getSuccessfullyIncreasedContainers().size()); - Assert.assertTrue(increaseResponse.getFailedRequests().isEmpty()); + 1, updateResponse.getSuccessfullyUpdatedContainers().size()); + Assert.assertTrue(updateResponse.getFailedRequests().isEmpty()); // Check status List containerIds = new ArrayList<>(); containerIds.add(cId); @@ -1734,15 +1686,19 @@ public class TestContainerManager extends BaseContainerManagerTest { // Check status immediately as resource increase is blocking assertEquals(targetResource, containerStatus.getCapability()); // Simulate a decrease request - List containersToDecrease - = new ArrayList<>(); + List decreaseTokens = new ArrayList<>(); targetResource = Resource.newInstance(2048, 2); - org.apache.hadoop.yarn.api.records.Container decreasedContainer = - org.apache.hadoop.yarn.api.records.Container - .newInstance(cId, null, null, targetResource, null, null); - containersToDecrease.add(decreasedContainer); - containerManager.handle( - new CMgrDecreaseContainersResourceEvent(containersToDecrease)); + Token token = createContainerToken(cId, 2, DUMMY_RM_IDENTIFIER, + context.getNodeId(), user, targetResource, + context.getContainerTokenSecretManager(), null); + decreaseTokens.add(token); + updateRequest = ContainerUpdateRequest.newInstance(decreaseTokens); + updateResponse = containerManager.updateContainer(updateRequest); + + Assert.assertEquals( + 1, updateResponse.getSuccessfullyUpdatedContainers().size()); + Assert.assertTrue(updateResponse.getFailedRequests().isEmpty()); + // Check status with retry containerStatus = containerManager .getContainerStatuses(gcsRequest).getContainerStatuses().get(0); @@ -1872,7 +1828,7 @@ public class TestContainerManager extends BaseContainerManagerTest { ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); ContainerLaunchContext spyContainerLaunchContext = - Mockito.spy(containerLaunchContext); + spy(containerLaunchContext); Mockito.when(spyContainerLaunchContext.getLocalResources()) .thenReturn(localResources); @@ -1917,7 +1873,7 @@ public class TestContainerManager extends BaseContainerManagerTest { ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); ContainerLaunchContext spyContainerLaunchContext = - Mockito.spy(containerLaunchContext); + spy(containerLaunchContext); Mockito.when(spyContainerLaunchContext.getLocalResources()) .thenReturn(localResources); @@ -1962,7 +1918,7 @@ public class TestContainerManager extends BaseContainerManagerTest { ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); ContainerLaunchContext spyContainerLaunchContext = - 
Mockito.spy(containerLaunchContext); + spy(containerLaunchContext); Mockito.when(spyContainerLaunchContext.getLocalResources()) .thenReturn(localResources); @@ -1989,4 +1945,122 @@ public class TestContainerManager extends BaseContainerManagerTest { Assert.assertTrue(response.getFailedRequests().get(cId).getMessage() .contains("Null resource visibility for local resource")); } + + @Test + public void testContainerUpdateExecTypeOpportunisticToGuaranteed() + throws IOException, YarnException, InterruptedException { + delayContainers = true; + containerManager.start(); + // Construct the Container-id + ContainerId cId = createContainerId(0); + ContainerLaunchContext containerLaunchContext = + recordFactory.newRecordInstance(ContainerLaunchContext.class); + + StartContainerRequest scRequest = + StartContainerRequest.newInstance( + containerLaunchContext, + createContainerToken(cId, DUMMY_RM_IDENTIFIER, + context.getNodeId(), user, BuilderUtils.newResource(512, 1), + context.getContainerTokenSecretManager(), null, + ExecutionType.OPPORTUNISTIC)); + List list = new ArrayList<>(); + list.add(scRequest); + StartContainersRequest allRequests = + StartContainersRequest.newInstance(list); + containerManager.startContainers(allRequests); + // Make sure the container reaches RUNNING state + BaseContainerManagerTest.waitForNMContainerState(containerManager, cId, + org.apache.hadoop.yarn.server.nodemanager. + containermanager.container.ContainerState.RUNNING); + // Construct container resource increase request, + List updateTokens = new ArrayList<>(); + Token containerToken = + createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(), + user, BuilderUtils.newResource(512, 1), + context.getContainerTokenSecretManager(), null, + ExecutionType.GUARANTEED); + updateTokens.add(containerToken); + ContainerUpdateRequest updateRequest = + ContainerUpdateRequest.newInstance(updateTokens); + ContainerUpdateResponse updateResponse = + containerManager.updateContainer(updateRequest); + + Assert.assertEquals( + 1, updateResponse.getSuccessfullyUpdatedContainers().size()); + Assert.assertTrue(updateResponse.getFailedRequests().isEmpty()); + + //Make sure the container is running + List statList = new ArrayList(); + statList.add(cId); + GetContainerStatusesRequest statRequest = + GetContainerStatusesRequest.newInstance(statList); + List containerStatuses = containerManager + .getContainerStatuses(statRequest).getContainerStatuses(); + Assert.assertEquals(1, containerStatuses.size()); + for (ContainerStatus status : containerStatuses) { + Assert.assertEquals( + org.apache.hadoop.yarn.api.records.ContainerState.RUNNING, + status.getState()); + Assert.assertEquals(ExecutionType.GUARANTEED, status.getExecutionType()); + } + } + + @Test + public void testContainerUpdateExecTypeGuaranteedToOpportunistic() + throws IOException, YarnException, InterruptedException { + delayContainers = true; + containerManager.start(); + // Construct the Container-id + ContainerId cId = createContainerId(0); + ContainerLaunchContext containerLaunchContext = + recordFactory.newRecordInstance(ContainerLaunchContext.class); + + StartContainerRequest scRequest = + StartContainerRequest.newInstance( + containerLaunchContext, + createContainerToken(cId, DUMMY_RM_IDENTIFIER, + context.getNodeId(), user, BuilderUtils.newResource(512, 1), + context.getContainerTokenSecretManager(), null)); + List list = new ArrayList<>(); + list.add(scRequest); + StartContainersRequest allRequests = + StartContainersRequest.newInstance(list); + 
containerManager.startContainers(allRequests); + // Make sure the container reaches RUNNING state + BaseContainerManagerTest.waitForNMContainerState(containerManager, cId, + org.apache.hadoop.yarn.server.nodemanager. + containermanager.container.ContainerState.RUNNING); + // Construct container resource increase request, + List updateTokens = new ArrayList<>(); + Token containerToken = + createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, context.getNodeId(), + user, BuilderUtils.newResource(512, 1), + context.getContainerTokenSecretManager(), null, + ExecutionType.OPPORTUNISTIC); + updateTokens.add(containerToken); + ContainerUpdateRequest updateRequest = + ContainerUpdateRequest.newInstance(updateTokens); + ContainerUpdateResponse updateResponse = + containerManager.updateContainer(updateRequest); + + Assert.assertEquals( + 1, updateResponse.getSuccessfullyUpdatedContainers().size()); + Assert.assertTrue(updateResponse.getFailedRequests().isEmpty()); + + //Make sure the container is running + List statList = new ArrayList(); + statList.add(cId); + GetContainerStatusesRequest statRequest = + GetContainerStatusesRequest.newInstance(statList); + List containerStatuses = containerManager + .getContainerStatuses(statRequest).getContainerStatuses(); + Assert.assertEquals(1, containerStatuses.size()); + for (ContainerStatus status : containerStatuses) { + Assert.assertEquals( + org.apache.hadoop.yarn.api.records.ContainerState.RUNNING, + status.getState()); + Assert + .assertEquals(ExecutionType.OPPORTUNISTIC, status.getExecutionType()); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java index b1a7b4ba052..224e99cf9f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -49,9 +49,9 @@ import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; -import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; -import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; @@ -460,9 +460,9 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest { org.apache.hadoop.yarn.server.nodemanager .containermanager.container.ContainerState.RUNNING); Resource targetResource = Resource.newInstance(2048, 2); - IncreaseContainersResourceResponse increaseResponse = - 
increaseContainersResource(context, cm, cid, targetResource); - assertTrue(increaseResponse.getFailedRequests().isEmpty()); + ContainerUpdateResponse updateResponse = + updateContainers(context, cm, cid, targetResource); + assertTrue(updateResponse.getFailedRequests().isEmpty()); // check status ContainerStatus containerStatus = getContainerStatus(context, cm, cid); assertEquals(targetResource, containerStatus.getCapability()); @@ -643,7 +643,7 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest { }); } - private IncreaseContainersResourceResponse increaseContainersResource( + private ContainerUpdateResponse updateContainers( Context context, final ContainerManagerImpl cm, ContainerId cid, Resource capability) throws Exception { UserGroupInformation user = UserGroupInformation.createRemoteUser( @@ -652,21 +652,21 @@ public class TestContainerManagerRecovery extends BaseContainerManagerTest { final List increaseTokens = new ArrayList(); // add increase request Token containerToken = TestContainerManager.createContainerToken( - cid, 0, context.getNodeId(), user.getShortUserName(), + cid, 1, 0, context.getNodeId(), user.getShortUserName(), capability, context.getContainerTokenSecretManager(), null); increaseTokens.add(containerToken); - final IncreaseContainersResourceRequest increaseRequest = - IncreaseContainersResourceRequest.newInstance(increaseTokens); + final ContainerUpdateRequest updateRequest = + ContainerUpdateRequest.newInstance(increaseTokens); NMTokenIdentifier nmToken = new NMTokenIdentifier( cid.getApplicationAttemptId(), context.getNodeId(), user.getShortUserName(), context.getNMTokenSecretManager().getCurrentKey().getKeyId()); user.addTokenIdentifier(nmToken); return user.doAs( - new PrivilegedExceptionAction() { + new PrivilegedExceptionAction() { @Override - public IncreaseContainersResourceResponse run() throws Exception { - return cm.increaseContainersResource(increaseRequest); + public ContainerUpdateResponse run() throws Exception { + return cm.updateContainer(updateRequest); } }); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsCpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsCpuResourceHandlerImpl.java index 674cd7142b8..006b0601edb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsCpuResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsCpuResourceHandlerImpl.java @@ -21,8 +21,10 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; @@ -294,4 +296,25 @@ public class TestCGroupsCpuResourceHandlerImpl { public void testStrictResourceUsage() throws Exception { Assert.assertNull(cGroupsCpuResourceHandler.teardown()); } + + @Test + public void testOpportunistic() throws Exception { + Configuration conf = new YarnConfiguration(); + + cGroupsCpuResourceHandler.bootstrap(plugin, conf); + ContainerTokenIdentifier tokenId = mock(ContainerTokenIdentifier.class); + when(tokenId.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC); + Container container = mock(Container.class); + String id = "container_01_01"; + ContainerId mockContainerId = mock(ContainerId.class); + when(mockContainerId.toString()).thenReturn(id); + when(container.getContainerId()).thenReturn(mockContainerId); + when(container.getContainerTokenIdentifier()).thenReturn(tokenId); + when(container.getResource()).thenReturn(Resource.newInstance(1024, 2)); + cGroupsCpuResourceHandler.preStart(container); + verify(mockCGroupsHandler, times(1)) + .updateCGroupParam(CGroupsHandler.CGroupController.CPU, id, + CGroupsHandler.CGROUP_CPU_SHARES, "2"); + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java index dd8e33838b8..ab989cf40aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java @@ -573,4 +573,29 @@ public class TestCGroupsHandlerImpl { new File(new File(newMountPoint, "cpu"), this.hierarchy); assertTrue("Yarn cgroup should exist", hierarchyFile.exists()); } + + + @Test + public void testManualCgroupSetting() throws ResourceHandlerException { + YarnConfiguration conf = new YarnConfiguration(); + conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, tmpPath); + conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, + "/hadoop-yarn"); + File cpu = new File(new File(tmpPath, "cpuacct,cpu"), "/hadoop-yarn"); + + try { + Assert.assertTrue("temp dir should be created", cpu.mkdirs()); + + CGroupsHandlerImpl cGroupsHandler = new CGroupsHandlerImpl(conf, null); + cGroupsHandler.initializeCGroupController( + CGroupsHandler.CGroupController.CPU); + + Assert.assertEquals("CPU CGRoup path was not set", cpu.getAbsolutePath(), + new File(cGroupsHandler.getPathForCGroup( + CGroupsHandler.CGroupController.CPU, "")).getAbsolutePath()); + + } finally { + FileUtils.deleteQuietly(cpu); + } + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java index 180e1340906..8fd5a9d38e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java @@ -20,8 +20,10 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.junit.Before; @@ -32,6 +34,9 @@ import java.util.List; import static org.mockito.Mockito.*; +/** + * Unit test for CGroupsMemoryResourceHandlerImpl. + */ public class TestCGroupsMemoryResourceHandlerImpl { private CGroupsHandler mockCGroupsHandler; @@ -60,8 +65,7 @@ public class TestCGroupsMemoryResourceHandlerImpl { try { cGroupsMemoryResourceHandler.bootstrap(conf); Assert.fail("Pmem check should not be allowed to run with cgroups"); - } - catch(ResourceHandlerException re) { + } catch(ResourceHandlerException re) { // do nothing } conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false); @@ -69,8 +73,7 @@ public class TestCGroupsMemoryResourceHandlerImpl { try { cGroupsMemoryResourceHandler.bootstrap(conf); Assert.fail("Vmem check should not be allowed to run with cgroups"); - } - catch(ResourceHandlerException re) { + } catch(ResourceHandlerException re) { // do nothing } } @@ -84,8 +87,7 @@ public class TestCGroupsMemoryResourceHandlerImpl { try { cGroupsMemoryResourceHandler.bootstrap(conf); Assert.fail("Negative values for swappiness should not be allowed."); - } - catch (ResourceHandlerException re) { + } catch (ResourceHandlerException re) { // do nothing } try { @@ -93,8 +95,7 @@ public class TestCGroupsMemoryResourceHandlerImpl { cGroupsMemoryResourceHandler.bootstrap(conf); Assert.fail("Values greater than 100 for swappiness" + " should not be allowed."); - } - catch (ResourceHandlerException re) { + } catch (ResourceHandlerException re) { // do nothing } conf.setInt(YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SWAPPINESS, 60); @@ -169,4 +170,32 @@ public class TestCGroupsMemoryResourceHandlerImpl { public void testTeardown() throws Exception { Assert.assertNull(cGroupsMemoryResourceHandler.teardown()); } + + @Test + public void testOpportunistic() throws Exception { + Configuration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false); + conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false); + + cGroupsMemoryResourceHandler.bootstrap(conf); + ContainerTokenIdentifier tokenId = mock(ContainerTokenIdentifier.class); + when(tokenId.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC); + Container container = 
mock(Container.class); + String id = "container_01_01"; + ContainerId mockContainerId = mock(ContainerId.class); + when(mockContainerId.toString()).thenReturn(id); + when(container.getContainerId()).thenReturn(mockContainerId); + when(container.getContainerTokenIdentifier()).thenReturn(tokenId); + when(container.getResource()).thenReturn(Resource.newInstance(1024, 2)); + cGroupsMemoryResourceHandler.preStart(container); + verify(mockCGroupsHandler, times(1)) + .updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, + CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES, "0M"); + verify(mockCGroupsHandler, times(1)) + .updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, + CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS, "100"); + verify(mockCGroupsHandler, times(1)) + .updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, + CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, "1024M"); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestAllocationBasedResourceUtilizationTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestAllocationBasedResourceUtilizationTracker.java new file mode 100644 index 00000000000..82c21473c9f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestAllocationBasedResourceUtilizationTracker.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; +import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests for the {@link AllocationBasedResourceUtilizationTracker} class. 
+ */ +public class TestAllocationBasedResourceUtilizationTracker { + + private ContainerScheduler mockContainerScheduler; + + @Before + public void setup() { + mockContainerScheduler = mock(ContainerScheduler.class); + ContainersMonitor containersMonitor = + new ContainersMonitorImpl(mock(ContainerExecutor.class), + mock(AsyncDispatcher.class), mock(Context.class)); + YarnConfiguration conf = new YarnConfiguration(); + conf.setInt(YarnConfiguration.NM_PMEM_MB, 1024); + conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, true); + conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true); + conf.setFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO, 2.0f); + conf.setInt(YarnConfiguration.NM_VCORES, 8); + containersMonitor.init(conf); + when(mockContainerScheduler.getContainersMonitor()) + .thenReturn(containersMonitor); + } + + /** + * Node has capacity for 1024 MB and 8 cores. Saturate the node. When full the + * hasResourceAvailable should return false. + */ + @Test + public void testHasResourcesAvailable() { + AllocationBasedResourceUtilizationTracker tracker = + new AllocationBasedResourceUtilizationTracker(mockContainerScheduler); + Container testContainer = mock(Container.class); + when(testContainer.getResource()).thenReturn(Resource.newInstance(512, 4)); + for (int i = 0; i < 2; i++) { + Assert.assertTrue(tracker.hasResourcesAvailable(testContainer)); + tracker.addContainerResources(testContainer); + } + Assert.assertFalse(tracker.hasResourcesAvailable(testContainer)); + } + + /** + * Test the case where the current allocation has been truncated to 0.8888891 + * (8/9 cores used). Request 1 additional core - hasEnoughCpu should return + * true. + */ + @Test + public void testHasEnoughCpu() { + AllocationBasedResourceUtilizationTracker tracker = + new AllocationBasedResourceUtilizationTracker(mockContainerScheduler); + float currentAllocation = 0.8888891f; + long totalCores = 9; + int alreadyUsedCores = 8; + Assert.assertTrue(tracker.hasEnoughCpu(currentAllocation, totalCores, + (int) totalCores - alreadyUsedCores)); + Assert.assertFalse(tracker.hasEnoughCpu(currentAllocation, totalCores, + (int) totalCores - alreadyUsedCores + 1)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java index aeba399ca04..a1c247bf572 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java @@ -27,6 +27,8 @@ import java.util.List; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; 
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; @@ -37,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.ExecutionType; +import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ConfigurationException; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -951,4 +954,97 @@ public class TestContainerSchedulerQueuing extends BaseContainerManagerTest { map.get(org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED) .getContainerId()); } + + /** + * Starts one OPPORTUNISTIC container that takes up the whole node's + * resources, and submit one more that will be queued. Now promote the + * queued OPPORTUNISTIC container, which should kill the current running + * OPPORTUNISTIC container to make room for the promoted request. + * @throws Exception + */ + @Test + public void testPromotionOfOpportunisticContainers() throws Exception { + containerManager.start(); + + ContainerLaunchContext containerLaunchContext = + recordFactory.newRecordInstance(ContainerLaunchContext.class); + + List list = new ArrayList<>(); + list.add(StartContainerRequest.newInstance( + containerLaunchContext, + createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER, + context.getNodeId(), + user, BuilderUtils.newResource(2048, 1), + context.getContainerTokenSecretManager(), null, + ExecutionType.OPPORTUNISTIC))); + list.add(StartContainerRequest.newInstance( + containerLaunchContext, + createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER, + context.getNodeId(), + user, BuilderUtils.newResource(1024, 1), + context.getContainerTokenSecretManager(), null, + ExecutionType.OPPORTUNISTIC))); + + StartContainersRequest allRequests = + StartContainersRequest.newInstance(list); + containerManager.startContainers(allRequests); + + Thread.sleep(5000); + + // Ensure first container is running and others are queued. + List statList = new ArrayList(); + for (int i = 0; i < 3; i++) { + statList.add(createContainerId(i)); + } + GetContainerStatusesRequest statRequest = GetContainerStatusesRequest + .newInstance(Arrays.asList(createContainerId(0))); + List containerStatuses = containerManager + .getContainerStatuses(statRequest).getContainerStatuses(); + for (ContainerStatus status : containerStatuses) { + if (status.getContainerId().equals(createContainerId(0))) { + Assert.assertEquals( + org.apache.hadoop.yarn.api.records.ContainerState.RUNNING, + status.getState()); + } else { + Assert.assertEquals( + org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED, + status.getState()); + } + } + + ContainerScheduler containerScheduler = + containerManager.getContainerScheduler(); + // Ensure two containers are properly queued. 
+ Assert.assertEquals(1, containerScheduler.getNumQueuedContainers()); + Assert.assertEquals(0, + containerScheduler.getNumQueuedGuaranteedContainers()); + Assert.assertEquals(1, + containerScheduler.getNumQueuedOpportunisticContainers()); + + // Promote Queued Opportunistic Container + Token updateToken = + createContainerToken(createContainerId(1), 1, DUMMY_RM_IDENTIFIER, + context.getNodeId(), user, BuilderUtils.newResource(1024, 1), + context.getContainerTokenSecretManager(), null, + ExecutionType.GUARANTEED); + List updateTokens = new ArrayList(); + updateTokens.add(updateToken); + ContainerUpdateRequest updateRequest = + ContainerUpdateRequest.newInstance(updateTokens); + ContainerUpdateResponse updateResponse = + containerManager.updateContainer(updateRequest); + + Assert.assertEquals(1, + updateResponse.getSuccessfullyUpdatedContainers().size()); + Assert.assertEquals(0, updateResponse.getFailedRequests().size()); + + waitForContainerState(containerManager, createContainerId(0), + org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE); + + waitForContainerState(containerManager, createContainerId(1), + org.apache.hadoop.yarn.api.records.ContainerState.RUNNING); + + // Ensure no containers are queued. + Assert.assertEquals(0, containerScheduler.getNumQueuedContainers()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java index b562133eed1..7d8704f8b42 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java @@ -41,6 +41,8 @@ import java.util.Scanner; import java.util.Set; import java.util.concurrent.CountDownLatch; +import static org.mockito.Mockito.when; + @Deprecated public class TestCgroupsLCEResourcesHandler { private static File cgroupDir = null; @@ -76,7 +78,7 @@ public class TestCgroupsLCEResourcesHandler { // Test 1, tasks file is empty // tasks file has no data, should return true - Mockito.stub(fspy.delete()).toReturn(true); + Mockito.when(fspy.delete()).thenReturn(true); Assert.assertTrue(handler.checkAndDeleteCgroup(fspy)); // Test 2, tasks file has data @@ -388,4 +390,33 @@ public class TestCgroupsLCEResourcesHandler { FileUtils.deleteQuietly(memory); } } + + @Test + public void testManualCgroupSetting() throws IOException { + CgroupsLCEResourcesHandler handler = new CgroupsLCEResourcesHandler(); + YarnConfiguration conf = new YarnConfiguration(); + conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, + cgroupDir.getAbsolutePath()); + handler.setConf(conf); + File cpu = new File(new File(cgroupDir, "cpuacct,cpu"), "/hadoop-yarn"); + + try { + Assert.assertTrue("temp dir should be created", cpu.mkdirs()); + + final int numProcessors = 4; + ResourceCalculatorPlugin plugin = + Mockito.mock(ResourceCalculatorPlugin.class); + Mockito.doReturn(numProcessors).when(plugin).getNumProcessors(); + Mockito.doReturn(numProcessors).when(plugin).getNumCores(); + when(plugin.getNumProcessors()).thenReturn(8); + handler.init(null, 
plugin); + + Assert.assertEquals("CPU CGRoup path was not set", cpu.getParent(), + handler.getControllerPaths().get("cpu")); + + } finally { + FileUtils.deleteQuietly(cpu); + } + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java index 022baeac492..4561e85c87e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java @@ -140,7 +140,7 @@ public class MockContainer implements Container { } @Override - public void setResource(Resource targetResource) { + public void setContainerTokenIdentifier(ContainerTokenIdentifier token) { } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java index a8dcda4f797..c5c9211d7d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ha.ServiceFailedException; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -96,8 +97,8 @@ public class ActiveStandbyElectorBasedElectorService extends AbstractService zkSessionTimeout = conf.getLong(YarnConfiguration.RM_ZK_TIMEOUT_MS, YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS); - List zkAcls = RMZKUtils.getZKAcls(conf); - List zkAuths = RMZKUtils.getZKAuths(conf); + List zkAcls = ZKCuratorManager.getZKAcls(conf); + List zkAuths = ZKCuratorManager.getZKAuths(conf); int maxRetryNum = conf.getInt(YarnConfiguration.RM_HA_FC_ELECTOR_ZK_RETRIES_KEY, conf diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index 76a164055a4..f77d09eb75d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -70,6 +70,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.security .AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider; import org.apache.hadoop.yarn.server.security.MasterKeyData; +import org.apache.hadoop.yarn.server.utils.AMRMClientUtils; import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils; import org.apache.hadoop.yarn.util.resource.Resources; @@ -211,15 +212,13 @@ public class ApplicationMasterService extends AbstractService implements synchronized (lock) { AllocateResponse lastResponse = lock.getAllocateResponse(); if (hasApplicationMasterRegistered(applicationAttemptId)) { - String message = - "Application Master is already registered : " - + appID; + String message = AMRMClientUtils.APP_ALREADY_REGISTERED_MESSAGE + appID; LOG.warn(message); RMAuditLogger.logFailure( - this.rmContext.getRMApps() - .get(appID).getUser(), - AuditConstants.REGISTER_AM, "", "ApplicationMasterService", message, - appID, applicationAttemptId); + this.rmContext.getRMApps() + .get(appID).getUser(), + AuditConstants.REGISTER_AM, "", "ApplicationMasterService", message, + appID, applicationAttemptId); throw new InvalidApplicationMasterRequestException(message); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 368832a9314..bcd1a9c92d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -43,7 +43,6 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException; -import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.security.AccessRequest; @@ -65,7 +64,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; @@ -349,36 +347,6 @@ public class RMAppManager implements EventHandler, createAndPopulateNewRMApp(appContext, appState.getSubmitTime(), appState.getUser(), true, appState.getStartTime()); - // If null amReq has been returned, check if it is the case that - // application has specified node label expression while node label - // has been disabled. 
Reject the recovery of this application if it - // is true and give clear message so that user can react properly. - if (!appContext.getUnmanagedAM() && - (application.getAMResourceRequests() == null || - application.getAMResourceRequests().isEmpty()) && - !YarnConfiguration.areNodeLabelsEnabled(this.conf)) { - // check application submission context and see if am resource request - // or application itself contains any node label expression. - List amReqsFromAppContext = - appContext.getAMContainerResourceRequests(); - String labelExp = - (amReqsFromAppContext != null && !amReqsFromAppContext.isEmpty()) ? - amReqsFromAppContext.get(0).getNodeLabelExpression() : null; - if (labelExp == null) { - labelExp = appContext.getNodeLabelExpression(); - } - if (labelExp != null && - !labelExp.equals(RMNodeLabelsManager.NO_LABEL)) { - String message = "Failed to recover application " + appId - + ". NodeLabel is not enabled in cluster, but AM resource request " - + "contains a label expression."; - LOG.warn(message); - application.handle( - new RMAppEvent(appId, RMAppEventType.APP_REJECTED, message)); - return; - } - } - application.handle(new RMAppRecoverEvent(appId, rmState)); } @@ -398,28 +366,8 @@ public class RMAppManager implements EventHandler, } ApplicationId applicationId = submissionContext.getApplicationId(); - List amReqs = null; - try { - amReqs = validateAndCreateResourceRequest(submissionContext, isRecovery); - } catch (InvalidLabelResourceRequestException e) { - // This can happen if the application had been submitted and run - // with Node Label enabled but recover with Node Label disabled. - // Thus there might be node label expression in the application's - // resource requests. If this is the case, create RmAppImpl with - // null amReq and reject the application later with clear error - // message. So that the application can still be tracked by RM - // after recovery and user can see what's going on and react accordingly. - if (isRecovery && - !YarnConfiguration.areNodeLabelsEnabled(this.conf)) { - if (LOG.isDebugEnabled()) { - LOG.debug("AMResourceRequest is not created for " + applicationId - + ". NodeLabel is not enabled in cluster, but AM resource " - + "request contains a label expression."); - } - } else { - throw e; - } - } + List amReqs = validateAndCreateResourceRequest( + submissionContext, isRecovery); // Verify and get the update application priority and set back to // submissionContext diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMZKUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMZKUtils.java deleted file mode 100644 index 4b8561dae15..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMZKUtils.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.yarn.server.resourcemanager; - -import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.ZKUtil; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.zookeeper.data.ACL; - -import java.util.Collections; -import java.util.List; - -/** - * Helper class that provides utility methods specific to ZK operations - */ -@InterfaceAudience.Private -public class RMZKUtils { - private static final Log LOG = LogFactory.getLog(RMZKUtils.class); - - /** - * Utility method to fetch the ZK ACLs from the configuration. - * - * @throws java.io.IOException if the Zookeeper ACLs configuration file - * cannot be read - */ - public static List getZKAcls(Configuration conf) throws IOException { - // Parse authentication from configuration. - String zkAclConf = - conf.get(YarnConfiguration.RM_ZK_ACL, - YarnConfiguration.DEFAULT_RM_ZK_ACL); - try { - zkAclConf = ZKUtil.resolveConfIndirection(zkAclConf); - return ZKUtil.parseACLs(zkAclConf); - } catch (IOException | ZKUtil.BadAclFormatException e) { - LOG.error("Couldn't read ACLs based on " + YarnConfiguration.RM_ZK_ACL); - throw e; - } - } - - /** - * Utility method to fetch ZK auth info from the configuration. 
- * - * @throws java.io.IOException if the Zookeeper ACLs configuration file - * cannot be read - */ - public static List getZKAuths(Configuration conf) - throws IOException { - String zkAuthConf = conf.get(YarnConfiguration.RM_ZK_AUTH); - try { - zkAuthConf = ZKUtil.resolveConfIndirection(zkAuthConf); - if (zkAuthConf != null) { - return ZKUtil.parseAuth(zkAuthConf); - } else { - return Collections.emptyList(); - } - } catch (IOException | ZKUtil.BadAuthFormatException e) { - LOG.error("Couldn't read Auth based on " + YarnConfiguration.RM_ZK_AUTH); - throw e; - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index b63b60dfef2..5333f254328 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -22,8 +22,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.curator.framework.AuthInfo; import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.CuratorFrameworkFactory; -import org.apache.curator.retry.RetryNTimes; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol; @@ -46,7 +44,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -64,6 +62,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; +import org.apache.hadoop.yarn.server.resourcemanager.federation.FederationStateStoreService; import org.apache.hadoop.yarn.server.resourcemanager.metrics.NoOpSystemMetricPublisher; import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV1Publisher; @@ -91,7 +90,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; @@ -185,12 +183,13 @@ public class ResourceManager extends CompositeService implements Recoverable { protected RMAppManager rmAppManager; protected ApplicationACLsManager applicationACLsManager; protected QueueACLsManager queueACLsManager; + private FederationStateStoreService federationStateStoreService; private WebApp webApp; private AppReportFetcher fetcher = null; protected ResourceTrackerService resourceTracker; private JvmMetrics jvmMetrics; private boolean curatorEnabled = false; - private CuratorFramework curator; + private ZKCuratorManager zkManager; private final String zkRootNodePassword = Long.toString(new SecureRandom().nextLong()); private boolean recoveryEnabled; @@ -343,7 +342,7 @@ public class ResourceManager extends CompositeService implements Recoverable { conf.getBoolean(YarnConfiguration.CURATOR_LEADER_ELECTOR, YarnConfiguration.DEFAULT_CURATOR_LEADER_ELECTOR_ENABLED); if (curatorEnabled) { - this.curator = createAndStartCurator(conf); + this.zkManager = createAndStartZKManager(conf); elector = new CuratorBasedElectorService(this); } else { elector = new ActiveStandbyElectorBasedElectorService(this); @@ -351,50 +350,49 @@ public class ResourceManager extends CompositeService implements Recoverable { return elector; } - public CuratorFramework createAndStartCurator(Configuration conf) + /** + * Create and ZooKeeper Curator manager. + * @param config Configuration for the ZooKeeper curator. + * @return New ZooKeeper Curator manager. + * @throws IOException If it cannot create the manager. + */ + public ZKCuratorManager createAndStartZKManager(Configuration config) throws IOException { - String zkHostPort = conf.get(YarnConfiguration.RM_ZK_ADDRESS); - if (zkHostPort == null) { - throw new YarnRuntimeException( - YarnConfiguration.RM_ZK_ADDRESS + " is not configured."); - } - int numRetries = conf.getInt(YarnConfiguration.RM_ZK_NUM_RETRIES, - YarnConfiguration.DEFAULT_ZK_RM_NUM_RETRIES); - int zkSessionTimeout = conf.getInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, - YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS); - int zkRetryInterval = conf.getInt(YarnConfiguration.RM_ZK_RETRY_INTERVAL_MS, - YarnConfiguration.DEFAULT_RM_ZK_RETRY_INTERVAL_MS); + ZKCuratorManager manager = new ZKCuratorManager(config); - // set up zk auths - List zkAuths = RMZKUtils.getZKAuths(conf); + // Get authentication List authInfos = new ArrayList<>(); - for (ZKUtil.ZKAuthInfo zkAuth : zkAuths) { - authInfos.add(new AuthInfo(zkAuth.getScheme(), zkAuth.getAuth())); + if (HAUtil.isHAEnabled(config) && HAUtil.getConfValueForRMInstance( + YarnConfiguration.ZK_RM_STATE_STORE_ROOT_NODE_ACL, config) == null) { + String zkRootNodeUsername = HAUtil.getConfValueForRMInstance( + YarnConfiguration.RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADDRESS, config); + String defaultFencingAuth = + zkRootNodeUsername + ":" + zkRootNodePassword; + byte[] defaultFencingAuthData = + defaultFencingAuth.getBytes(Charset.forName("UTF-8")); + String scheme = new DigestAuthenticationProvider().getScheme(); + AuthInfo authInfo = new AuthInfo(scheme, defaultFencingAuthData); + authInfos.add(authInfo); } - if (HAUtil.isHAEnabled(conf) && HAUtil.getConfValueForRMInstance( - YarnConfiguration.ZK_RM_STATE_STORE_ROOT_NODE_ACL, conf) == null) { - String zkRootNodeUsername = HAUtil - .getConfValueForRMInstance(YarnConfiguration.RM_ADDRESS, - YarnConfiguration.DEFAULT_RM_ADDRESS, conf); - byte[] defaultFencingAuth = - (zkRootNodeUsername + ":" + zkRootNodePassword) - 
.getBytes(Charset.forName("UTF-8")); - authInfos.add(new AuthInfo(new DigestAuthenticationProvider().getScheme(), - defaultFencingAuth)); - } + manager.start(authInfos); + return manager; + } - CuratorFramework client = CuratorFrameworkFactory.builder() - .connectString(zkHostPort) - .sessionTimeoutMs(zkSessionTimeout) - .retryPolicy(new RetryNTimes(numRetries, zkRetryInterval)) - .authorization(authInfos).build(); - client.start(); - return client; + /** + * Get the ZooKeeper Curator manager. + * @return ZooKeeper Curator manager. + */ + public ZKCuratorManager getZKManager() { + return this.zkManager; } public CuratorFramework getCurator() { - return this.curator; + if (this.zkManager == null) { + return null; + } + return this.zkManager.getCurator(); } public String getZkRootNodePassword() { @@ -499,6 +497,10 @@ public class ResourceManager extends CompositeService implements Recoverable { return new RMTimelineCollectorManager(this); } + private FederationStateStoreService createFederationStateStoreService() { + return new FederationStateStoreService(rmContext); + } + protected SystemMetricsPublisher createSystemMetricsPublisher() { SystemMetricsPublisher publisher; if (YarnConfiguration.timelineServiceEnabled(conf) && @@ -695,8 +697,7 @@ public class ResourceManager extends CompositeService implements Recoverable { } } - // creating monitors that handle preemption - createPolicyMonitors(); + createSchedulerMonitors(); masterService = createApplicationMasterService(); addService(masterService) ; @@ -724,6 +725,20 @@ public class ResourceManager extends CompositeService implements Recoverable { delegationTokenRenewer.setRMContext(rmContext); } + if(HAUtil.isFederationEnabled(conf)) { + String cId = YarnConfiguration.getClusterId(conf); + if (cId.isEmpty()) { + String errMsg = + "Cannot initialize RM as Federation is enabled" + + " but cluster id is not configured."; + LOG.error(errMsg); + throw new YarnRuntimeException(errMsg); + } + federationStateStoreService = createFederationStateStoreService(); + addIfService(federationStateStoreService); + LOG.info("Initialized Federation membership."); + } + new RMNMInfo(rmContext, scheduler); super.serviceInit(conf); @@ -752,6 +767,13 @@ public class ResourceManager extends CompositeService implements Recoverable { LOG.error("Failed to load/recover state", e); throw e; } + } else { + if (HAUtil.isFederationEnabled(conf)) { + long epoch = conf.getLong(YarnConfiguration.RM_EPOCH, + YarnConfiguration.DEFAULT_RM_EPOCH); + rmContext.setEpoch(epoch); + LOG.info("Epoch set for Federation: " + epoch); + } } super.serviceStart(); @@ -776,9 +798,8 @@ public class ResourceManager extends CompositeService implements Recoverable { } - protected void createPolicyMonitors() { - if (scheduler instanceof PreemptableResourceScheduler - && conf.getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, + protected void createSchedulerMonitors() { + if (conf.getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS)) { LOG.info("Loading policy monitors"); List policies = conf.getInstances( @@ -1237,8 +1258,8 @@ public class ResourceManager extends CompositeService implements Recoverable { configurationProvider.close(); } super.serviceStop(); - if (curator != null) { - curator.close(); + if (zkManager != null) { + zkManager.close(); } transitionToStandby(false); rmContext.setHAServiceState(HAServiceState.STOPPING); @@ -1349,6 +1370,12 @@ public class ResourceManager extends CompositeService implements 
Recoverable { return this.queueACLsManager; } + @Private + @VisibleForTesting + public FederationStateStoreService getFederationStateStoreService() { + return this.federationStateStoreService; + } + @Private WebApp getWebapp() { return this.webApp; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index aa7f524ce30..e6f2bb2436d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -551,7 +551,7 @@ public class ResourceTrackerService extends AbstractService implements getResponseId() + 1, NodeAction.NORMAL, null, null, null, null, nextHeartBeatInterval); rmNode.updateNodeHeartbeatResponseForCleanup(nodeHeartBeatResponse); - rmNode.updateNodeHeartbeatResponseForContainersDecreasing( + rmNode.updateNodeHeartbeatResponseForUpdatedContainers( nodeHeartBeatResponse); populateKeys(request, nodeHeartBeatResponse); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreHeartbeat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreHeartbeat.java new file mode 100644 index 00000000000..a4618a2fc3b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreHeartbeat.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.federation; + +import java.io.StringWriter; + +import javax.xml.bind.JAXBException; + +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.sun.jersey.api.json.JSONConfiguration; +import com.sun.jersey.api.json.JSONJAXBContext; +import com.sun.jersey.api.json.JSONMarshaller; + +/** + * Periodic heart beat from a ResourceManager participating in + * federation to indicate liveliness. The heart beat publishes the current + * capabilities as represented by {@link ClusterMetricsInfo} of the sub cluster. + * + */ +public class FederationStateStoreHeartbeat implements Runnable { + + private static final Logger LOG = + LoggerFactory.getLogger(FederationStateStoreHeartbeat.class); + + private SubClusterId subClusterId; + private FederationStateStore stateStoreService; + + private final ResourceScheduler rs; + + private StringWriter currentClusterState; + private JSONJAXBContext jc; + private JSONMarshaller marshaller; + private String capability; + + public FederationStateStoreHeartbeat(SubClusterId subClusterId, + FederationStateStore stateStoreClient, ResourceScheduler scheduler) { + this.stateStoreService = stateStoreClient; + this.subClusterId = subClusterId; + this.rs = scheduler; + // Initialize the JAXB Marshaller + this.currentClusterState = new StringWriter(); + try { + this.jc = new JSONJAXBContext( + JSONConfiguration.mapped().rootUnwrapping(false).build(), + ClusterMetricsInfo.class); + marshaller = jc.createJSONMarshaller(); + } catch (JAXBException e) { + LOG.warn("Exception while trying to initialize JAXB context.", e); + } + LOG.info("Initialized Federation membership for cluster with timestamp: " + + ResourceManager.getClusterTimeStamp()); + } + + /** + * Get the current cluster state as a JSON string representation of the + * {@link ClusterMetricsInfo}. 
+ */ + private void updateClusterState() { + try { + // get the current state + currentClusterState.getBuffer().setLength(0); + ClusterMetricsInfo clusterMetricsInfo = new ClusterMetricsInfo(rs); + marshaller.marshallToJSON(clusterMetricsInfo, currentClusterState); + capability = currentClusterState.toString(); + } catch (Exception e) { + LOG.warn("Exception while trying to generate cluster state," + + " so reverting to last know state.", e); + } + } + + @Override + public synchronized void run() { + try { + updateClusterState(); + SubClusterHeartbeatRequest request = SubClusterHeartbeatRequest + .newInstance(subClusterId, SubClusterState.SC_RUNNING, capability); + stateStoreService.subClusterHeartbeat(request); + LOG.debug("Sending the heartbeat with capability: {}", capability); + } catch (Exception e) { + LOG.warn("Exception when trying to heartbeat: ", e); + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java new file mode 100644 index 00000000000..530184fe1b9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java @@ -0,0 +1,304 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.federation; + +import java.net.InetSocketAddress; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse; +import 
org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Implements {@link FederationStateStore} and provides a service for + * participating in the federation membership. + */ +public class FederationStateStoreService extends AbstractService + implements FederationStateStore { + + public static final Logger LOG = + LoggerFactory.getLogger(FederationStateStoreService.class); + + private Configuration config; + private ScheduledExecutorService scheduledExecutorService; + private FederationStateStoreHeartbeat stateStoreHeartbeat; + private FederationStateStore stateStoreClient = null; + private SubClusterId subClusterId; + private long heartbeatInterval; + private RMContext rmContext; + + public FederationStateStoreService(RMContext rmContext) { + super(FederationStateStoreService.class.getName()); + LOG.info("FederationStateStoreService initialized"); + this.rmContext = rmContext; + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + + this.config = conf; + + RetryPolicy retryPolicy = + FederationStateStoreFacade.createRetryPolicy(conf); + + this.stateStoreClient = + (FederationStateStore) FederationStateStoreFacade.createRetryInstance( + conf, YarnConfiguration.FEDERATION_STATESTORE_CLIENT_CLASS, + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS, + FederationStateStore.class, retryPolicy); + this.stateStoreClient.init(conf); + LOG.info("Initialized state store client class"); + + this.subClusterId = + SubClusterId.newInstance(YarnConfiguration.getClusterId(conf)); + + heartbeatInterval = conf.getLong( + YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS, + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS); + if (heartbeatInterval <= 0) { + heartbeatInterval = + YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS; + } + LOG.info("Initialized federation membership service."); + + super.serviceInit(conf); + } + + @Override + protected void serviceStart() throws Exception { + + registerAndInitializeHeartbeat(); + + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + Exception ex = null; + try { + if (this.scheduledExecutorService != null + && !this.scheduledExecutorService.isShutdown()) { + this.scheduledExecutorService.shutdown(); + LOG.info("Stopped federation membership heartbeat"); + } + } catch (Exception e) { + LOG.error("Failed to shutdown ScheduledExecutorService", e); + ex = e; + } + + if (this.stateStoreClient != null) { + try { + deregisterSubCluster(SubClusterDeregisterRequest + .newInstance(subClusterId, SubClusterState.SC_UNREGISTERED)); + } finally { + this.stateStoreClient.close(); + } + } + + if (ex != null) { + throw ex; + } + } + + // Return a client accessible string representation of the service address. 
+ private String getServiceAddress(InetSocketAddress address) { + InetSocketAddress socketAddress = NetUtils.getConnectAddress(address); + return socketAddress.getAddress().getHostAddress() + ":" + + socketAddress.getPort(); + } + + private void registerAndInitializeHeartbeat() { + String clientRMAddress = + getServiceAddress(rmContext.getClientRMService().getBindAddress()); + String amRMAddress = getServiceAddress( + rmContext.getApplicationMasterService().getBindAddress()); + String rmAdminAddress = getServiceAddress( + config.getSocketAddr(YarnConfiguration.RM_ADMIN_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADMIN_PORT)); + String webAppAddress = getServiceAddress(NetUtils + .createSocketAddr(WebAppUtils.getRMWebAppURLWithScheme(config))); + + SubClusterInfo subClusterInfo = SubClusterInfo.newInstance(subClusterId, + amRMAddress, clientRMAddress, rmAdminAddress, webAppAddress, + SubClusterState.SC_NEW, ResourceManager.getClusterTimeStamp(), ""); + try { + registerSubCluster(SubClusterRegisterRequest.newInstance(subClusterInfo)); + LOG.info("Successfully registered for federation subcluster: {}", + subClusterInfo); + } catch (Exception e) { + throw new YarnRuntimeException( + "Failed to register Federation membership with the StateStore", e); + } + stateStoreHeartbeat = new FederationStateStoreHeartbeat(subClusterId, + stateStoreClient, rmContext.getScheduler()); + scheduledExecutorService = + HadoopExecutors.newSingleThreadScheduledExecutor(); + scheduledExecutorService.scheduleWithFixedDelay(stateStoreHeartbeat, + heartbeatInterval, heartbeatInterval, TimeUnit.SECONDS); + LOG.info("Started federation membership heartbeat with interval: {}", + heartbeatInterval); + } + + @VisibleForTesting + public FederationStateStore getStateStoreClient() { + return stateStoreClient; + } + + @VisibleForTesting + public FederationStateStoreHeartbeat getStateStoreHeartbeatThread() { + return stateStoreHeartbeat; + } + + @Override + public Version getCurrentVersion() { + return stateStoreClient.getCurrentVersion(); + } + + @Override + public Version loadVersion() { + return stateStoreClient.getCurrentVersion(); + } + + @Override + public GetSubClusterPolicyConfigurationResponse getPolicyConfiguration( + GetSubClusterPolicyConfigurationRequest request) throws YarnException { + return stateStoreClient.getPolicyConfiguration(request); + } + + @Override + public SetSubClusterPolicyConfigurationResponse setPolicyConfiguration( + SetSubClusterPolicyConfigurationRequest request) throws YarnException { + return stateStoreClient.setPolicyConfiguration(request); + } + + @Override + public GetSubClusterPoliciesConfigurationsResponse getPoliciesConfigurations( + GetSubClusterPoliciesConfigurationsRequest request) throws YarnException { + return stateStoreClient.getPoliciesConfigurations(request); + } + + @Override + public SubClusterRegisterResponse registerSubCluster( + SubClusterRegisterRequest registerSubClusterRequest) + throws YarnException { + return stateStoreClient.registerSubCluster(registerSubClusterRequest); + } + + @Override + public SubClusterDeregisterResponse deregisterSubCluster( + SubClusterDeregisterRequest subClusterDeregisterRequest) + throws YarnException { + return stateStoreClient.deregisterSubCluster(subClusterDeregisterRequest); + } + + @Override + public SubClusterHeartbeatResponse subClusterHeartbeat( + SubClusterHeartbeatRequest subClusterHeartbeatRequest) + throws YarnException { + return 
stateStoreClient.subClusterHeartbeat(subClusterHeartbeatRequest); + } + + @Override + public GetSubClusterInfoResponse getSubCluster( + GetSubClusterInfoRequest subClusterRequest) throws YarnException { + return stateStoreClient.getSubCluster(subClusterRequest); + } + + @Override + public GetSubClustersInfoResponse getSubClusters( + GetSubClustersInfoRequest subClustersRequest) throws YarnException { + return stateStoreClient.getSubClusters(subClustersRequest); + } + + @Override + public AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster( + AddApplicationHomeSubClusterRequest request) throws YarnException { + return stateStoreClient.addApplicationHomeSubCluster(request); + } + + @Override + public UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster( + UpdateApplicationHomeSubClusterRequest request) throws YarnException { + return stateStoreClient.updateApplicationHomeSubCluster(request); + } + + @Override + public GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster( + GetApplicationHomeSubClusterRequest request) throws YarnException { + return stateStoreClient.getApplicationHomeSubCluster(request); + } + + @Override + public GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster( + GetApplicationsHomeSubClusterRequest request) throws YarnException { + return stateStoreClient.getApplicationsHomeSubCluster(request); + } + + @Override + public DeleteApplicationHomeSubClusterResponse deleteApplicationHomeSubCluster( + DeleteApplicationHomeSubClusterRequest request) throws YarnException { + return stateStoreClient.deleteApplicationHomeSubCluster(request); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/package-info.java new file mode 100644 index 00000000000..47c7c65d490 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/package-info.java @@ -0,0 +1,17 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.federation; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java index 47458a3b347..d2550e60267 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java @@ -19,12 +19,12 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; public interface SchedulingEditPolicy { void init(Configuration config, RMContext context, - PreemptableResourceScheduler scheduler); + ResourceScheduler scheduler); /** * This method is invoked at regular intervals. Internally the policy is diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java index 3def27f964b..1e3f6914031 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java @@ -29,7 +29,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; import com.google.common.annotations.VisibleForTesting; @@ -59,8 +58,7 @@ public class SchedulingMonitor extends AbstractService { } public void serviceInit(Configuration conf) throws Exception { - scheduleEditPolicy.init(conf, rmContext, - (PreemptableResourceScheduler) rmContext.getScheduler()); + scheduleEditPolicy.init(conf, rmContext, rmContext.getScheduler()); this.monitorInterval = scheduleEditPolicy.getMonitoringInterval(); super.serviceInit(conf); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java index fc8ad2b347f..c4c98e2302c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java @@ -32,7 +32,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; @@ -150,7 +150,7 @@ public class ProportionalCapacityPreemptionPolicy } public void init(Configuration config, RMContext context, - PreemptableResourceScheduler sched) { + ResourceScheduler sched) { LOG.info("Preemption monitor:" + this.getClass().getCanonicalName()); assert null == scheduler : "Unexpected duplicate call to init"; if (!(sched instanceof CapacityScheduler)) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/InvariantsChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/InvariantsChecker.java index 5800162bed1..2c9031fe827 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/InvariantsChecker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/InvariantsChecker.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.invariants; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,16 +39,16 @@ public abstract class InvariantsChecker implements SchedulingEditPolicy { private Configuration conf; private RMContext context; - private 
PreemptableResourceScheduler scheduler; + private ResourceScheduler scheduler; private boolean throwOnInvariantViolation; private long monitoringInterval; @Override public void init(Configuration config, RMContext rmContext, - PreemptableResourceScheduler preemptableResourceScheduler) { + ResourceScheduler scheduler) { this.conf = config; this.context = rmContext; - this.scheduler = preemptableResourceScheduler; + this.scheduler = scheduler; this.throwOnInvariantViolation = conf.getBoolean(InvariantsChecker.THROW_ON_VIOLATION, false); this.monitoringInterval = @@ -89,7 +89,7 @@ public abstract class InvariantsChecker implements SchedulingEditPolicy { return context; } - public PreemptableResourceScheduler getScheduler() { + public ResourceScheduler getScheduler() { return scheduler; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java index 9fee2bd2abc..ef4b9d071bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java @@ -27,8 +27,8 @@ import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,7 +42,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; /** * This policy checks at every invocation that a given set of invariants @@ -78,9 +77,9 @@ public class MetricsInvariantChecker extends InvariantsChecker { @Override public void init(Configuration config, RMContext rmContext, - PreemptableResourceScheduler preemptableResourceScheduler) { + ResourceScheduler scheduler) { - super.init(config, rmContext, preemptableResourceScheduler); + super.init(config, rmContext, scheduler); this.metricsSystem = DefaultMetricsSystem.instance(); this.queueMetrics = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index 95919451a73..7cbeda3a3dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -197,7 +197,7 @@ public class FileSystemRMStateStore extends RMStateStore { @Override public synchronized long getAndIncrementEpoch() throws Exception { Path epochNodePath = getNodePath(rootDirPath, EPOCH_NODE); - long currentEpoch = 0; + long currentEpoch = baseEpoch; FileStatus status = getFileStatusWithRetries(epochNodePath); if (status != null) { // load current epoch diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java index 2ca53db2b34..16ae1d3c19a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java @@ -255,7 +255,7 @@ public class LeveldbRMStateStore extends RMStateStore { @Override public synchronized long getAndIncrementEpoch() throws Exception { - long currentEpoch = 0; + long currentEpoch = baseEpoch; byte[] dbKeyBytes = bytes(EPOCH_NODE); try { byte[] data = db.get(dbKeyBytes); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java index 5f3328b279b..5041000f0ba 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java @@ -83,6 +83,7 @@ public class MemoryRMStateStore extends RMStateStore { @Override public synchronized void initInternal(Configuration conf) { + epoch = baseEpoch; } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index d0a8cf5ec4e..35340e62a22 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ReservationId; import 
org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; import org.apache.hadoop.yarn.conf.HAUtil; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; @@ -98,6 +99,7 @@ public abstract class RMStateStore extends AbstractService { "ReservationSystemRoot"; protected static final String VERSION_NODE = "RMVersionNode"; protected static final String EPOCH_NODE = "EpochNode"; + protected long baseEpoch; protected ResourceManager resourceManager; private final ReadLock readLock; private final WriteLock writeLock; @@ -690,6 +692,9 @@ public abstract class RMStateStore extends AbstractService { dispatcher.register(RMStateStoreEventType.class, rmStateStoreEventHandler); dispatcher.setDrainEventsOnStop(); + // read the base epoch value from conf + baseEpoch = conf.getLong(YarnConfiguration.RM_EPOCH, + YarnConfiguration.DEFAULT_RM_EPOCH); initInternal(conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index 3b986d14a01..a445e756433 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.curator.framework.CuratorFramework; @@ -31,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ReservationId; @@ -46,7 +46,6 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ReservationAllocationStateProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; -import org.apache.hadoop.yarn.server.resourcemanager.RMZKUtils; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; @@ -201,8 +200,8 @@ public class ZKRMStateStore extends RMStateStore { private final String zkRootNodeAuthScheme = new DigestAuthenticationProvider().getScheme(); - @VisibleForTesting - protected CuratorFramework curatorFramework; + /** Manager for the ZooKeeper connection. 
*/ + private ZKCuratorManager zkManager; /* * Indicates different app attempt state store operations. @@ -298,12 +297,11 @@ public class ZKRMStateStore extends RMStateStore { appIdNodeSplitIndex = YarnConfiguration.DEFAULT_ZK_APPID_NODE_SPLIT_INDEX; } - zkAcl = RMZKUtils.getZKAcls(conf); + zkAcl = ZKCuratorManager.getZKAcls(conf); if (HAUtil.isHAEnabled(conf)) { String zkRootNodeAclConf = HAUtil.getConfValueForRMInstance (YarnConfiguration.ZK_RM_STATE_STORE_ROOT_NODE_ACL, conf); - if (zkRootNodeAclConf != null) { zkRootNodeAclConf = ZKUtil.resolveConfIndirection(zkRootNodeAclConf); @@ -330,17 +328,16 @@ public class ZKRMStateStore extends RMStateStore { amrmTokenSecretManagerRoot = getNodePath(zkRootNodePath, AMRMTOKEN_SECRET_MANAGER_ROOT); reservationRoot = getNodePath(zkRootNodePath, RESERVATION_SYSTEM_ROOT); - curatorFramework = resourceManager.getCurator(); - - if (curatorFramework == null) { - curatorFramework = resourceManager.createAndStartCurator(conf); + zkManager = resourceManager.getZKManager(); + if (zkManager == null) { + zkManager = resourceManager.createAndStartZKManager(conf); } } @Override public synchronized void startInternal() throws Exception { // ensure root dirs exist - createRootDirRecursively(znodeWorkingPath); + zkManager.createRootDirRecursively(znodeWorkingPath); create(zkRootNodePath); setRootNodeAcls(); delete(fencingNodePath); @@ -382,6 +379,7 @@ public class ZKRMStateStore extends RMStateStore { logRootNodeAcls("Before setting ACLs'\n"); } + CuratorFramework curatorFramework = zkManager.getCurator(); if (HAUtil.isHAEnabled(getConfig())) { curatorFramework.setACL().withACL(zkRootNodeAcl).forPath(zkRootNodePath); } else { @@ -401,6 +399,7 @@ public class ZKRMStateStore extends RMStateStore { } if (!HAUtil.isHAEnabled(getConfig())) { + CuratorFramework curatorFramework = zkManager.getCurator(); IOUtils.closeStream(curatorFramework); } } @@ -438,7 +437,7 @@ public class ZKRMStateStore extends RMStateStore { @Override public synchronized long getAndIncrementEpoch() throws Exception { String epochNodePath = getNodePath(zkRootNodePath, EPOCH_NODE); - long currentEpoch = 0; + long currentEpoch = baseEpoch; if (exists(epochNodePath)) { // load current epoch @@ -936,6 +935,7 @@ public class ZKRMStateStore extends RMStateStore { } safeDelete(appIdRemovePath); } else { + CuratorFramework curatorFramework = zkManager.getCurator(); curatorFramework.delete().deletingChildrenIfNeeded(). forPath(appIdRemovePath); } @@ -1145,22 +1145,6 @@ public class ZKRMStateStore extends RMStateStore { } } - /** - * Utility function to ensure that the configured base znode exists. - * This recursively creates the znode as well as all of its parents. - */ - private void createRootDirRecursively(String path) throws Exception { - String pathParts[] = path.split("/"); - Preconditions.checkArgument(pathParts.length >= 1 && pathParts[0].isEmpty(), - "Invalid path: %s", path); - StringBuilder sb = new StringBuilder(); - - for (int i = 1; i < pathParts.length; i++) { - sb.append("/").append(pathParts[i]); - create(sb.toString()); - } - } - /** * Get alternate path for app id if path according to configured split index * does not exist. We look for path based on all possible split indices. 
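Each of the getAndIncrementEpoch() hunks in this patch makes the same change: the counter is seeded with a configurable base epoch (read from YarnConfiguration.RM_EPOCH in RMStateStore.serviceInit()) instead of always starting at 0, and only falls back to the persisted value once an epoch node exists. A small sketch of that seeding behaviour against an in-memory stand-in for the epoch node (the map is hypothetical; the real stores persist to ZooKeeper, LevelDB, or HDFS):

import java.util.HashMap;
import java.util.Map;

public class EpochSketch {
  // Stand-in for the persisted EpochNode.
  private final Map<String, Long> store = new HashMap<>();
  private final long baseEpoch;

  EpochSketch(long baseEpoch) {
    // In the patch this value comes from conf.getLong(
    // YarnConfiguration.RM_EPOCH, YarnConfiguration.DEFAULT_RM_EPOCH).
    this.baseEpoch = baseEpoch;
  }

  synchronized long getAndIncrementEpoch() {
    // First call on a fresh store returns baseEpoch instead of 0;
    // afterwards the stored value takes over.
    long current = store.getOrDefault("EpochNode", baseEpoch);
    store.put("EpochNode", current + 1);
    return current;
  }

  public static void main(String[] args) {
    EpochSketch s = new EpochSketch(1000);
    System.out.println(s.getAndIncrementEpoch()); // 1000
    System.out.println(s.getAndIncrementEpoch()); // 1001
  }
}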
@@ -1236,38 +1220,32 @@ public class ZKRMStateStore extends RMStateStore { @VisibleForTesting byte[] getData(final String path) throws Exception { - return curatorFramework.getData().forPath(path); + return zkManager.getData(path); } @VisibleForTesting List getACL(final String path) throws Exception { - return curatorFramework.getACL().forPath(path); + return zkManager.getACL(path); } @VisibleForTesting List getChildren(final String path) throws Exception { - return curatorFramework.getChildren().forPath(path); + return zkManager.getChildren(path); } @VisibleForTesting boolean exists(final String path) throws Exception { - return curatorFramework.checkExists().forPath(path) != null; + return zkManager.exists(path); } @VisibleForTesting void create(final String path) throws Exception { - if (!exists(path)) { - curatorFramework.create() - .withMode(CreateMode.PERSISTENT).withACL(zkAcl) - .forPath(path, null); - } + zkManager.create(path, zkAcl); } @VisibleForTesting void delete(final String path) throws Exception { - if (exists(path)) { - curatorFramework.delete().deletingChildrenIfNeeded().forPath(path); - } + zkManager.delete(path); } private void safeCreate(String path, byte[] data, List acl, @@ -1310,6 +1288,7 @@ public class ZKRMStateStore extends RMStateStore { private CuratorTransactionFinal transactionFinal; SafeTransaction() throws Exception { + CuratorFramework curatorFramework = zkManager.getCurator(); CuratorTransaction transaction = curatorFramework.inTransaction(); transactionFinal = transaction.create() .withMode(CreateMode.PERSISTENT).withACL(zkAcl) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java index 0e9a825de07..027d066ab28 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java @@ -129,11 +129,12 @@ public class ReservationInputValidator { Resources.multiply(rr.getCapability(), rr.getConcurrency())); } // verify the allocation is possible (skip for ANY) - if (contract.getDeadline() - contract.getArrival() < minDuration + long duration = contract.getDeadline() - contract.getArrival(); + if (duration < minDuration && type != ReservationRequestInterpreter.R_ANY) { message = "The time difference (" - + (contract.getDeadline() - contract.getArrival()) + + (duration) + ") between arrival (" + contract.getArrival() + ") " + "and deadline (" + contract.getDeadline() + ") must " + " be greater or equal to the minimum resource duration (" @@ -158,15 +159,22 @@ public class ReservationInputValidator { // check that the recurrence is a positive long value. String recurrenceExpression = contract.getRecurrenceExpression(); try { - Long recurrence = Long.parseLong(recurrenceExpression); + long recurrence = Long.parseLong(recurrenceExpression); if (recurrence < 0) { message = "Negative Period : " + recurrenceExpression + ". 
Please try" - + " again with a non-negative long value as period"; + + " again with a non-negative long value as period."; + throw RPCUtil.getRemoteException(message); + } + // verify duration is less than recurrence for periodic reservations + if (recurrence > 0 && duration > recurrence) { + message = "Duration of the requested reservation: " + duration + + " is greater than the recurrence: " + recurrence + + ". Please try again with a smaller duration."; throw RPCUtil.getRemoteException(message); } } catch (NumberFormatException e) { message = "Invalid period " + recurrenceExpression + ". Please try" - + " again with a non-negative long value as period"; + + " again with a non-negative long value as period."; throw RPCUtil.getRemoteException(message); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index fa2f20c7231..03be7937b3a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -888,7 +888,8 @@ public class RMAppImpl implements RMApp, Recoverable { /* keep the master in sync with the state machine */ this.stateMachine.doTransition(event.getType(), event); } catch (InvalidStateTransitionException e) { - LOG.error("Can't handle this event at current state", e); + LOG.error("App: " + appID + + " can't handle this event at current state", e); /* TODO fail the application on the failed transition */ } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 4210c54b863..7d453bdfb33 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -911,7 +911,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { /* keep the master in sync with the state machine */ this.stateMachine.doTransition(event.getType(), event); } catch (InvalidStateTransitionException e) { - LOG.error("Can't handle this event at current state", e); + LOG.error("App attempt: " + appAttemptID + + " can't handle this event at current state", e); /* TODO fail the application on the failed transition */ } @@ -1315,7 +1316,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { // AFTER the initial saving on app-attempt-start // These fields can be visible from outside only after they are saved in // StateStore - String diags = null; + BoundedAppender diags = new BoundedAppender(diagnostics.limit); // don't leave the tracking URL pointing to a 
non-existent AM if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, @@ -1329,15 +1330,15 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { int exitStatus = ContainerExitStatus.INVALID; switch (event.getType()) { case LAUNCH_FAILED: - diags = event.getDiagnosticMsg(); + diags.append(event.getDiagnosticMsg()); break; case REGISTERED: - diags = getUnexpectedAMRegisteredDiagnostics(); + diags.append(getUnexpectedAMRegisteredDiagnostics()); break; case UNREGISTERED: RMAppAttemptUnregistrationEvent unregisterEvent = (RMAppAttemptUnregistrationEvent) event; - diags = unregisterEvent.getDiagnosticMsg(); + diags.append(unregisterEvent.getDiagnosticMsg()); // reset finalTrackingUrl to url sent by am finalTrackingUrl = sanitizeTrackingUrl(unregisterEvent.getFinalTrackingUrl()); finalStatus = unregisterEvent.getFinalApplicationStatus(); @@ -1345,16 +1346,16 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { case CONTAINER_FINISHED: RMAppAttemptContainerFinishedEvent finishEvent = (RMAppAttemptContainerFinishedEvent) event; - diags = getAMContainerCrashedDiagnostics(finishEvent); + diags.append(getAMContainerCrashedDiagnostics(finishEvent)); exitStatus = finishEvent.getContainerStatus().getExitStatus(); break; case KILL: break; case FAIL: - diags = event.getDiagnosticMsg(); + diags.append(event.getDiagnosticMsg()); break; case EXPIRE: - diags = getAMExpiredDiagnostics(event); + diags.append(getAMExpiredDiagnostics(event)); break; default: break; @@ -1368,7 +1369,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { ApplicationAttemptStateData.newInstance( applicationAttemptId, getMasterContainer(), rmStore.getCredentialsFromAppAttempt(this), - startTime, stateToBeStored, finalTrackingUrl, diags, + startTime, stateToBeStored, finalTrackingUrl, diags.toString(), finalStatus, exitStatus, getFinishTime(), resUsage.getMemorySeconds(), resUsage.getVcoreSeconds(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java index e089050de6a..0655609a893 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java @@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.util.resource.Resources; @@ -125,14 +126,16 @@ public class RMAppAttemptMetrics { long vcoreSeconds = finishedVcoreSeconds.get(); // Only add in the running containers if this is the active attempt. 
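The diagnostics change just above replaces a plain String with a BoundedAppender sized by diagnostics.limit, so unbounded AM diagnostics can no longer bloat the attempt record written to the state store. The following is not Hadoop's BoundedAppender, only an illustrative length-capped append buffer with the same append()/toString() shape used in the hunk:

public class BoundedDiagnostics {
  private final StringBuilder buffer = new StringBuilder();
  private final int limit;

  BoundedDiagnostics(int limit) {
    this.limit = limit;
  }

  BoundedDiagnostics append(CharSequence msg) {
    if (msg != null) {
      buffer.append(msg);
      // Keep only the tail once the limit is exceeded, so the most
      // recent diagnostics survive truncation.
      if (buffer.length() > limit) {
        buffer.delete(0, buffer.length() - limit);
      }
    }
    return this;
  }

  @Override
  public String toString() {
    return buffer.toString();
  }

  public static void main(String[] args) {
    BoundedDiagnostics diags = new BoundedDiagnostics(16);
    diags.append("container exited: ").append("out of memory");
    System.out.println(diags); // at most the 16 trailing characters
  }
}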
- RMAppAttempt currentAttempt = rmContext.getRMApps() - .get(attemptId.getApplicationId()).getCurrentAppAttempt(); - if (currentAttempt.getAppAttemptId().equals(attemptId)) { - ApplicationResourceUsageReport appResUsageReport = rmContext - .getScheduler().getAppResourceUsageReport(attemptId); - if (appResUsageReport != null) { - memorySeconds += appResUsageReport.getMemorySeconds(); - vcoreSeconds += appResUsageReport.getVcoreSeconds(); + RMApp rmApp = rmContext.getRMApps().get(attemptId.getApplicationId()); + if (null != rmApp) { + RMAppAttempt currentAttempt = rmApp.getCurrentAppAttempt(); + if (currentAttempt.getAppAttemptId().equals(attemptId)) { + ApplicationResourceUsageReport appResUsageReport = rmContext + .getScheduler().getAppResourceUsageReport(attemptId); + if (appResUsageReport != null) { + memorySeconds += appResUsageReport.getMemorySeconds(); + vcoreSeconds += appResUsageReport.getVcoreSeconds(); + } } } return new AggregateAppResourceUsage(memorySeconds, vcoreSeconds); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java index 1e9463a3e01..f49db7e761b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java @@ -51,8 +51,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptE import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode - .RMNodeDecreaseContainerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeUpdateContainerEvent; import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.state.InvalidStateTransitionException; import org.apache.hadoop.yarn.state.MultipleArcTransition; @@ -284,7 +283,6 @@ public class RMContainerImpl implements RMContainer { @Override public RMContainerState getState() { this.readLock.lock(); - try { return this.stateMachine.getCurrentState(); } finally { @@ -598,7 +596,7 @@ public class RMContainerImpl implements RMContainer { RMContainerUpdatesAcquiredEvent acquiredEvent = (RMContainerUpdatesAcquiredEvent) event; if (acquiredEvent.isIncreasedContainer()) { - // If container is increased but not acquired by AM, we will start + // If container is increased but not started by AM, we will start // containerAllocationExpirer for this container in this transition. 
container.containerAllocationExpirer.register( new AllocationExpirationInfo(event.getContainerId(), true)); @@ -641,7 +639,7 @@ public class RMContainerImpl implements RMContainer { container.lastConfirmedResource = rmContainerResource; container.containerAllocationExpirer.unregister( new AllocationExpirationInfo(event.getContainerId())); - container.eventHandler.handle(new RMNodeDecreaseContainerEvent( + container.eventHandler.handle(new RMNodeUpdateContainerEvent( container.nodeId, Collections.singletonList(container.getContainer()))); } else if (Resources.fitsIn(nmContainerResource, rmContainerResource)) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java index 86f8679fa04..ab15c95bd97 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java @@ -144,7 +144,7 @@ public interface RMNode { * applications to clean up for this node. * @param response the {@link NodeHeartbeatResponse} to update */ - public void updateNodeHeartbeatResponseForCleanup(NodeHeartbeatResponse response); + void updateNodeHeartbeatResponseForCleanup(NodeHeartbeatResponse response); public NodeHeartbeatResponse getLastNodeHeartBeatResponse(); @@ -169,9 +169,9 @@ public interface RMNode { public Set getNodeLabels(); /** - * Update containers to be decreased + * Update containers to be updated */ - public void updateNodeHeartbeatResponseForContainersDecreasing( + void updateNodeHeartbeatResponseForUpdatedContainers( NodeHeartbeatResponse response); public List pullNewlyIncreasedContainers(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java index b28fef3c92a..a3b2ed72e94 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java @@ -42,7 +42,7 @@ public enum RMNodeEventType { // Source: Container CONTAINER_ALLOCATED, CLEANUP_CONTAINER, - DECREASE_CONTAINER, + UPDATE_CONTAINER, // Source: ClientRMService SIGNAL_CONTAINER, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 1f121f8438e..1bdaa98b16e 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -171,7 +171,7 @@ public class RMNodeImpl implements RMNode, EventHandler { private final List runningApplications = new ArrayList(); - private final Map toBeDecreasedContainers = + private final Map toBeUpdatedContainers = new HashMap<>(); private final Map nmReportedIncreasedContainers = @@ -228,8 +228,8 @@ public class RMNodeImpl implements RMNode, EventHandler { .addTransition(NodeState.RUNNING, NodeState.RUNNING, RMNodeEventType.RESOURCE_UPDATE, new UpdateNodeResourceWhenRunningTransition()) .addTransition(NodeState.RUNNING, NodeState.RUNNING, - RMNodeEventType.DECREASE_CONTAINER, - new DecreaseContainersTransition()) + RMNodeEventType.UPDATE_CONTAINER, + new UpdateContainersTransition()) .addTransition(NodeState.RUNNING, NodeState.RUNNING, RMNodeEventType.SIGNAL_CONTAINER, new SignalContainerTransition()) .addTransition(NodeState.RUNNING, NodeState.SHUTDOWN, @@ -614,18 +614,18 @@ public class RMNodeImpl implements RMNode, EventHandler { }; @VisibleForTesting - public Collection getToBeDecreasedContainers() { - return toBeDecreasedContainers.values(); + public Collection getToBeUpdatedContainers() { + return toBeUpdatedContainers.values(); } @Override - public void updateNodeHeartbeatResponseForContainersDecreasing( + public void updateNodeHeartbeatResponseForUpdatedContainers( NodeHeartbeatResponse response) { this.writeLock.lock(); try { - response.addAllContainersToDecrease(toBeDecreasedContainers.values()); - toBeDecreasedContainers.clear(); + response.addAllContainersToUpdate(toBeUpdatedContainers.values()); + toBeUpdatedContainers.clear(); } finally { this.writeLock.unlock(); } @@ -1031,16 +1031,19 @@ public class RMNodeImpl implements RMNode, EventHandler { RMNodeFinishedContainersPulledByAMEvent) event).getContainers()); } } - - public static class DecreaseContainersTransition + + /** + * Transition to Update a container. 
+ */ + public static class UpdateContainersTransition implements SingleArcTransition { @Override public void transition(RMNodeImpl rmNode, RMNodeEvent event) { - RMNodeDecreaseContainerEvent de = (RMNodeDecreaseContainerEvent) event; + RMNodeUpdateContainerEvent de = (RMNodeUpdateContainerEvent) event; - for (Container c : de.getToBeDecreasedContainers()) { - rmNode.toBeDecreasedContainers.put(c.getId(), c); + for (Container c : de.getToBeUpdatedContainers()) { + rmNode.toBeUpdatedContainers.put(c.getId(), c); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeDecreaseContainerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeUpdateContainerEvent.java similarity index 65% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeDecreaseContainerEvent.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeUpdateContainerEvent.java index 62925adc37b..73af563dba0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeDecreaseContainerEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeUpdateContainerEvent.java @@ -23,17 +23,22 @@ import java.util.List; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.NodeId; -public class RMNodeDecreaseContainerEvent extends RMNodeEvent { - final List toBeDecreasedContainers; +/** + * This class is used to create update container event + * for the containers running on a node. 
+ * + */ +public class RMNodeUpdateContainerEvent extends RMNodeEvent { + private List toBeUpdatedContainers; - public RMNodeDecreaseContainerEvent(NodeId nodeId, - List toBeDecreasedContainers) { - super(nodeId, RMNodeEventType.DECREASE_CONTAINER); - - this.toBeDecreasedContainers = toBeDecreasedContainers; + public RMNodeUpdateContainerEvent(NodeId nodeId, + List toBeUpdatedContainers) { + super(nodeId, RMNodeEventType.UPDATE_CONTAINER); + + this.toBeUpdatedContainers = toBeUpdatedContainers; } - - public List getToBeDecreasedContainers() { - return toBeDecreasedContainers; + + public List getToBeUpdatedContainers() { + return toBeUpdatedContainers; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index c00b7bed3d5..c3879dd2a8c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstant import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; @@ -149,6 +150,10 @@ public abstract class AbstractYarnScheduler */ protected final ReentrantReadWriteLock.WriteLock writeLock; + // If set to true, then ALL container updates will be automatically sent to + // the NM in the next heartbeat. + private boolean autoUpdateContainers = false; + /** * Construct the service. 
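The autoUpdateContainers flag introduced for AbstractYarnScheduler here drives the update-handling branch shown later in SchedulerApplicationAttempt: resource decreases always go out on the node heartbeat, while other container updates are pushed automatically only when the flag is enabled; otherwise the AM must act on the updated token itself. A condensed sketch of that decision (the enum and flag are stand-ins for the real YARN types):

public class ContainerUpdateSketch {
  enum ContainerUpdateType {
    INCREASE_RESOURCE, DECREASE_RESOURCE,
    PROMOTE_EXECUTION_TYPE, DEMOTE_EXECUTION_TYPE
  }

  // Mirrors the branch in the SchedulerApplicationAttempt hunk below:
  // decreases (including automatic rollbacks of un-acquired increases)
  // always ride the NM heartbeat; everything else does so only when
  // auto-update is enabled.
  static boolean sendViaNodeHeartbeat(ContainerUpdateType updateType,
      boolean autoUpdateContainers) {
    return updateType == ContainerUpdateType.DECREASE_RESOURCE
        || autoUpdateContainers;
  }

  public static void main(String[] args) {
    System.out.println(sendViaNodeHeartbeat(
        ContainerUpdateType.DECREASE_RESOURCE, false));      // true
    System.out.println(sendViaNodeHeartbeat(
        ContainerUpdateType.PROMOTE_EXECUTION_TYPE, false)); // false
  }
}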
* @@ -177,6 +182,9 @@ public abstract class AbstractYarnScheduler configuredMaximumAllocationWaitTime); maxClusterLevelAppPriority = getMaxPriorityFromConf(conf); createReleaseCache(); + autoUpdateContainers = + conf.getBoolean(YarnConfiguration.RM_AUTO_UPDATE_CONTAINERS, + YarnConfiguration.DEFAULT_RM_AUTO_UPDATE_CONTAINERS); super.serviceInit(conf); } @@ -234,6 +242,10 @@ public abstract class AbstractYarnScheduler return nodeTracker.getNodes(nodeFilter); } + public boolean shouldContainersBeAutoUpdated() { + return this.autoUpdateContainers; + } + @Override public Resource getClusterResource() { return nodeTracker.getClusterCapacity(); @@ -322,6 +334,7 @@ public abstract class AbstractYarnScheduler } + // TODO: Rename it to getCurrentApplicationAttempt public T getApplicationAttempt(ApplicationAttemptId applicationAttemptId) { SchedulerApplication app = applications.get( applicationAttemptId.getApplicationId()); @@ -518,12 +531,10 @@ public abstract class AbstractYarnScheduler container.setVersion(status.getVersion()); ApplicationAttemptId attemptId = container.getId().getApplicationAttemptId(); - RMContainer rmContainer = - new RMContainerImpl(container, - SchedulerRequestKey.extractFrom(container), attemptId, - node.getNodeID(), applications.get( - attemptId.getApplicationId()).getUser(), rmContext, - status.getCreationTime(), status.getNodeLabelExpression()); + RMContainer rmContainer = new RMContainerImpl(container, + SchedulerRequestKey.extractFrom(container), attemptId, node.getNodeID(), + applications.get(attemptId.getApplicationId()).getUser(), rmContext, + status.getCreationTime(), status.getNodeLabelExpression()); return rmContainer; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java index 851c9f5587b..8acf7d55dbb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java @@ -524,7 +524,7 @@ public class AppSchedulingInfo { this.placesBlacklistedByApp = appInfo.getBlackList(); } - public void recoverContainer(RMContainer rmContainer) { + public void recoverContainer(RMContainer rmContainer, String partition) { try { this.writeLock.lock(); QueueMetrics metrics = queue.getMetrics(); @@ -540,8 +540,8 @@ public class AppSchedulingInfo { return; } - metrics.allocateResources(rmContainer.getNodeLabelExpression(), - user, 1, rmContainer.getAllocatedResource(), false); + metrics.allocateResources(partition, user, 1, + rmContainer.getAllocatedResource(), false); } finally { this.writeLock.unlock(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 4b0bf91e250..cc14a1eb9c8 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -74,8 +74,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerStat import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerUpdatesAcquiredEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode - .RMNodeDecreaseContainerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeUpdateContainerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SchedulingPlacementSet; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity; @@ -655,7 +654,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity { container.getNodeId(), getUser(), container.getResource(), container.getPriority(), rmContainer.getCreationTime(), this.logAggregationContext, rmContainer.getNodeLabelExpression(), - containerType)); + containerType, container.getExecutionType())); updateNMToken(container); } catch (IllegalArgumentException e) { // DNS might be down, skip returning this container. @@ -663,20 +662,38 @@ public class SchedulerApplicationAttempt implements SchedulableEntity { + " an updated container " + container.getId(), e); return null; } - - if (updateType == null || - ContainerUpdateType.PROMOTE_EXECUTION_TYPE == updateType || - ContainerUpdateType.DEMOTE_EXECUTION_TYPE == updateType) { + + if (updateType == null) { + // This is a newly allocated container rmContainer.handle(new RMContainerEvent( rmContainer.getContainerId(), RMContainerEventType.ACQUIRED)); } else { - rmContainer.handle(new RMContainerUpdatesAcquiredEvent( - rmContainer.getContainerId(), - ContainerUpdateType.INCREASE_RESOURCE == updateType)); - if (ContainerUpdateType.DECREASE_RESOURCE == updateType) { + // Resource increase is handled as follows: + // If the AM does not use the updated token to increase the container + // for a configured period of time, the RM will automatically rollback + // the update by performing a container decrease. This rollback (which + // essentially is another resource decrease update) is notified to the + // NM heartbeat response. If autoUpdate flag is set, then AM does not + // need to do anything - same code path as resource decrease. + // + // Resource Decrease is always automatic: the AM never has to do + // anything. It is always via NM heartbeat response. + // + // ExecutionType updates (both Promotion and Demotion) are either + // always automatic (if the flag is set) or the AM has to explicitly + // call updateContainer() on the NM. 
There is no expiry + boolean autoUpdate = + ContainerUpdateType.DECREASE_RESOURCE == updateType || + ((AbstractYarnScheduler)rmContext.getScheduler()) + .shouldContainersBeAutoUpdated(); + if (autoUpdate) { this.rmContext.getDispatcher().getEventHandler().handle( - new RMNodeDecreaseContainerEvent(rmContainer.getNodeId(), + new RMNodeUpdateContainerEvent(rmContainer.getNodeId(), Collections.singletonList(rmContainer.getContainer()))); + } else { + rmContainer.handle(new RMContainerUpdatesAcquiredEvent( + rmContainer.getContainerId(), + ContainerUpdateType.INCREASE_RESOURCE == updateType)); } } return container; @@ -1103,7 +1120,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity { try { writeLock.lock(); // recover app scheduling info - appSchedulingInfo.recoverContainer(rmContainer); + appSchedulingInfo.recoverContainer(rmContainer, node.getPartition()); if (rmContainer.getState().equals(RMContainerState.COMPLETED)) { return; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index c67f1ce1cc4..7b554db4705 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -22,6 +22,8 @@ import java.util.Set; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -55,7 +57,9 @@ import org.apache.hadoop.yarn.util.resource.Resources; @Private @Unstable public class SchedulerUtils { - + + private static final Log LOG = LogFactory.getLog(SchedulerUtils.class); + private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); @@ -200,9 +204,14 @@ public class SchedulerUtils { String labelExp = resReq.getNodeLabelExpression(); if (!(RMNodeLabelsManager.NO_LABEL.equals(labelExp) || null == labelExp)) { - throw new InvalidLabelResourceRequestException( - "Invalid resource request, node label not enabled " - + "but request contains label expression"); + String message = "NodeLabel is not enabled in cluster, but resource" + + " request contains a label expression."; + LOG.warn(message); + if (!isRecovery) { + throw new InvalidLabelResourceRequestException( + "Invalid resource request, node label not enabled " + + "but request contains label expression"); + } } } if (null == queueInfo) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java index 5fbdead6346..d7c452a1ffc 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -76,6 +76,7 @@ public abstract class AbstractCSQueue implements CSQueue { private static final Log LOG = LogFactory.getLog(AbstractCSQueue.class); volatile CSQueue parent; final String queueName; + private final String queuePath; volatile int numContainers; final Resource minimumAllocation; @@ -119,6 +120,8 @@ public abstract class AbstractCSQueue implements CSQueue { this.labelManager = cs.getRMContext().getNodeLabelManager(); this.parent = parent; this.queueName = queueName; + this.queuePath = + ((parent == null) ? "" : (parent.getQueuePath() + ".")) + this.queueName; this.resourceCalculator = cs.getResourceCalculator(); this.activitiesManager = cs.getActivitiesManager(); @@ -150,6 +153,11 @@ public abstract class AbstractCSQueue implements CSQueue { queueCapacities, parent == null ? null : parent.getQueueCapacities()); } + + @Override + public String getQueuePath() { + return queuePath; + } @Override public float getCapacity() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 2ccaf630d66..e4ca0031b83 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -94,11 +94,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueInvalidExcep import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedContainerChangeRequest; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager; @@ -163,6 +161,9 @@ public class CapacityScheduler extends private int offswitchPerHeartbeatLimit; + private boolean assignMultipleEnabled; + + private int maxAssignPerHeartbeat; @Override public void setConf(Configuration conf) { @@ -308,6 +309,9 @@ public class CapacityScheduler extends asyncScheduleInterval = this.conf.getLong(ASYNC_SCHEDULER_INTERVAL, DEFAULT_ASYNC_SCHEDULER_INTERVAL); + this.assignMultipleEnabled = 
this.conf.getAssignMultipleEnabled(); + this.maxAssignPerHeartbeat = this.conf.getMaxAssignPerHeartbeat(); + // number of threads for async scheduling int maxAsyncSchedulingThreads = this.conf.getInt( CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD, @@ -899,6 +903,19 @@ public class CapacityScheduler extends ContainerUpdates updateRequests) { FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId); if (application == null) { + LOG.error("Calling allocate on removed or non existent application " + + applicationAttemptId.getApplicationId()); + return EMPTY_ALLOCATION; + } + + // The allocate may be the leftover from previous attempt, and it will + // impact current attempt, such as confuse the request and allocation for + // current attempt's AM container. + // Note outside precondition check for the attempt id may be + // outdated here, so double check it here is necessary. + if (!application.getApplicationAttemptId().equals(applicationAttemptId)) { + LOG.error("Calling allocate on previous or removed " + + "or non existent application attempt " + applicationAttemptId); return EMPTY_ALLOCATION; } @@ -1109,17 +1126,29 @@ public class CapacityScheduler extends .getAssignmentInformation().getReserved()); } - private boolean canAllocateMore(CSAssignment assignment, int offswitchCount) { - if (null != assignment && Resources.greaterThan(getResourceCalculator(), - getClusterResource(), assignment.getResource(), Resources.none()) - && offswitchCount < offswitchPerHeartbeatLimit) { - // And it should not be a reserved container - if (assignment.getAssignmentInformation().getNumReservations() == 0) { - return true; - } + private boolean canAllocateMore(CSAssignment assignment, int offswitchCount, + int assignedContainers) { + // Current assignment shouldn't be empty + if (assignment == null + || Resources.equals(assignment.getResource(), Resources.none())) { + return false; } - return false; + // offswitch assignment should be under threshold + if (offswitchCount >= offswitchPerHeartbeatLimit) { + return false; + } + + // And it should not be a reserved container + if (assignment.getAssignmentInformation().getNumReservations() > 0) { + return false; + } + + // assignMultipleEnabled should be ON, + // and assignedContainers should be under threshold + return assignMultipleEnabled + && (maxAssignPerHeartbeat == -1 + || assignedContainers < maxAssignPerHeartbeat); } /** @@ -1131,6 +1160,7 @@ public class CapacityScheduler extends FiCaSchedulerNode node = getNode(nodeId); if (null != node) { int offswitchCount = 0; + int assignedContainers = 0; PlacementSet ps = new SimplePlacementSet<>(node); CSAssignment assignment = allocateContainersToNode(ps, withNodeHeartbeat); @@ -1141,7 +1171,13 @@ public class CapacityScheduler extends offswitchCount++; } - while (canAllocateMore(assignment, offswitchCount)) { + if (Resources.greaterThan(calculator, getClusterResource(), + assignment.getResource(), Resources.none())) { + assignedContainers++; + } + + while (canAllocateMore(assignment, offswitchCount, + assignedContainers)) { // Try to see if it is possible to allocate multiple container for // the same node heartbeat assignment = allocateContainersToNode(ps, true); @@ -1150,6 +1186,12 @@ public class CapacityScheduler extends && assignment.getType() == NodeType.OFF_SWITCH) { offswitchCount++; } + + if (null != assignment + && Resources.greaterThan(calculator, getClusterResource(), + assignment.getResource(), Resources.none())) { + assignedContainers++; + } } if 
(offswitchCount >= offswitchPerHeartbeatLimit) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 1e29d50b749..13b9ff69f2a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -301,6 +301,21 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur @Private public static final boolean DEFAULT_LAZY_PREEMPTION_ENABLED = false; + @Private + public static final String ASSIGN_MULTIPLE_ENABLED = PREFIX + + "per-node-heartbeat.multiple-assignments-enabled"; + + @Private + public static final boolean DEFAULT_ASSIGN_MULTIPLE_ENABLED = true; + + /** Maximum number of containers to assign on each check-in. */ + @Private + public static final String MAX_ASSIGN_PER_HEARTBEAT = PREFIX + + "per-node-heartbeat.maximum-container-assignments"; + + @Private + public static final int DEFAULT_MAX_ASSIGN_PER_HEARTBEAT = -1; + AppPriorityACLConfigurationParser priorityACLConfig = new AppPriorityACLConfigurationParser(); public CapacitySchedulerConfiguration() { @@ -1473,4 +1488,12 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur } return userWeights; } + + public boolean getAssignMultipleEnabled() { + return getBoolean(ASSIGN_MULTIPLE_ENABLED, DEFAULT_ASSIGN_MULTIPLE_ENABLED); + } + + public int getMaxAssignPerHeartbeat() { + return getInt(MAX_ASSIGN_PER_HEARTBEAT, DEFAULT_MAX_ASSIGN_PER_HEARTBEAT); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java index e33fbb33e26..1ceb6fb3ea1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java @@ -327,6 +327,10 @@ public class CapacitySchedulerQueueManager implements SchedulerQueueManager< + "it is not yet in stopped state. 
Current State : " + oldQueue.getState()); } + } else if (oldQueue instanceof ParentQueue + && newQueue instanceof LeafQueue) { + LOG.info("Converting the parent queue: " + oldQueue.getQueuePath() + + " to leaf queue."); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 2e502b77677..d15431e77e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -299,11 +299,6 @@ public class LeafQueue extends AbstractCSQueue { } } - @Override - public String getQueuePath() { - return getParent().getQueuePath() + "." + getQueueName(); - } - /** * Used only by tests. */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index f6ada4fcbe7..2e48000c09b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -18,6 +18,14 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -34,7 +42,6 @@ import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.AccessType; @@ -45,7 +52,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerStat import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedContainerChangeRequest; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger; @@ -62,14 +68,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Placeme import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSetUtils; import org.apache.hadoop.yarn.util.resource.Resources; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - @Private @Evolving public class ParentQueue extends AbstractCSQueue { @@ -200,12 +198,6 @@ public class ParentQueue extends AbstractCSQueue { } } - @Override - public String getQueuePath() { - String parentPath = ((parent == null) ? "" : (parent.getQueuePath() + ".")); - return parentPath + getQueueName(); - } - @Override public QueueInfo getQueueInfo( boolean includeChildQueues, boolean recursive) { @@ -315,18 +307,21 @@ public class ParentQueue extends AbstractCSQueue { // Check if the child-queue already exists if (childQueue != null) { - // Check if the child-queue has been converted into parent queue. - // The CS has already checked to ensure that this child-queue is in - // STOPPED state. - if (childQueue instanceof LeafQueue - && newChildQueue instanceof ParentQueue) { - // We would convert this LeafQueue to ParentQueue, consider this - // as the combination of DELETE then ADD. + // Check if the child-queue has been converted into parent queue or + // parent Queue has been converted to child queue. The CS has already + // checked to ensure that this child-queue is in STOPPED state if + // Child queue has been converted to ParentQueue. + if ((childQueue instanceof LeafQueue + && newChildQueue instanceof ParentQueue) + || (childQueue instanceof ParentQueue + && newChildQueue instanceof LeafQueue)) { + // We would convert this LeafQueue to ParentQueue, or vice versa. + // consider this as the combination of DELETE then ADD. 
newChildQueue.setParent(this); currentChildQueues.put(newChildQueueName, newChildQueue); // inform CapacitySchedulerQueueManager - CapacitySchedulerQueueManager queueManager = this.csContext - .getCapacitySchedulerQueueManager(); + CapacitySchedulerQueueManager queueManager = + this.csContext.getCapacitySchedulerQueueManager(); queueManager.addQueue(newChildQueueName, newChildQueue); continue; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index ad4c8cee5dd..17bb104605d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -426,6 +426,19 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt { // accepted & confirmed, it will become RESERVED state if (schedulerContainer.getRmContainer().getState() == RMContainerState.RESERVED) { + // Check if node currently reserved by other application, there may + // be some outdated proposals in async-scheduling environment + if (schedulerContainer.getRmContainer() != schedulerContainer + .getSchedulerNode().getReservedContainer()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Try to re-reserve a container, but node " + + schedulerContainer.getSchedulerNode() + + " is already reserved by another container" + + schedulerContainer.getSchedulerNode() + .getReservedContainer().getContainerId()); + } + return false; + } // Set reReservation == true reReservation = true; } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java index f143aa65043..71e6f7fd7df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java @@ -23,8 +23,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.api.records.QueueACL; @@ -41,7 +39,6 @@ import org.apache.hadoop.yarn.util.resource.Resources; import com.google.common.annotations.VisibleForTesting; public class AllocationConfiguration extends ReservationSchedulerConfiguration { - private static final Log LOG = LogFactory.getLog(FSQueue.class.getName()); private static final AccessControlList EVERYBODY_ACL = new 
AccessControlList("*"); private static final AccessControlList NOBODY_ACL = new AccessControlList(" "); private static final ResourceCalculator RESOURCE_CALCULATOR = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index bc204cbc8c3..313a27ae378 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -266,7 +266,7 @@ public class AllocationFileLoaderService extends AbstractService { Map> configuredQueues = new HashMap<>(); for (FSQueueType queueType : FSQueueType.values()) { - configuredQueues.put(queueType, new HashSet()); + configuredQueues.put(queueType, new HashSet<>()); } // Read and parse the allocations file. @@ -280,7 +280,7 @@ public class AllocationFileLoaderService extends AbstractService { throw new AllocationConfigurationException("Bad fair scheduler config " + "file: top-level element not "); NodeList elements = root.getChildNodes(); - List queueElements = new ArrayList(); + List queueElements = new ArrayList<>(); Element placementPolicyElement = null; for (int i = 0; i < elements.getLength(); i++) { Node node = elements.item(i); @@ -294,8 +294,9 @@ public class AllocationFileLoaderService extends AbstractService { NodeList fields = element.getChildNodes(); for (int j = 0; j < fields.getLength(); j++) { Node fieldNode = fields.item(j); - if (!(fieldNode instanceof Element)) + if (!(fieldNode instanceof Element)) { continue; + } Element field = (Element) fieldNode; if ("maxRunningApps".equals(field.getTagName())) { String text = ((Text)field.getFirstChild()).getData().trim(); @@ -490,8 +491,9 @@ public class AllocationFileLoaderService extends AbstractService { for (int j = 0; j < fields.getLength(); j++) { Node fieldNode = fields.item(j); - if (!(fieldNode instanceof Element)) + if (!(fieldNode instanceof Element)) { continue; + } Element field = (Element) fieldNode; if ("minResources".equals(field.getTagName())) { String text = ((Text)field.getFirstChild()).getData().trim(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java index a678bb9ec43..5dfef731e20 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java @@ -554,6 +554,15 @@ public class FSAppAttempt extends SchedulerApplicationAttempt this.minshareStarvation = Resources.none(); } + /** + * Get 
last computed minshare starvation. + * + * @return last computed minshare starvation + */ + Resource getMinshareStarvation() { + return minshareStarvation; + } + void trackContainerForPreemption(RMContainer container) { synchronized (preemptionVariablesLock) { if (containersToPreempt.add(container)) { @@ -842,7 +851,10 @@ public class FSAppAttempt extends SchedulerApplicationAttempt } // The desired container won't fit here, so reserve + // Reserve only, if app does not wait for preempted resources on the node, + // otherwise we may end up with duplicate reservations if (isReservable(capability) && + !node.isPreemptedForApp(this) && reserve(pendingAsk.getPerAllocationResource(), node, reservedContainer, type, schedulerKey)) { updateAMDiagnosticMsg(capability, " exceeds the available resources of " @@ -1110,7 +1122,8 @@ public class FSAppAttempt extends SchedulerApplicationAttempt } if (!starved || - now - lastTimeAtFairShare < getQueue().getFairSharePreemptionTimeout()) { + now - lastTimeAtFairShare < + getQueue().getFairSharePreemptionTimeout()) { fairshareStarvation = Resources.none(); } else { // The app has been starved for longer than preemption-timeout. @@ -1138,7 +1151,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt } /** - * Is application starved for fairshare or minshare + * Is application starved for fairshare or minshare. */ boolean isStarved() { return isStarvedForFairShare() || !Resources.isNone(minshareStarvation); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java index efe36a66af5..b3e59c53dae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java @@ -66,11 +66,11 @@ class FSPreemptionThread extends Thread { schedulerReadLock = scheduler.getSchedulerReadLock(); } + @Override public void run() { while (!Thread.interrupted()) { - FSAppAttempt starvedApp; - try{ - starvedApp = context.getStarvedApps().take(); + try { + FSAppAttempt starvedApp = context.getStarvedApps().take(); // Hold the scheduler readlock so this is not concurrent with the // update thread. schedulerReadLock.lock(); @@ -82,7 +82,7 @@ class FSPreemptionThread extends Thread { starvedApp.preemptionTriggered(delayBeforeNextStarvationCheck); } catch (InterruptedException e) { LOG.info("Preemption thread interrupted! Exiting."); - return; + Thread.currentThread().interrupt(); } } } @@ -112,16 +112,19 @@ class FSPreemptionThread extends Thread { PreemptableContainers bestContainers = null; List potentialNodes = scheduler.getNodeTracker() .getNodesByResourceName(rr.getResourceName()); + int maxAMContainers = Integer.MAX_VALUE; + for (FSSchedulerNode node : potentialNodes) { - int maxAMContainers = bestContainers == null ? 
- Integer.MAX_VALUE : bestContainers.numAMContainers; PreemptableContainers preemptableContainers = identifyContainersToPreemptOnNode( rr.getCapability(), node, maxAMContainers); + if (preemptableContainers != null) { // This set is better than any previously identified set. bestContainers = preemptableContainers; - if (preemptableContainers.numAMContainers == 0) { + maxAMContainers = bestContainers.numAMContainers; + + if (maxAMContainers == 0) { break; } } @@ -182,13 +185,10 @@ class FSPreemptionThread extends Thread { return preemptableContainers; } } - return null; - } - private boolean isNodeAlreadyReserved( - FSSchedulerNode node, FSAppAttempt app) { - FSAppAttempt nodeReservedApp = node.getReservedAppSchedulable(); - return nodeReservedApp != null && !nodeReservedApp.equals(app); + // Return null if the sum of all preemptable containers' resources + // isn't enough to satisfy the starved request. + return null; } private void trackPreemptionsAgainstNode(List containers, @@ -214,7 +214,7 @@ class FSPreemptionThread extends Thread { } private class PreemptContainersTask extends TimerTask { - private List containers; + private final List containers; PreemptContainersTask(List containers) { this.containers = containers; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java index 6575e0c3cab..93646f47ca6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -35,12 +36,15 @@ import org.apache.hadoop.yarn.util.resource.Resources; import java.util.Collection; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.LinkedList; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentSkipListSet; +/** + * Fair Scheduler specific node features. + */ @Private @Unstable public class FSSchedulerNode extends SchedulerNode { @@ -122,7 +126,8 @@ public class FSSchedulerNode extends SchedulerNode { SchedulerApplicationAttempt application) { // Cannot unreserve for wrong application... ApplicationAttemptId reservedApplication = - getReservedContainer().getContainer().getId().getApplicationAttemptId(); + getReservedContainer().getContainer().getId() + .getApplicationAttemptId(); if (!reservedApplication.equals( application.getApplicationAttemptId())) { throw new IllegalStateException("Trying to unreserve " + @@ -151,20 +156,37 @@ public class FSSchedulerNode extends SchedulerNode { return new LinkedHashMap<>(resourcesPreemptedForApp); } + /** + * Returns whether a preemption is tracked on the node for the specified app. 
+ * @return if preempted containers are reserved for the app + */ + synchronized boolean isPreemptedForApp(FSAppAttempt app){ + return resourcesPreemptedForApp.containsKey(app); + } + /** * Remove apps that have their preemption requests fulfilled. */ - private synchronized void cleanupPreemptionList() { - Iterator> iterator = - resourcesPreemptedForApp.entrySet().iterator(); - while(iterator.hasNext()) { - FSAppAttempt app = iterator.next().getKey(); - if (app.isStopped() || !app.isStarved()) { + private void cleanupPreemptionList() { + // Synchronize separately to avoid potential deadlocks + // This may cause delayed deletion of reservations + LinkedList candidates; + synchronized (this) { + candidates = Lists.newLinkedList(resourcesPreemptedForApp.keySet()); + } + for (FSAppAttempt app : candidates) { + if (app.isStopped() || !app.isStarved() || + (Resources.isNone(app.getFairshareStarvation()) && + Resources.isNone(app.getMinshareStarvation()))) { // App does not need more resources - Resources.subtractFrom(totalResourcesPreempted, - resourcesPreemptedForApp.get(app)); - appIdToAppMap.remove(app.getApplicationAttemptId()); - iterator.remove(); + synchronized (this) { + Resource removed = resourcesPreemptedForApp.remove(app); + if (removed != null) { + Resources.subtractFrom(totalResourcesPreempted, + removed); + appIdToAppMap.remove(app.getApplicationAttemptId()); + } + } } } } @@ -180,15 +202,23 @@ public class FSSchedulerNode extends SchedulerNode { void addContainersForPreemption(Collection containers, FSAppAttempt app) { - appIdToAppMap.putIfAbsent(app.getApplicationAttemptId(), app); - resourcesPreemptedForApp.putIfAbsent(app, Resource.newInstance(0, 0)); - Resource appReserved = resourcesPreemptedForApp.get(app); + Resource appReserved = Resources.createResource(0); for(RMContainer container : containers) { - containersForPreemption.add(container); - Resources.addTo(appReserved, container.getAllocatedResource()); - Resources.addTo(totalResourcesPreempted, - container.getAllocatedResource()); + if(containersForPreemption.add(container)) { + Resources.addTo(appReserved, container.getAllocatedResource()); + } + } + + synchronized (this) { + if (!Resources.isNone(appReserved)) { + Resources.addTo(totalResourcesPreempted, + appReserved); + appIdToAppMap.putIfAbsent(app.getApplicationAttemptId(), app); + resourcesPreemptedForApp. 
+ putIfAbsent(app, Resource.newInstance(0, 0)); + Resources.addTo(resourcesPreemptedForApp.get(app), appReserved); + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index b41d3f71dc5..0f417c39519 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -835,8 +835,19 @@ public class FairScheduler extends // Make sure this application exists FSAppAttempt application = getSchedulerApp(appAttemptId); if (application == null) { - LOG.info("Calling allocate on removed " + - "or non existent application " + appAttemptId); + LOG.error("Calling allocate on removed or non existent application " + + appAttemptId.getApplicationId()); + return EMPTY_ALLOCATION; + } + + // The allocate may be the leftover from previous attempt, and it will + // impact current attempt, such as confuse the request and allocation for + // current attempt's AM container. + // Note outside precondition check for the attempt id may be + // outdated here, so double check it here is necessary. + if (!application.getApplicationAttemptId().equals(appAttemptId)) { + LOG.error("Calling allocate on previous or removed " + + "or non existent application attempt " + appAttemptId); return EMPTY_ALLOCATION; } @@ -985,25 +996,22 @@ public class FairScheduler extends * Assign preempted containers to the applications that have reserved * resources for preempted containers. 
* @param node Node to check - * @return assignment has occurred */ - static boolean assignPreemptedContainers(FSSchedulerNode node) { - boolean assignedAny = false; + static void assignPreemptedContainers(FSSchedulerNode node) { for (Entry entry : node.getPreemptionList().entrySet()) { FSAppAttempt app = entry.getKey(); Resource preemptionPending = Resources.clone(entry.getValue()); while (!app.isStopped() && !Resources.isNone(preemptionPending)) { Resource assigned = app.assignContainer(node); - if (Resources.isNone(assigned)) { + if (Resources.isNone(assigned) || + assigned.equals(FairScheduler.CONTAINER_RESERVED)) { // Fail to assign, let's not try further break; } - assignedAny = true; Resources.subtractFromNonNegative(preemptionPending, assigned); } } - return assignedAny; } @VisibleForTesting diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index a8d4f48591f..92a88b925d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -329,8 +329,19 @@ public class FifoScheduler extends ContainerUpdates updateRequests) { FifoAppAttempt application = getApplicationAttempt(applicationAttemptId); if (application == null) { - LOG.error("Calling allocate on removed " + - "or non-existent application " + applicationAttemptId); + LOG.error("Calling allocate on removed or non existent application " + + applicationAttemptId.getApplicationId()); + return EMPTY_ALLOCATION; + } + + // The allocate may be the leftover from previous attempt, and it will + // impact current attempt, such as confuse the request and allocation for + // current attempt's AM container. + // Note outside precondition check for the attempt id may be + // outdated here, so double check it here is necessary. 
+ if (!application.getApplicationAttemptId().equals(applicationAttemptId)) { + LOG.error("Calling allocate on previous or removed " + + "or non existent application attempt " + applicationAttemptId); return EMPTY_ALLOCATION; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java index 8c422551f6b..677aa14d29f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMContainerTokenSecretManager.java @@ -186,6 +186,31 @@ public class RMContainerTokenSecretManager extends null, null, ContainerType.TASK); } + /** + * Helper function for creating ContainerTokens. + * + * @param containerId containerId. + * @param containerVersion containerVersion. + * @param nodeId nodeId. + * @param appSubmitter appSubmitter. + * @param capability capability. + * @param priority priority. + * @param createTime createTime. + * @param logAggregationContext logAggregationContext. + * @param nodeLabelExpression nodeLabelExpression. + * @param containerType containerType. + * @return the container-token. + */ + public Token createContainerToken(ContainerId containerId, + int containerVersion, NodeId nodeId, String appSubmitter, + Resource capability, Priority priority, long createTime, + LogAggregationContext logAggregationContext, String nodeLabelExpression, + ContainerType containerType) { + return createContainerToken(containerId, containerVersion, nodeId, + appSubmitter, capability, priority, createTime, null, null, + ContainerType.TASK, ExecutionType.GUARANTEED); + } + /** * Helper function for creating ContainerTokens * @@ -199,13 +224,14 @@ public class RMContainerTokenSecretManager extends * @param logAggregationContext Log Aggregation Context * @param nodeLabelExpression Node Label Expression * @param containerType Container Type + * @param execType Execution Type * @return the container-token */ public Token createContainerToken(ContainerId containerId, int containerVersion, NodeId nodeId, String appSubmitter, Resource capability, Priority priority, long createTime, LogAggregationContext logAggregationContext, String nodeLabelExpression, - ContainerType containerType) { + ContainerType containerType, ExecutionType execType) { byte[] password; ContainerTokenIdentifier tokenIdentifier; long expiryTimeStamp = @@ -220,7 +246,7 @@ public class RMContainerTokenSecretManager extends this.currentMasterKey.getMasterKey().getKeyId(), ResourceManager.getClusterTimeStamp(), priority, createTime, logAggregationContext, nodeLabelExpression, containerType, - ExecutionType.GUARANTEED); + execType); password = this.createPassword(tokenIdentifier); } finally { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java index 4225afd31df..b7ce10540e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java @@ -37,22 +37,22 @@ public class AboutBlock extends HtmlBlock { @Override protected void render(Block html) { - html._(MetricsOverviewTable.class); + html.__(MetricsOverviewTable.class); ResourceManager rm = getInstance(ResourceManager.class); ClusterInfo cinfo = new ClusterInfo(rm); info("Cluster overview"). - _("Cluster ID:", cinfo.getClusterId()). - _("ResourceManager state:", cinfo.getState()). - _("ResourceManager HA state:", cinfo.getHAState()). - _("ResourceManager HA zookeeper connection state:", + __("Cluster ID:", cinfo.getClusterId()). + __("ResourceManager state:", cinfo.getState()). + __("ResourceManager HA state:", cinfo.getHAState()). + __("ResourceManager HA zookeeper connection state:", cinfo.getHAZookeeperConnectionState()). - _("ResourceManager RMStateStore:", cinfo.getRMStateStore()). - _("ResourceManager started on:", Times.format(cinfo.getStartedOn())). - _("ResourceManager version:", cinfo.getRMBuildVersion() + + __("ResourceManager RMStateStore:", cinfo.getRMStateStore()). + __("ResourceManager started on:", Times.format(cinfo.getStartedOn())). + __("ResourceManager version:", cinfo.getRMBuildVersion() + " on " + cinfo.getRMVersionBuiltOn()). - _("Hadoop version:", cinfo.getHadoopBuildVersion() + + __("Hadoop version:", cinfo.getHadoopBuildVersion() + " on " + cinfo.getHadoopVersionBuiltOn()); - html._(InfoBlock.class); + html.__(InfoBlock.class); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutPage.java index ef0fdcf1990..f8c04068dde 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutPage.java @@ -22,7 +22,7 @@ import org.apache.hadoop.yarn.webapp.SubView; public class AboutPage extends RmView { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java index 45f188782d7..89e2decdf24 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java @@ -31,7 +31,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams; public class AppAttemptPage extends RmView { @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String appAttemptId = $(YarnWebParams.APPLICATION_ATTEMPT_ID); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppLogAggregationStatusPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppLogAggregationStatusPage.java index ccb53dde2b9..27fb43a0219 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppLogAggregationStatusPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppLogAggregationStatusPage.java @@ -25,7 +25,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams; public class AppLogAggregationStatusPage extends RmView{ @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String appId = $(YarnWebParams.APPLICATION_ID); set( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java index 0c5516a304a..7036f33afb2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppPage.java @@ -30,7 +30,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams; public class AppPage extends RmView { @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String appId = $(YarnWebParams.APPLICATION_ID); set( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java index 29889ecc6e9..fac100fd51d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlockWithMetrics.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import org.apache.hadoop.yarn.server.webapp.AppsBlock; import 
org.apache.hadoop.yarn.webapp.view.HtmlBlock; /** @@ -26,7 +25,7 @@ import org.apache.hadoop.yarn.webapp.view.HtmlBlock; */ class AppsBlockWithMetrics extends HtmlBlock { @Override public void render(Block html) { - html._(MetricsOverviewTable.class); - html._(RMAppsBlock.class); + html.__(MetricsOverviewTable.class); + html.__(RMAppsBlock.class); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java index 292c5f37133..f3ab5b0afad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java @@ -47,12 +47,12 @@ import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.yarn.webapp.ResponseInfo; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.UL; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -105,7 +105,7 @@ class CapacitySchedulerPage extends RmView { info("\'" + lqinfo.getQueuePath().substring(5) + "\' Queue Status for Partition \'" + nodeLabelDisplay + "\'"); renderQueueCapacityInfo(ri, nodeLabel); - html._(InfoBlock.class); + html.__(InfoBlock.class); // clear the info contents so this queue's info doesn't accumulate into // another queue's info ri.clear(); @@ -113,10 +113,10 @@ class CapacitySchedulerPage extends RmView { // second display the queue specific details : ri = info("\'" + lqinfo.getQueuePath().substring(5) + "\' Queue Status") - ._("Queue State:", lqinfo.getQueueState()); + .__("Queue State:", lqinfo.getQueueState()); renderCommonLeafQueueInfo(ri); - html._(InfoBlock.class); + html.__(InfoBlock.class); // clear the info contents so this queue's info doesn't accumulate into // another queue's info ri.clear(); @@ -125,10 +125,10 @@ class CapacitySchedulerPage extends RmView { private void renderLeafQueueInfoWithoutParition(Block html) { ResponseInfo ri = info("\'" + lqinfo.getQueuePath().substring(5) + "\' Queue Status") - ._("Queue State:", lqinfo.getQueueState()); + .__("Queue State:", lqinfo.getQueueState()); renderQueueCapacityInfo(ri, ""); renderCommonLeafQueueInfo(ri); - html._(InfoBlock.class); + html.__(InfoBlock.class); // clear the info contents so this queue's info doesn't accumulate into // another queue's info ri.clear(); @@ -155,40 +155,40 
@@ class CapacitySchedulerPage extends RmView { ? new ResourceInfo(Resources.none()) : resourceUsages.getAmUsed(); ri. - _("Used Capacity:", percent(capacities.getUsedCapacity() / 100)). - _("Configured Capacity:", percent(capacities.getCapacity() / 100)). - _("Configured Max Capacity:", percent(capacities.getMaxCapacity() / 100)). - _("Absolute Used Capacity:", percent(capacities.getAbsoluteUsedCapacity() / 100)). - _("Absolute Configured Capacity:", percent(capacities.getAbsoluteCapacity() / 100)). - _("Absolute Configured Max Capacity:", percent(capacities.getAbsoluteMaxCapacity() / 100)). - _("Used Resources:", resourceUsages.getUsed().toString()). - _("Configured Max Application Master Limit:", StringUtils.format("%.1f", + __("Used Capacity:", percent(capacities.getUsedCapacity() / 100)). + __("Configured Capacity:", percent(capacities.getCapacity() / 100)). + __("Configured Max Capacity:", percent(capacities.getMaxCapacity() / 100)). + __("Absolute Used Capacity:", percent(capacities.getAbsoluteUsedCapacity() / 100)). + __("Absolute Configured Capacity:", percent(capacities.getAbsoluteCapacity() / 100)). + __("Absolute Configured Max Capacity:", percent(capacities.getAbsoluteMaxCapacity() / 100)). + __("Used Resources:", resourceUsages.getUsed().toString()). + __("Configured Max Application Master Limit:", StringUtils.format("%.1f", capacities.getMaxAMLimitPercentage())). - _("Max Application Master Resources:", + __("Max Application Master Resources:", resourceUsages.getAMLimit().toString()). - _("Used Application Master Resources:", + __("Used Application Master Resources:", amUsed.toString()). - _("Max Application Master Resources Per User:", + __("Max Application Master Resources Per User:", userAMResourceLimit.toString()); } private void renderCommonLeafQueueInfo(ResponseInfo ri) { ri. - _("Num Schedulable Applications:", Integer.toString(lqinfo.getNumActiveApplications())). - _("Num Non-Schedulable Applications:", Integer.toString(lqinfo.getNumPendingApplications())). - _("Num Containers:", Integer.toString(lqinfo.getNumContainers())). - _("Max Applications:", Integer.toString(lqinfo.getMaxApplications())). - _("Max Applications Per User:", Integer.toString(lqinfo.getMaxApplicationsPerUser())). - _("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%"). - _("Configured User Limit Factor:", lqinfo.getUserLimitFactor()). - _("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels())). - _("Ordering Policy: ", lqinfo.getOrderingPolicyInfo()). - _("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled"). - _("Default Node Label Expression:", + __("Num Schedulable Applications:", Integer.toString(lqinfo.getNumActiveApplications())). + __("Num Non-Schedulable Applications:", Integer.toString(lqinfo.getNumPendingApplications())). + __("Num Containers:", Integer.toString(lqinfo.getNumContainers())). + __("Max Applications:", Integer.toString(lqinfo.getMaxApplications())). + __("Max Applications Per User:", Integer.toString(lqinfo.getMaxApplicationsPerUser())). + __("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%"). + __("Configured User Limit Factor:", lqinfo.getUserLimitFactor()). + __("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels())). + __("Ordering Policy: ", lqinfo.getOrderingPolicyInfo()). + __("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled"). + __("Default Node Label Expression:", lqinfo.getDefaultNodeLabelExpression() == null ? 
NodeLabel.DEFAULT_NODE_LABEL_PARTITION : lqinfo.getDefaultNodeLabelExpression()). - _("Default Application Priority:", + __("Default Application Priority:", Integer.toString(lqinfo.getDefaultApplicationPriority())); } } @@ -208,14 +208,14 @@ class CapacitySchedulerPage extends RmView { protected void render(Block html) { TBODY> tbody = html.table("#userinfo").thead().$class("ui-widget-header").tr().th() - .$class("ui-state-default")._("User Name")._().th() - .$class("ui-state-default")._("Max Resource")._().th() - .$class("ui-state-default")._("Weight")._().th() - .$class("ui-state-default")._("Used Resource")._().th() - .$class("ui-state-default")._("Max AM Resource")._().th() - .$class("ui-state-default")._("Used AM Resource")._().th() - .$class("ui-state-default")._("Schedulable Apps")._().th() - .$class("ui-state-default")._("Non-Schedulable Apps")._()._()._() + .$class("ui-state-default").__("User Name").__().th() + .$class("ui-state-default").__("Max Resource").__().th() + .$class("ui-state-default").__("Weight").__().th() + .$class("ui-state-default").__("Used Resource").__().th() + .$class("ui-state-default").__("Max AM Resource").__().th() + .$class("ui-state-default").__("Used AM Resource").__().th() + .$class("ui-state-default").__("Schedulable Apps").__().th() + .$class("ui-state-default").__("Non-Schedulable Apps").__().__().__() .tbody(); ArrayList users = lqinfo.getUsers().getUsersList(); @@ -240,11 +240,11 @@ class CapacitySchedulerPage extends RmView { .td(resourceUsages.getAMLimit().toString()) .td(amUsed.toString()) .td(Integer.toString(userInfo.getNumActiveApplications())) - .td(Integer.toString(userInfo.getNumPendingApplications()))._(); + .td(Integer.toString(userInfo.getNumPendingApplications())).__(); } - html.div().$class("usersinfo").h5("Active Users Info")._(); - tbody._()._(); + html.div().$class("usersinfo").h5("Active Users Info").__(); + tbody.__().__(); } } @@ -288,25 +288,25 @@ class CapacitySchedulerPage extends RmView { a(_Q).$style(width(absMaxCap * Q_MAX_WIDTH)). $title(join("Absolute Capacity:", percent(absCap))). span().$style(join(Q_GIVEN, ";font-size:1px;", width(absCap/absMaxCap))). - _('.')._(). + __('.').__(). span().$style(join(width(absUsedCap/absMaxCap), ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : Q_UNDER)). - _('.')._(). - span(".q", "Queue: "+info.getQueuePath().substring(5))._(). + __('.').__(). + span(".q", "Queue: "+info.getQueuePath().substring(5)).__(). span().$class("qstats").$style(left(Q_STATS_POS)). 
- _(join(percent(used), " used"))._(); + __(join(percent(used), " used")).__(); csqinfo.qinfo = info; if (info.getQueues() == null) { - li.ul("#lq").li()._(LeafQueueInfoBlock.class)._()._(); - li.ul("#lq").li()._(QueueUsersInfoBlock.class)._()._(); + li.ul("#lq").li().__(LeafQueueInfoBlock.class).__().__(); + li.ul("#lq").li().__(QueueUsersInfoBlock.class).__().__(); } else { - li._(QueueBlock.class); + li.__(QueueBlock.class); } - li._(); + li.__(); } - ul._(); + ul.__(); } } @@ -327,7 +327,7 @@ class CapacitySchedulerPage extends RmView { @Override public void render(Block html) { - html._(MetricsOverviewTable.class); + html.__(MetricsOverviewTable.class); UserGroupInformation callerUGI = this.getCallerUGI(); boolean isAdmin = false; @@ -347,10 +347,10 @@ class CapacitySchedulerPage extends RmView { .$style( "border-style: solid; border-color: #000000; border-width: 1px;" + " cursor: hand; cursor: pointer; border-radius: 4px") - .$onclick("confirmAction()").b("Dump scheduler logs")._().select() - .$id("time").option().$value("60")._("1 min")._().option() - .$value("300")._("5 min")._().option().$value("600")._("10 min")._() - ._()._(); + .$onclick("confirmAction()").b("Dump scheduler logs").__().select() + .$id("time").option().$value("60").__("1 min").__().option() + .$value("300").__("5 min").__().option().$value("600").__("10 min").__() + .__().__(); StringBuilder script = new StringBuilder(); script @@ -377,36 +377,36 @@ class CapacitySchedulerPage extends RmView { .append(" console.log(data);").append(" });").append(" }") .append("}"); - html.script().$type("text/javascript")._(script.toString())._(); + html.script().$type("text/javascript").__(script.toString()).__(); } UL>> ul = html. div("#cs-wrapper.ui-widget"). div(".ui-widget-header.ui-corner-top"). - _("Application Queues")._(). + __("Application Queues").__(). div("#cs.ui-widget-content.ui-corner-bottom"). ul(); if (cs == null) { ul. li(). a(_Q).$style(width(Q_MAX_WIDTH)). - span().$style(Q_END)._("100% ")._(). - span(".q", "default")._()._(); + span().$style(Q_END).__("100% ").__(). + span(".q", "default").__().__(); } else { ul. li().$style("margin-bottom: 1em"). - span().$style("font-weight: bold")._("Legend:")._(). + span().$style("font-weight: bold").__("Legend:").__(). span().$class("qlegend ui-corner-all").$style(Q_GIVEN). - _("Capacity")._(). + __("Capacity").__(). span().$class("qlegend ui-corner-all").$style(Q_UNDER). - _("Used")._(). + __("Used").__(). span().$class("qlegend ui-corner-all").$style(Q_OVER). - _("Used (over capacity)")._(). + __("Used (over capacity)").__(). span().$class("qlegend ui-corner-all ui-state-default"). - _("Max Capacity")._(). + __("Max Capacity").__(). span().$class("qlegend ui-corner-all").$style(ACTIVE_USER). - _("Users Requesting Resources")._(). - _(); + __("Users Requesting Resources").__(). + __(); float used = 0; @@ -433,11 +433,11 @@ class CapacitySchedulerPage extends RmView { ul.li(). a(_Q).$style(width(Q_MAX_WIDTH)). span().$style(join(width(used), ";left:0%;", - used > 1 ? Q_OVER : Q_UNDER))._(".")._(). - span(".q", "Queue: root")._(). + used > 1 ? Q_OVER : Q_UNDER)).__(".").__(). + span(".q", "Queue: root").__(). span().$class("qstats").$style(left(Q_STATS_POS)). - _(join(percent(used), " used"))._(). - _(QueueBlock.class)._(); + __(join(percent(used), " used")).__(). + __(QueueBlock.class).__(); } else { for (RMNodeLabel label : nodeLabelsInfo) { csqinfo.qinfo = null; @@ -453,29 +453,29 @@ class CapacitySchedulerPage extends RmView { ul.li(). 
a(_Q).$style(width(Q_MAX_WIDTH)). span().$style(join(width(used), ";left:0%;", - used > 1 ? Q_OVER : Q_UNDER))._(".")._(). - span(".q", partitionUiTag)._(). + used > 1 ? Q_OVER : Q_UNDER)).__(".").__(). + span(".q", partitionUiTag).__(). span().$class("qstats").$style(left(Q_STATS_POS)). - _(join(percent(used), " used"))._()._(); + __(join(percent(used), " used")).__().__(); //for the queue hierarchy under label UL underLabel = html.ul("#pq"); underLabel.li(). a(_Q).$style(width(Q_MAX_WIDTH)). span().$style(join(width(used), ";left:0%;", - used > 1 ? Q_OVER : Q_UNDER))._(".")._(). - span(".q", "Queue: root")._(). + used > 1 ? Q_OVER : Q_UNDER)).__(".").__(). + span(".q", "Queue: root").__(). span().$class("qstats").$style(left(Q_STATS_POS)). - _(join(percent(used), " used"))._(). - _(QueueBlock.class)._()._(); + __(join(percent(used), " used")).__(). + __(QueueBlock.class).__().__(); } } } - ul._()._(). + ul.__().__(). script().$type("text/javascript"). - _("$('#cs').hide();")._()._(). - _(RMAppsBlock.class); - html._(HealthBlock.class); + __("$('#cs').hide();").__().__(). + __(RMAppsBlock.class); + html.__(HealthBlock.class); } } @@ -495,13 +495,13 @@ class CapacitySchedulerPage extends RmView { div.h4("Aggregate scheduler counts"); TBODY>> tbody = div.table("#lastrun").thead().$class("ui-widget-header").tr().th() - .$class("ui-state-default")._("Total Container Allocations(count)") - ._().th().$class("ui-state-default") - ._("Total Container Releases(count)")._().th() + .$class("ui-state-default").__("Total Container Allocations(count)") + .__().th().$class("ui-state-default") + .__("Total Container Releases(count)").__().th() .$class("ui-state-default") - ._("Total Fulfilled Reservations(count)")._().th() - .$class("ui-state-default")._("Total Container Preemptions(count)") - ._()._()._().tbody(); + .__("Total Fulfilled Reservations(count)").__().th() + .$class("ui-state-default").__("Total Container Preemptions(count)") + .__().__().__().tbody(); tbody .$class("ui-widget-content") .tr() @@ -512,15 +512,15 @@ class CapacitySchedulerPage extends RmView { String.valueOf(cs.getRootQueueMetrics() .getAggegatedReleasedContainers())) .td(healthInfo.getAggregateFulFilledReservationsCount().toString()) - .td(healthInfo.getAggregatePreemptionCount().toString())._()._()._(); + .td(healthInfo.getAggregatePreemptionCount().toString()).__().__().__(); div.h4("Last scheduler run"); tbody = div.table("#lastrun").thead().$class("ui-widget-header").tr().th() - .$class("ui-state-default")._("Time")._().th() - .$class("ui-state-default")._("Allocations(count - resources)")._() - .th().$class("ui-state-default")._("Reservations(count - resources)") - ._().th().$class("ui-state-default")._("Releases(count - resources)") - ._()._()._().tbody(); + .$class("ui-state-default").__("Time").__().th() + .$class("ui-state-default").__("Allocations(count - resources)").__() + .th().$class("ui-state-default").__("Reservations(count - resources)") + .__().th().$class("ui-state-default").__("Releases(count - resources)") + .__().__().__().tbody(); tbody .$class("ui-widget-content") .tr() @@ -533,7 +533,7 @@ class CapacitySchedulerPage extends RmView { + healthInfo.getResourcesReserved().toString()) .td( healthInfo.getReleaseCount().toString() + " - " - + healthInfo.getResourcesReleased().toString())._()._()._(); + + healthInfo.getResourcesReleased().toString()).__().__().__(); Map info = new HashMap<>(); info.put("Allocation", healthInfo.getLastAllocationDetails()); info.put("Reservation", 
healthInfo.getLastReservationDetails()); @@ -549,10 +549,10 @@ class CapacitySchedulerPage extends RmView { div.h4("Last " + entry.getKey()); tbody = div.table(table).thead().$class("ui-widget-header").tr().th() - .$class("ui-state-default")._("Time")._().th() - .$class("ui-state-default")._("Container Id")._().th() - .$class("ui-state-default")._("Node Id")._().th() - .$class("ui-state-default")._("Queue")._()._()._().tbody(); + .$class("ui-state-default").__("Time").__().th() + .$class("ui-state-default").__("Container Id").__().th() + .$class("ui-state-default").__("Node Id").__().th() + .$class("ui-state-default").__("Queue").__().__().__().tbody(); SchedulerHealth.DetailedInformation di = entry.getValue(); if (di.getTimestamp() != 0) { containerId = di.getContainerId().toString(); @@ -561,26 +561,26 @@ class CapacitySchedulerPage extends RmView { } tbody.$class("ui-widget-content").tr() .td(Times.format(di.getTimestamp())).td(containerId).td(nodeId) - .td(queue)._()._()._(); + .td(queue).__().__().__(); } - div._(); + div.__(); } } - @Override protected void postHead(Page.HTML<_> html) { + @Override protected void postHead(Page.HTML<__> html) { html. style().$type("text/css"). - _("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }", + __("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }", "#cs ul { list-style: none }", "#cs a { font-weight: normal; margin: 2px; position: relative }", "#cs a span { font-weight: normal; font-size: 80% }", "#cs-wrapper .ui-widget-header { padding: 0.2em 0.5em }", ".qstats { font-weight: normal; font-size: 80%; position: absolute }", ".qlegend { font-weight: normal; padding: 0 1em; margin: 1em }", - "table.info tr th {width: 50%}")._(). // to center info table + "table.info tr th {width: 50%}").__(). // to center info table script("/static/jt/jquery.jstree.js"). script().$type("text/javascript"). - _("$(function() {", + __("$(function() {", " $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');", " $('#cs').bind('loaded.jstree', function (e, data) {", " var callback = { call:reopenQueryNodes }", @@ -603,8 +603,8 @@ class CapacitySchedulerPage extends RmView { " $('#apps').dataTable().fnFilter(q, 4, true);", " });", " $('#cs').show();", - "});")._(). - _(SchedulerPageUtil.QueueBlockUtil.class); + "});").__(). 
+ __(SchedulerPageUtil.QueueBlockUtil.class); } @Override protected Class content() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java index b8cd1adbd58..2cd209b6eae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ContainerPage.java @@ -28,7 +28,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams; public class ContainerPage extends RmView { @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String containerId = $(YarnWebParams.CONTAINER_ID); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java index d4420640101..0b0884b3a55 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java @@ -26,9 +26,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoSchedule import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo; import org.apache.hadoop.yarn.server.webapp.AppsBlock; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.UL; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -53,16 +53,16 @@ class DefaultSchedulerPage extends RmView { @Override public void render(Block html) { info("\'" + sinfo.getQueueName() + "\' Queue Status"). - _("Queue State:" , sinfo.getState()). - _("Minimum Queue Memory Capacity:" , Long.toString(sinfo.getMinQueueMemoryCapacity())). - _("Maximum Queue Memory Capacity:" , Long.toString(sinfo.getMaxQueueMemoryCapacity())). - _("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())). - _("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())). - _("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())). - _("Total Node Capacity:" , Integer.toString(sinfo.getTotalNodeCapacity())). - _("Number of Node Containers:" , Integer.toString(sinfo.getNumContainers())); + __("Queue State:" , sinfo.getState()). + __("Minimum Queue Memory Capacity:" , Long.toString(sinfo.getMinQueueMemoryCapacity())). 
+ __("Maximum Queue Memory Capacity:" , Long.toString(sinfo.getMaxQueueMemoryCapacity())). + __("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())). + __("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())). + __("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())). + __("Total Node Capacity:" , Integer.toString(sinfo.getTotalNodeCapacity())). + __("Number of Node Containers:" , Integer.toString(sinfo.getNumContainers())); - html._(InfoBlock.class); + html.__(InfoBlock.class); } } @@ -77,11 +77,11 @@ class DefaultSchedulerPage extends RmView { @Override public void render(Block html) { - html._(MetricsOverviewTable.class); + html.__(MetricsOverviewTable.class); UL>> ul = html. div("#cs-wrapper.ui-widget"). div(".ui-widget-header.ui-corner-top"). - _("FifoScheduler Queue")._(). + __("FifoScheduler Queue").__(). div("#cs.ui-widget-content.ui-corner-bottom"). ul(); @@ -89,8 +89,8 @@ class DefaultSchedulerPage extends RmView { ul. li(). a(_Q).$style(width(WIDTH_F)). - span().$style(Q_END)._("100% ")._(). - span(".q", "default")._()._(); + span().$style(Q_END).__("100% ").__(). + span(".q", "default").__().__(); } else { float used = sinfo.getUsedCapacity(); float set = sinfo.getCapacity(); @@ -99,33 +99,33 @@ class DefaultSchedulerPage extends RmView { li(). a(_Q).$style(width(WIDTH_F)). $title(join("used:", percent(used))). - span().$style(Q_END)._("100%")._(). + span().$style(Q_END).__("100%").__(). span().$style(join(width(delta), ';', used > set ? OVER : UNDER, - ';', used > set ? left(set) : left(used)))._(".")._(). - span(".q", sinfo.getQueueName())._(). - _(QueueInfoBlock.class)._(); + ';', used > set ? left(set) : left(used))).__(".").__(). + span(".q", sinfo.getQueueName()).__(). + __(QueueInfoBlock.class).__(); } - ul._()._(). + ul.__().__(). script().$type("text/javascript"). - _("$('#cs').hide();")._()._(). - _(AppsBlock.class); + __("$('#cs').hide();").__().__(). + __(AppsBlock.class); } } - @Override protected void postHead(Page.HTML<_> html) { + @Override protected void postHead(Page.HTML<__> html) { html. style().$type("text/css"). - _("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }", + __("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }", "#cs ul { list-style: none }", "#cs a { font-weight: normal; margin: 2px; position: relative }", "#cs a span { font-weight: normal; font-size: 80% }", "#cs-wrapper .ui-widget-header { padding: 0.2em 0.5em }", - "table.info tr th {width: 50%}")._(). // to center info table + "table.info tr th {width: 50%}").__(). // to center info table script("/static/jt/jquery.jstree.js"). script().$type("text/javascript"). 
- _("$(function() {", + __("$(function() {", " $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');", " $('#cs').bind('loaded.jstree', function (e, data) {", " data.inst.open_all(); }).", @@ -142,7 +142,7 @@ class DefaultSchedulerPage extends RmView { " $('#apps').dataTable().fnFilter(q, 4);", " });", " $('#cs').show();", - "});")._(); + "});").__(); } @Override protected Class content() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java index 963e53f8037..6fe5c3a9798 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java @@ -34,6 +34,6 @@ public class ErrorBlock extends HtmlBlock { @Override protected void render(Block html) { - html.p()._($(ERROR_MESSAGE))._(); + html.p().__($(ERROR_MESSAGE)).__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java index b7a7a9372c8..ac88f861feb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java @@ -41,9 +41,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInfo; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; @@ -98,7 +98,7 @@ public class FairSchedulerAppsBlock extends HtmlBlock { th(".reservedCpu", "Reserved CPU VCores"). th(".reservedMemory", "Reserved Memory MB"). th(".progress", "Progress"). - th(".ui", "Tracking UI")._()._(). + th(".ui", "Tracking UI").__().__(). tbody(); Collection reqAppStates = null; String reqStateString = $(APP_STATE); @@ -168,8 +168,8 @@ public class FairSchedulerAppsBlock extends HtmlBlock { } appsTableData.append("]"); html.script().$type("text/javascript"). 
- _("var appsTableData=" + appsTableData)._(); + __("var appsTableData=" + appsTableData).__(); - tbody._()._(); + tbody.__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java index 5f46841a48f..ef417d4760f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerPage.java @@ -31,10 +31,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerQue import org.apache.hadoop.yarn.server.webapp.WebPageUtils; import org.apache.hadoop.yarn.webapp.ResponseInfo; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.UL; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -70,21 +70,23 @@ public class FairSchedulerPage extends RmView { @Override protected void render(Block html) { ResponseInfo ri = info("\'" + qinfo.getQueueName() + "\' Queue Status"). - _("Used Resources:", qinfo.getUsedResources().toString()). - _("Demand Resources:", qinfo.getDemandResources().toString()). - _("Num Active Applications:", qinfo.getNumActiveApplications()). - _("Num Pending Applications:", qinfo.getNumPendingApplications()). - _("Min Resources:", qinfo.getMinResources().toString()). - _("Max Resources:", qinfo.getMaxResources().toString()). - _("Reserved Resources:", qinfo.getReservedResources().toString()); + __("Used Resources:", qinfo.getUsedResources().toString()). + __("Demand Resources:", qinfo.getDemandResources().toString()). + __("AM Used Resources:", qinfo.getAMUsedResources().toString()). + __("AM Max Resources:", qinfo.getAMMaxResources().toString()). + __("Num Active Applications:", qinfo.getNumActiveApplications()). + __("Num Pending Applications:", qinfo.getNumPendingApplications()). + __("Min Resources:", qinfo.getMinResources().toString()). + __("Max Resources:", qinfo.getMaxResources().toString()). 
+ __("Reserved Resources:", qinfo.getReservedResources().toString()); int maxApps = qinfo.getMaxApplications(); if (maxApps < Integer.MAX_VALUE) { - ri._("Max Running Applications:", qinfo.getMaxApplications()); + ri.__("Max Running Applications:", qinfo.getMaxApplications()); } - ri._(STEADY_FAIR_SHARE + ":", qinfo.getSteadyFairShare().toString()); - ri._(INSTANTANEOUS_FAIR_SHARE + ":", qinfo.getFairShare().toString()); - ri._("Preemptable:", qinfo.isPreemptable()); - html._(InfoBlock.class); + ri.__(STEADY_FAIR_SHARE + ":", qinfo.getSteadyFairShare().toString()); + ri.__(INSTANTANEOUS_FAIR_SHARE + ":", qinfo.getFairShare().toString()); + ri.__("Preemptable:", qinfo.isPreemptable()); + html.__(InfoBlock.class); // clear the info contents so this queue's info doesn't accumulate into another queue's info ri.clear(); @@ -102,17 +104,17 @@ public class FairSchedulerPage extends RmView { @Override protected void render(Block html) { ResponseInfo ri = info("\'" + qinfo.getQueueName() + "\' Queue Status"). - _("Used Resources:", qinfo.getUsedResources().toString()). - _("Min Resources:", qinfo.getMinResources().toString()). - _("Max Resources:", qinfo.getMaxResources().toString()). - _("Reserved Resources:", qinfo.getReservedResources().toString()); + __("Used Resources:", qinfo.getUsedResources().toString()). + __("Min Resources:", qinfo.getMinResources().toString()). + __("Max Resources:", qinfo.getMaxResources().toString()). + __("Reserved Resources:", qinfo.getReservedResources().toString()); int maxApps = qinfo.getMaxApplications(); if (maxApps < Integer.MAX_VALUE) { - ri._("Max Running Applications:", qinfo.getMaxApplications()); + ri.__("Max Running Applications:", qinfo.getMaxApplications()); } - ri._(STEADY_FAIR_SHARE + ":", qinfo.getSteadyFairShare().toString()); - ri._(INSTANTANEOUS_FAIR_SHARE + ":", qinfo.getFairShare().toString()); - html._(InfoBlock.class); + ri.__(STEADY_FAIR_SHARE + ":", qinfo.getSteadyFairShare().toString()); + ri.__(INSTANTANEOUS_FAIR_SHARE + ":", qinfo.getFairShare().toString()); + html.__(InfoBlock.class); // clear the info contents so this queue's info doesn't accumulate into another queue's info ri.clear(); @@ -141,28 +143,28 @@ public class FairSchedulerPage extends RmView { $title(join(join(STEADY_FAIR_SHARE + ":", percent(steadyFairShare)), join(" " + INSTANTANEOUS_FAIR_SHARE + ":", percent(instantaneousFairShare)))). span().$style(join(Q_GIVEN, ";font-size:1px;", width(steadyFairShare / capacity))). - _('.')._(). + __('.').__(). span().$style(join(Q_INSTANTANEOUS_FS, ";font-size:1px;", width(instantaneousFairShare/capacity))). - _('.')._(). + __('.').__(). span().$style(join(width(used/capacity), ";font-size:1px;left:0%;", used > instantaneousFairShare ? Q_OVER : Q_UNDER)). - _('.')._(). - span(".q", info.getQueueName())._(). + __('.').__(). + span(".q", info.getQueueName()).__(). span().$class("qstats").$style(left(Q_STATS_POS)). 
- _(join(percent(used), " used"))._(); + __(join(percent(used), " used")).__(); fsqinfo.qinfo = info; if (info instanceof FairSchedulerLeafQueueInfo) { - li.ul("#lq").li()._(LeafQueueBlock.class)._()._(); + li.ul("#lq").li().__(LeafQueueBlock.class).__().__(); } else { - li.ul("#lq").li()._(ParentQueueBlock.class)._()._(); - li._(QueueBlock.class); + li.ul("#lq").li().__(ParentQueueBlock.class).__().__(); + li.__(QueueBlock.class); } - li._(); + li.__(); } - ul._(); + ul.__(); } } @@ -177,19 +179,19 @@ public class FairSchedulerPage extends RmView { @Override public void render(Block html) { - html._(MetricsOverviewTable.class); + html.__(MetricsOverviewTable.class); UL>> ul = html. div("#cs-wrapper.ui-widget"). div(".ui-widget-header.ui-corner-top"). - _("Application Queues")._(). + __("Application Queues").__(). div("#cs.ui-widget-content.ui-corner-bottom"). ul(); if (fs == null) { ul. li(). a(_Q).$style(width(Q_MAX_WIDTH)). - span().$style(Q_END)._("100% ")._(). - span(".q", "default")._()._(); + span().$style(Q_END).__("100% ").__(). + span(".q", "default").__().__(); } else { FairSchedulerInfo sinfo = new FairSchedulerInfo(fs); fsqinfo.qinfo = sinfo.getRootQueueInfo(); @@ -197,52 +199,52 @@ public class FairSchedulerPage extends RmView { ul. li().$style("margin-bottom: 1em"). - span().$style("font-weight: bold")._("Legend:")._(). + span().$style("font-weight: bold").__("Legend:").__(). span().$class("qlegend ui-corner-all").$style(Q_GIVEN). $title("The steady fair shares consider all queues, " + "both active (with running applications) and inactive."). - _(STEADY_FAIR_SHARE)._(). + __(STEADY_FAIR_SHARE).__(). span().$class("qlegend ui-corner-all").$style(Q_INSTANTANEOUS_FS). $title("The instantaneous fair shares consider only active " + "queues (with running applications)."). - _(INSTANTANEOUS_FAIR_SHARE)._(). + __(INSTANTANEOUS_FAIR_SHARE).__(). span().$class("qlegend ui-corner-all").$style(Q_UNDER). - _("Used")._(). + __("Used").__(). span().$class("qlegend ui-corner-all").$style(Q_OVER). - _("Used (over fair share)")._(). + __("Used (over fair share)").__(). span().$class("qlegend ui-corner-all ui-state-default"). - _("Max Capacity")._(). - _(). + __("Max Capacity").__(). + __(). li(). a(_Q).$style(width(Q_MAX_WIDTH)). span().$style(join(width(used), ";left:0%;", - used > 1 ? Q_OVER : Q_UNDER))._(".")._(). - span(".q", "root")._(). + used > 1 ? Q_OVER : Q_UNDER)).__(".").__(). + span(".q", "root").__(). span().$class("qstats").$style(left(Q_STATS_POS)). - _(join(percent(used), " used"))._(). - _(QueueBlock.class)._(); + __(join(percent(used), " used")).__(). + __(QueueBlock.class).__(); } - ul._()._(). + ul.__().__(). script().$type("text/javascript"). - _("$('#cs').hide();")._()._(). - _(FairSchedulerAppsBlock.class); + __("$('#cs').hide();").__().__(). + __(FairSchedulerAppsBlock.class); } } - @Override protected void postHead(Page.HTML<_> html) { + @Override protected void postHead(Page.HTML<__> html) { html. style().$type("text/css"). - _("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }", + __("#cs { padding: 0.5em 0 1em 0; margin-bottom: 1em; position: relative }", "#cs ul { list-style: none }", "#cs a { font-weight: normal; margin: 2px; position: relative }", "#cs a span { font-weight: normal; font-size: 80% }", "#cs-wrapper .ui-widget-header { padding: 0.2em 0.5em }", ".qstats { font-weight: normal; font-size: 80%; position: absolute }", ".qlegend { font-weight: normal; padding: 0 1em; margin: 1em }", - "table.info tr th {width: 50%}")._(). 
// to center info table + "table.info tr th {width: 50%}").__(). // to center info table script("/static/jt/jquery.jstree.js"). script().$type("text/javascript"). - _("$(function() {", + __("$(function() {", " $('#cs a span').addClass('ui-corner-all').css('position', 'absolute');", " $('#cs').bind('loaded.jstree', function (e, data) {", " var callback = { call:reopenQueryNodes }", @@ -262,8 +264,8 @@ public class FairSchedulerPage extends RmView { " $('#apps').dataTable().fnFilter(q, 4, true);", " });", " $('#cs').show();", - "});")._(). - _(SchedulerPageUtil.QueueBlockUtil.class); + "});").__(). + __(SchedulerPageUtil.QueueBlockUtil.class); } @Override protected Class content() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java index fe7b2470044..f6b1a943a60 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java @@ -24,8 +24,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsIn import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -61,19 +61,19 @@ public class MetricsOverviewTable extends HtmlBlock { table("#metricsoverview"). thead().$class("ui-widget-header"). tr(). - th().$class("ui-state-default")._("Apps Submitted")._(). - th().$class("ui-state-default")._("Apps Pending")._(). - th().$class("ui-state-default")._("Apps Running")._(). - th().$class("ui-state-default")._("Apps Completed")._(). - th().$class("ui-state-default")._("Containers Running")._(). - th().$class("ui-state-default")._("Memory Used")._(). - th().$class("ui-state-default")._("Memory Total")._(). - th().$class("ui-state-default")._("Memory Reserved")._(). - th().$class("ui-state-default")._("VCores Used")._(). - th().$class("ui-state-default")._("VCores Total")._(). - th().$class("ui-state-default")._("VCores Reserved")._(). - _(). - _(). + th().$class("ui-state-default").__("Apps Submitted").__(). + th().$class("ui-state-default").__("Apps Pending").__(). + th().$class("ui-state-default").__("Apps Running").__(). + th().$class("ui-state-default").__("Apps Completed").__(). + th().$class("ui-state-default").__("Containers Running").__(). + th().$class("ui-state-default").__("Memory Used").__(). + th().$class("ui-state-default").__("Memory Total").__(). + th().$class("ui-state-default").__("Memory Reserved").__(). + th().$class("ui-state-default").__("VCores Used").__(). + th().$class("ui-state-default").__("VCores Total").__(). + th().$class("ui-state-default").__("VCores Reserved").__(). + __(). + __(). tbody().$class("ui-widget-content"). 
tr(). td(String.valueOf(clusterMetrics.getAppsSubmitted())). @@ -92,33 +92,33 @@ public class MetricsOverviewTable extends HtmlBlock { td(String.valueOf(clusterMetrics.getAllocatedVirtualCores())). td(String.valueOf(clusterMetrics.getTotalVirtualCores())). td(String.valueOf(clusterMetrics.getReservedVirtualCores())). - _(). - _()._(); + __(). + __().__(); div.h3("Cluster Nodes Metrics"). table("#nodemetricsoverview"). thead().$class("ui-widget-header"). tr(). - th().$class("ui-state-default")._("Active Nodes")._(). - th().$class("ui-state-default")._("Decommissioning Nodes")._(). - th().$class("ui-state-default")._("Decommissioned Nodes")._(). - th().$class("ui-state-default")._("Lost Nodes")._(). - th().$class("ui-state-default")._("Unhealthy Nodes")._(). - th().$class("ui-state-default")._("Rebooted Nodes")._(). - th().$class("ui-state-default")._("Shutdown Nodes")._(). - _(). - _(). + th().$class("ui-state-default").__("Active Nodes").__(). + th().$class("ui-state-default").__("Decommissioning Nodes").__(). + th().$class("ui-state-default").__("Decommissioned Nodes").__(). + th().$class("ui-state-default").__("Lost Nodes").__(). + th().$class("ui-state-default").__("Unhealthy Nodes").__(). + th().$class("ui-state-default").__("Rebooted Nodes").__(). + th().$class("ui-state-default").__("Shutdown Nodes").__(). + __(). + __(). tbody().$class("ui-widget-content"). tr(). - td().a(url("nodes"),String.valueOf(clusterMetrics.getActiveNodes()))._(). - td().a(url("nodes/decommissioning"), String.valueOf(clusterMetrics.getDecommissioningNodes()))._(). - td().a(url("nodes/decommissioned"),String.valueOf(clusterMetrics.getDecommissionedNodes()))._(). - td().a(url("nodes/lost"),String.valueOf(clusterMetrics.getLostNodes()))._(). - td().a(url("nodes/unhealthy"),String.valueOf(clusterMetrics.getUnhealthyNodes()))._(). - td().a(url("nodes/rebooted"),String.valueOf(clusterMetrics.getRebootedNodes()))._(). - td().a(url("nodes/shutdown"),String.valueOf(clusterMetrics.getShutdownNodes()))._(). - _(). - _()._(); + td().a(url("nodes"), String.valueOf(clusterMetrics.getActiveNodes())).__(). + td().a(url("nodes/decommissioning"), String.valueOf(clusterMetrics.getDecommissioningNodes())).__(). + td().a(url("nodes/decommissioned"), String.valueOf(clusterMetrics.getDecommissionedNodes())).__(). + td().a(url("nodes/lost"), String.valueOf(clusterMetrics.getLostNodes())).__(). + td().a(url("nodes/unhealthy"), String.valueOf(clusterMetrics.getUnhealthyNodes())).__(). + td().a(url("nodes/rebooted"), String.valueOf(clusterMetrics.getRebootedNodes())).__(). + td().a(url("nodes/shutdown"), String.valueOf(clusterMetrics.getShutdownNodes())).__(). + __(). + __().__(); String user = request().getRemoteUser(); if (user != null) { @@ -128,21 +128,21 @@ public class MetricsOverviewTable extends HtmlBlock { table("#usermetricsoverview"). thead().$class("ui-widget-header"). tr(). - th().$class("ui-state-default")._("Apps Submitted")._(). - th().$class("ui-state-default")._("Apps Pending")._(). - th().$class("ui-state-default")._("Apps Running")._(). - th().$class("ui-state-default")._("Apps Completed")._(). - th().$class("ui-state-default")._("Containers Running")._(). - th().$class("ui-state-default")._("Containers Pending")._(). - th().$class("ui-state-default")._("Containers Reserved")._(). - th().$class("ui-state-default")._("Memory Used")._(). - th().$class("ui-state-default")._("Memory Pending")._(). - th().$class("ui-state-default")._("Memory Reserved")._(). - th().$class("ui-state-default")._("VCores Used")._(). 
- th().$class("ui-state-default")._("VCores Pending")._(). - th().$class("ui-state-default")._("VCores Reserved")._(). - _(). - _(). + th().$class("ui-state-default").__("Apps Submitted").__(). + th().$class("ui-state-default").__("Apps Pending").__(). + th().$class("ui-state-default").__("Apps Running").__(). + th().$class("ui-state-default").__("Apps Completed").__(). + th().$class("ui-state-default").__("Containers Running").__(). + th().$class("ui-state-default").__("Containers Pending").__(). + th().$class("ui-state-default").__("Containers Reserved").__(). + th().$class("ui-state-default").__("Memory Used").__(). + th().$class("ui-state-default").__("Memory Pending").__(). + th().$class("ui-state-default").__("Memory Reserved").__(). + th().$class("ui-state-default").__("VCores Used").__(). + th().$class("ui-state-default").__("VCores Pending").__(). + th().$class("ui-state-default").__("VCores Reserved").__(). + __(). + __(). tbody().$class("ui-widget-content"). tr(). td(String.valueOf(userMetrics.getAppsSubmitted())). @@ -163,8 +163,8 @@ public class MetricsOverviewTable extends HtmlBlock { td(String.valueOf(userMetrics.getAllocatedVirtualCores())). td(String.valueOf(userMetrics.getPendingVirtualCores())). td(String.valueOf(userMetrics.getReservedVirtualCores())). - _(). - _()._(); + __(). + __().__(); } } @@ -175,14 +175,14 @@ public class MetricsOverviewTable extends HtmlBlock { table("#schedulermetricsoverview"). thead().$class("ui-widget-header"). tr(). - th().$class("ui-state-default")._("Scheduler Type")._(). - th().$class("ui-state-default")._("Scheduling Resource Type")._(). - th().$class("ui-state-default")._("Minimum Allocation")._(). - th().$class("ui-state-default")._("Maximum Allocation")._(). + th().$class("ui-state-default").__("Scheduler Type").__(). + th().$class("ui-state-default").__("Scheduling Resource Type").__(). + th().$class("ui-state-default").__("Minimum Allocation").__(). + th().$class("ui-state-default").__("Maximum Allocation").__(). th().$class("ui-state-default") - ._("Maximum Cluster Application Priority")._(). - _(). - _(). + .__("Maximum Cluster Application Priority").__(). + __(). + __(). tbody().$class("ui-widget-content"). tr(). td(String.valueOf(schedulerInfo.getSchedulerType())). @@ -190,9 +190,9 @@ public class MetricsOverviewTable extends HtmlBlock { td(schedulerInfo.getMinAllocation().toString()). td(schedulerInfo.getMaxAllocation().toString()). td(String.valueOf(schedulerInfo.getMaxClusterLevelAppPriority())). - _(). - _()._(); + __(). 
+ __().__(); - div._(); + div.__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java index ca55175c244..1993f6c0264 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java @@ -23,10 +23,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.UL; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; public class NavBlock extends HtmlBlock { @@ -45,29 +45,29 @@ public class NavBlock extends HtmlBlock { div("#nav"). h3("Cluster"). ul(). - li().a(url("cluster"), "About")._(). - li().a(url("nodes"), "Nodes")._(). - li().a(url("nodelabels"), "Node Labels")._(); + li().a(url("cluster"), "About").__(). + li().a(url("nodes"), "Nodes").__(). + li().a(url("nodelabels"), "Node Labels").__(); UL>>> subAppsList = mainList. li().a(url("apps"), "Applications"). ul(); - subAppsList.li()._(); + subAppsList.li().__(); for (YarnApplicationState state : YarnApplicationState.values()) { subAppsList. - li().a(url("apps", state.toString()), state.toString())._(); + li().a(url("apps", state.toString()), state.toString()).__(); } - subAppsList._()._(); + subAppsList.__().__(); UL> tools = mainList. - li().a(url("scheduler"), "Scheduler")._()._(). + li().a(url("scheduler"), "Scheduler").__().__(). h3("Tools").ul(); - tools.li().a("/conf", "Configuration")._(). - li().a("/logs", "Local logs")._(). - li().a("/stacks", "Server stacks")._(). - li().a("/jmx?qry=Hadoop:*", "Server metrics")._(); + tools.li().a("/conf", "Configuration").__(). + li().a("/logs", "Local logs").__(). + li().a("/stacks", "Server stacks").__(). 
+ li().a("/jmx?qry=Hadoop:*", "Server metrics").__(); if (addErrorsAndWarningsLink) { - tools.li().a(url("errors-and-warnings"), "Errors/Warnings")._(); + tools.li().a(url("errors-and-warnings"), "Errors/Warnings").__(); } - tools._()._(); + tools.__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java index ea85d13ea5e..6ff76281007 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java @@ -26,10 +26,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TR; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -53,7 +53,7 @@ public class NodeLabelsPage extends RmView { th(".type", "Label Type"). th(".numOfActiveNMs", "Num Of Active NMs"). th(".totalResource", "Total Resource"). - _()._(). + __().__(). tbody(); RMNodeLabelsManager nlm = rm.getRMContext().getNodeLabelManager(); @@ -71,17 +71,17 @@ public class NodeLabelsPage extends RmView { .a(url("nodes", "?" 
+ YarnWebParams.NODE_LABEL + "=" + info.getLabelName()), String.valueOf(nActiveNMs)) - ._(); + .__(); } else { row = row.td(String.valueOf(nActiveNMs)); } - row.td(info.getResource().toString())._(); + row.td(info.getResource().toString()).__(); } - tbody._()._(); + tbody.__().__(); } } - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); String title = "Node labels of the cluster"; setTitle(title); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index c03df63b27c..d0e384da34f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -30,9 +30,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.SubView; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import java.util.Collection; @@ -63,7 +63,7 @@ class NodesPage extends RmView { @Override protected void render(Block html) { - html._(MetricsOverviewTable.class); + html.__(MetricsOverviewTable.class); ResourceScheduler sched = rm.getResourceScheduler(); @@ -98,7 +98,7 @@ class NodesPage extends RmView { } TBODY> tbody = - trbody.th(".nodeManagerVersion", "Version")._()._().tbody(); + trbody.th(".nodeManagerVersion", "Version").__().__().tbody(); NodeState stateFilter = null; if (type != null && !type.isEmpty()) { @@ -201,13 +201,13 @@ class NodesPage extends RmView { } nodeTableData.append("]"); html.script().$type("text/javascript") - ._("var nodeTableData=" + nodeTableData)._(); - tbody._()._(); + .__("var nodeTableData=" + nodeTableData).__(); + tbody.__().__(); } } @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String type = $(NODE_STATE); String title = "Nodes of the cluster"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java index 40e1e9421ad..82ddb54b70d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java @@ -45,10 +45,10 @@ import org.apache.hadoop.yarn.server.webapp.AppAttemptBlock; import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.util.resource.Resources; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -86,7 +86,7 @@ public class RMAppAttemptBlock extends AppAttemptBlock{ .th(".resource", "ResourceName").th(".capacity", "Capability") .th(".containers", "NumContainers") .th(".relaxlocality", "RelaxLocality") - .th(".labelexpression", "NodeLabelExpression")._()._().tbody(); + .th(".labelexpression", "NodeLabelExpression").__().__().tbody(); StringBuilder resourceRequestTableData = new StringBuilder("[\n"); for (ResourceRequestInfo resourceRequest : resourceRequests) { @@ -114,9 +114,9 @@ public class RMAppAttemptBlock extends AppAttemptBlock{ } resourceRequestTableData.append("]"); html.script().$type("text/javascript") - ._("var resourceRequestsTableData=" + resourceRequestTableData)._(); - tbody._()._(); - div._(); + .__("var resourceRequestsTableData=" + resourceRequestTableData).__(); + tbody.__().__(); + div.__(); } private Resource getTotalResource(List requests) { @@ -163,7 +163,7 @@ public class RMAppAttemptBlock extends AppAttemptBlock{ th(_TH, "Node Local Request"). th(_TH, "Rack Local Request"). th(_TH, "Off Switch Request"). - _(); + __(); String[] containersType = { "Num Node Local Containers (satisfied by)", "Num Rack Local Containers (satisfied by)", @@ -173,10 +173,10 @@ public class RMAppAttemptBlock extends AppAttemptBlock{ table.tr((odd = !odd) ? _ODD : _EVEN).td(containersType[i]) .td(String.valueOf(attemptMetrics.getLocalityStatistics()[i][0])) .td(i == 0 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][1])) - .td(i <= 1 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][2]))._(); + .td(i <= 1 ? "" : String.valueOf(attemptMetrics.getLocalityStatistics()[i][2])).__(); } - table._(); - div._(); + table.__(); + div.__(); } private boolean isApplicationInFinalState(YarnApplicationAttemptState state) { @@ -192,12 +192,12 @@ public class RMAppAttemptBlock extends AppAttemptBlock{ if (!isApplicationInFinalState(YarnApplicationAttemptState .valueOf(attempt.getAppAttemptState().toString()))) { RMAppAttemptMetrics metrics = attempt.getRMAppAttemptMetrics(); - DIV pdiv = html._(InfoBlock.class).div(_INFO_WRAP); + DIV pdiv = html.__(InfoBlock.class).div(_INFO_WRAP); info("Application Attempt Overview").clear(); - info("Application Attempt Metrics")._( + info("Application Attempt Metrics").__( "Application Attempt Headroom : ", metrics == null ? 
"N/A" : metrics.getApplicationAttemptHeadroom()); - pdiv._(); + pdiv.__(); } } } @@ -226,23 +226,23 @@ public class RMAppAttemptBlock extends AppAttemptBlock{ .getBlacklistUpdates().getBlacklistAdditions()); info("Application Attempt Overview") - ._( + .__( "Application Attempt State:", appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt .getAppAttemptState()) - ._("Started:", Times.format(appAttempt.getStartedTime())) - ._("Elapsed:", + .__("Started:", Times.format(appAttempt.getStartedTime())) + .__("Elapsed:", org.apache.hadoop.util.StringUtils.formatTime(Times.elapsed( appAttempt.getStartedTime(), appAttempt.getFinishedTime()))) - ._( + .__( "AM Container:", appAttempt.getAmContainerId() == null || containers == null || !hasAMContainer(appAttemptReport.getAMContainerId(), containers) ? null : root_url("container", appAttempt.getAmContainerId()), appAttempt.getAmContainerId() == null ? "N/A" : String.valueOf(appAttempt.getAmContainerId())) - ._("Node:", node) - ._( + .__("Node:", node) + .__( "Tracking URL:", appAttempt.getTrackingUrl() == null || appAttempt.getTrackingUrl().equals(UNAVAILABLE) ? null @@ -254,12 +254,12 @@ public class RMAppAttemptBlock extends AppAttemptBlock{ || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FAILED || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.KILLED ? "History" : "ApplicationMaster") - ._( + .__( "Diagnostics Info:", appAttempt.getDiagnosticsInfo() == null ? "" : appAttempt .getDiagnosticsInfo()) - ._("Nodes blacklisted by the application:", appBlacklistedNodes) - ._("Nodes blacklisted by the system:", rmBlackListedNodes); + .__("Nodes blacklisted by the application:", appBlacklistedNodes) + .__("Nodes blacklisted by the system:", rmBlackListedNodes); } private String getNodeString(Collection nodes) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java index e5d6c16a729..cd04264d07c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java @@ -37,8 +37,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptM import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo; import org.apache.hadoop.yarn.server.webapp.AppBlock; import org.apache.hadoop.yarn.util.resource.Resources; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -82,33 +82,33 @@ public class RMAppBlock extends AppBlock{ attemptMetrics == null ? 0 : attemptMetrics .getNumNonAMContainersPreempted(); DIV pdiv = html. - _(InfoBlock.class). + __(InfoBlock.class). 
div(_INFO_WRAP); info("Application Overview").clear(); info("Application Metrics") - ._("Total Resource Preempted:", + .__("Total Resource Preempted:", appMetrics == null ? "N/A" : appMetrics.getResourcePreempted()) - ._("Total Number of Non-AM Containers Preempted:", + .__("Total Number of Non-AM Containers Preempted:", appMetrics == null ? "N/A" : appMetrics.getNumNonAMContainersPreempted()) - ._("Total Number of AM Containers Preempted:", + .__("Total Number of AM Containers Preempted:", appMetrics == null ? "N/A" : appMetrics.getNumAMContainersPreempted()) - ._("Resource Preempted from Current Attempt:", + .__("Resource Preempted from Current Attempt:", attemptResourcePreempted) - ._("Number of Non-AM Containers Preempted from Current Attempt:", + .__("Number of Non-AM Containers Preempted from Current Attempt:", attemptNumNonAMContainerPreempted) - ._("Aggregate Resource Allocation:", + .__("Aggregate Resource Allocation:", String.format("%d MB-seconds, %d vcore-seconds", appMetrics == null ? "N/A" : appMetrics.getMemorySeconds(), appMetrics == null ? "N/A" : appMetrics.getVcoreSeconds())) - ._("Aggregate Preempted Resource Allocation:", + .__("Aggregate Preempted Resource Allocation:", String.format("%d MB-seconds, %d vcore-seconds", appMetrics == null ? "N/A" : appMetrics.getPreemptedMemorySeconds(), appMetrics == null ? "N/A" : appMetrics.getPreemptedVcoreSeconds())); - pdiv._(); + pdiv.__(); } @Override @@ -122,7 +122,7 @@ public class RMAppBlock extends AppBlock{ .th(".appBlacklistednodes", "Nodes blacklisted by the application", "Nodes blacklisted by the app") .th(".rmBlacklistednodes", "Nodes blacklisted by the RM for the" - + " app", "Nodes blacklisted by the system")._()._().tbody(); + + " app", "Nodes blacklisted by the system").__().__().tbody(); RMApp rmApp = this.rm.getRMContext().getRMApps().get(this.appID); if (rmApp == null) { @@ -174,9 +174,9 @@ public class RMAppBlock extends AppBlock{ } attemptsTableData.append("]"); html.script().$type("text/javascript") - ._("var attemptsTableData=" + attemptsTableData)._(); + .__("var attemptsTableData=" + attemptsTableData).__(); - tbody._()._(); + tbody.__().__(); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppLogAggregationStatusBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppLogAggregationStatusBlock.java index f7f7c971788..c1f2e5ed109 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppLogAggregationStatusBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppLogAggregationStatusBlock.java @@ -38,9 +38,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl; import org.apache.hadoop.yarn.util.Apps; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; +import 
org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; @@ -87,28 +87,28 @@ public class RMAppLogAggregationStatusBlock extends HtmlBlock { tr(). th(_TH, "Log Aggregation Status"). th(_TH, "Description"). - _(); + __(); table_description.tr().td(LogAggregationStatus.DISABLED.name()) - .td("Log Aggregation is Disabled.")._(); + .td("Log Aggregation is Disabled.").__(); table_description.tr().td(LogAggregationStatus.NOT_START.name()) - .td("Log Aggregation does not Start.")._(); + .td("Log Aggregation does not Start.").__(); table_description.tr().td(LogAggregationStatus.RUNNING.name()) - .td("Log Aggregation is Running.")._(); + .td("Log Aggregation is Running.").__(); table_description.tr().td(LogAggregationStatus.RUNNING_WITH_FAILURE.name()) .td("Log Aggregation is Running, but has failures " - + "in previous cycles")._(); + + "in previous cycles").__(); table_description.tr().td(LogAggregationStatus.SUCCEEDED.name()) .td("Log Aggregation is Succeeded. All of the logs have been " - + "aggregated successfully.")._(); + + "aggregated successfully.").__(); table_description.tr().td(LogAggregationStatus.FAILED.name()) .td("Log Aggregation is Failed. At least one of the logs " - + "have not been aggregated.")._(); + + "have not been aggregated.").__(); table_description.tr().td(LogAggregationStatus.TIME_OUT.name()) .td("The application is finished, but the log aggregation status is " + "not updated for a long time. Not sure whether the log aggregation " - + "is finished or not.")._(); - table_description._(); - div_description._(); + + "is finished or not.").__(); + table_description.__(); + div_description.__(); RMApp rmApp = rm.getRMContext().getRMApps().get(appId); // Application Log aggregation status Table @@ -131,7 +131,7 @@ public class RMAppLogAggregationStatusBlock extends HtmlBlock { .th(_TH, "Last " + maxLogAggregationDiagnosticsInMemory + " Diagnostic Messages") .th(_TH, "Last " - + maxLogAggregationDiagnosticsInMemory + " Failure Messages")._(); + + maxLogAggregationDiagnosticsInMemory + " Failure Messages").__(); if (rmApp != null) { Map logAggregationReports = @@ -152,11 +152,11 @@ public class RMAppLogAggregationStatusBlock extends HtmlBlock { .td(report.getKey().toString()) .td(status == null ? "N/A" : status.toString()) .td(message == null ? "N/A" : message) - .td(failureMessage == null ? "N/A" : failureMessage)._(); + .td(failureMessage == null ? 
"N/A" : failureMessage).__(); } } } - table._(); - div._(); + table.__(); + div.__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java index 61674d2d342..ede71e34378 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppsBlock.java @@ -35,11 +35,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.webapp.AppsBlock; import org.apache.hadoop.yarn.server.webapp.dao.AppInfo; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.View; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.TBODY; import com.google.inject.Inject; @@ -72,8 +71,8 @@ public class RMAppsBlock extends AppsBlock { .th(".clusterPercentage", "% of Cluster") .th(".progress", "Progress") .th(".ui", "Tracking UI") - .th(".blacklisted", "Blacklisted Nodes")._() - ._().tbody(); + .th(".blacklisted", "Blacklisted Nodes").__() + .__().tbody(); StringBuilder appsTableData = new StringBuilder("[\n"); for (ApplicationReport appReport : appReports) { @@ -190,8 +189,8 @@ public class RMAppsBlock extends AppsBlock { } appsTableData.append("]"); html.script().$type("text/javascript") - ._("var appsTableData=" + appsTableData)._(); + .__("var appsTableData=" + appsTableData).__(); - tbody._()._(); + tbody.__().__(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMErrorsAndWarningsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMErrorsAndWarningsPage.java index 216deeb44ad..c2ac59d20f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMErrorsAndWarningsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMErrorsAndWarningsPage.java @@ -31,7 +31,7 @@ public class RMErrorsAndWarningsPage extends RmView { } @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); String title = "Errors and Warnings in the ResourceManager"; setTitle(title); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java index 23d4bb1f9ce..5a945daf864 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java @@ -168,6 +168,12 @@ public final class RMWSConsts { */ public static final String APPS_TIMEOUT = "/apps/{appid}/timeout"; + /** + * Path for {@code RouterWebServices#getContainer}. + */ + public static final String GET_CONTAINER = + "/apps/{appid}/appattempts/{appattemptid}/containers/{containerid}"; + // ----------------QueryParams for RMWebServiceProtocol---------------- public static final String TIME = "time"; @@ -194,6 +200,15 @@ public final class RMWSConsts { public static final String END_TIME = "end-time"; public static final String INCLUDE_RESOURCE = "include-resource-allocations"; public static final String TYPE = "type"; + public static final String CONTAINERID = "containerid"; + public static final String APPATTEMPTS = "appattempts"; + public static final String TIMEOUTS = "timeouts"; + public static final String PRIORITY = "priority"; + public static final String TIMEOUT = "timeout"; + public static final String ATTEMPTS = "appattempts"; + public static final String GET_LABELS = "get-labels"; + public static final String DESELECTS = "deSelects"; + public static final String CONTAINERS = "containers"; private RMWSConsts() { // not called diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java index 263828b7766..531ce975ab7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebAppUtil.java @@ -18,21 +18,48 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.Principal; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import javax.servlet.http.HttpServletRequest; + +import org.apache.commons.codec.binary.Base64; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.lib.StaticUserWebFilter; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.Text; import org.apache.hadoop.security.AuthenticationFilterInitializer; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.HttpCrossOriginFilterInitializer; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; import 
org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LogAggregationContext; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ReservationId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CredentialsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LocalResourceInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LogAggregationContextInfo; import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilter; import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer; +import org.apache.hadoop.yarn.webapp.BadRequestException; /** * Util class for ResourceManager WebApp. @@ -146,4 +173,201 @@ public final class RMWebAppUtil { } } } + + /** + * Create the actual ApplicationSubmissionContext to be submitted to the RM + * from the information provided by the user. + * + * @param newApp the information provided by the user + * @param conf RM configuration + * @return returns the constructed ApplicationSubmissionContext + * @throws IOException in case of Error + */ + public static ApplicationSubmissionContext createAppSubmissionContext( + ApplicationSubmissionContextInfo newApp, Configuration conf) + throws IOException { + + // create local resources and app submission context + + ApplicationId appid; + String error = + "Could not parse application id " + newApp.getApplicationId(); + try { + appid = ApplicationId.fromString(newApp.getApplicationId()); + } catch (Exception e) { + throw new BadRequestException(error); + } + ApplicationSubmissionContext appContext = ApplicationSubmissionContext + .newInstance(appid, newApp.getApplicationName(), newApp.getQueue(), + Priority.newInstance(newApp.getPriority()), + createContainerLaunchContext(newApp), newApp.getUnmanagedAM(), + newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(), + createAppSubmissionContextResource(newApp, conf), + newApp.getApplicationType(), + newApp.getKeepContainersAcrossApplicationAttempts(), + newApp.getAppNodeLabelExpression(), + newApp.getAMContainerNodeLabelExpression()); + appContext.setApplicationTags(newApp.getApplicationTags()); + appContext.setAttemptFailuresValidityInterval( + newApp.getAttemptFailuresValidityInterval()); + if (newApp.getLogAggregationContextInfo() != null) { + appContext.setLogAggregationContext( + createLogAggregationContext(newApp.getLogAggregationContextInfo())); + } + String reservationIdStr = newApp.getReservationId(); + if (reservationIdStr != null && !reservationIdStr.isEmpty()) { + ReservationId reservationId = + ReservationId.parseReservationId(reservationIdStr); + appContext.setReservationID(reservationId); + } + return appContext; + } + + /** + * Create the actual Resource inside the ApplicationSubmissionContextInfo to + * be submitted to the RM from the information provided by the user. 
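The helper documented here rejects submissions whose requested memory or vcores exceed the scheduler maxima. A hedged sketch of the same check from a caller's side, using only the YarnConfiguration keys referenced in this patch (the class and method names below are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    /** Illustrative only: pre-checking a request against the configured maxima. */
    final class ResourceLimitCheck {
      static boolean fitsSchedulerMax(long memoryMb, int vcores, Configuration conf) {
        int maxMb = conf.getInt(
            YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
        int maxVcores = conf.getInt(
            YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
        // mirrors the two BadRequestException branches in createAppSubmissionContextResource
        return memoryMb <= maxMb && vcores <= maxVcores;
      }
    }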
+ * + * @param newApp the information provided by the user + * @param conf RM configuration + * @return returns the constructed Resource inside the + * ApplicationSubmissionContextInfo + * @throws BadRequestException + */ + private static Resource createAppSubmissionContextResource( + ApplicationSubmissionContextInfo newApp, Configuration conf) + throws BadRequestException { + if (newApp.getResource().getvCores() > conf.getInt( + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)) { + String msg = "Requested more cores than configured max"; + throw new BadRequestException(msg); + } + if (newApp.getResource().getMemorySize() > conf.getInt( + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)) { + String msg = "Requested more memory than configured max"; + throw new BadRequestException(msg); + } + Resource r = Resource.newInstance(newApp.getResource().getMemorySize(), + newApp.getResource().getvCores()); + return r; + } + + /** + * Create the ContainerLaunchContext required for the + * ApplicationSubmissionContext. This function takes the user information and + * generates the ByteBuffer structures required by the ContainerLaunchContext + * + * @param newApp the information provided by the user + * @return created context + * @throws BadRequestException + * @throws IOException + */ + private static ContainerLaunchContext createContainerLaunchContext( + ApplicationSubmissionContextInfo newApp) + throws BadRequestException, IOException { + + // create container launch context + + HashMap hmap = new HashMap(); + for (Map.Entry entry : newApp + .getContainerLaunchContextInfo().getAuxillaryServiceData().entrySet()) { + if (!entry.getValue().isEmpty()) { + Base64 decoder = new Base64(0, null, true); + byte[] data = decoder.decode(entry.getValue()); + hmap.put(entry.getKey(), ByteBuffer.wrap(data)); + } + } + + HashMap hlr = new HashMap(); + for (Map.Entry entry : newApp + .getContainerLaunchContextInfo().getResources().entrySet()) { + LocalResourceInfo l = entry.getValue(); + LocalResource lr = LocalResource.newInstance(URL.fromURI(l.getUrl()), + l.getType(), l.getVisibility(), l.getSize(), l.getTimestamp()); + hlr.put(entry.getKey(), lr); + } + + DataOutputBuffer out = new DataOutputBuffer(); + Credentials cs = createCredentials( + newApp.getContainerLaunchContextInfo().getCredentials()); + cs.writeTokenStorageToStream(out); + ByteBuffer tokens = ByteBuffer.wrap(out.getData()); + + ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(hlr, + newApp.getContainerLaunchContextInfo().getEnvironment(), + newApp.getContainerLaunchContextInfo().getCommands(), hmap, tokens, + newApp.getContainerLaunchContextInfo().getAcls()); + + return ctx; + } + + /** + * Generate a Credentials object from the information in the CredentialsInfo + * object. + * + * @param credentials the CredentialsInfo provided by the user. 
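createContainerLaunchContext and createCredentials above decode what the REST client sends: auxiliary service data and secret keys arrive as URL-safe base64, delegation tokens as URL-encoded strings. A hedged sketch of the client-side encoding counterpart (not part of the patch):

    import java.io.IOException;
    import org.apache.commons.codec.binary.Base64;
    import org.apache.hadoop.security.token.Token;

    /** Illustrative only: producing the string forms the REST payload expects. */
    final class PayloadEncodingExample {
      // same Base64 flavour as the decoder used in the patch: new Base64(0, null, true)
      static String encodeSecretOrServiceData(byte[] raw) {
        return new Base64(0, null, true).encodeToString(raw);
      }
      // counterpart of Token#decodeFromUrlString used by createCredentials
      static String encodeToken(Token<?> token) throws IOException {
        return token.encodeToUrlString();
      }
    }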
+ * @return + */ + private static Credentials createCredentials(CredentialsInfo credentials) { + Credentials ret = new Credentials(); + try { + for (Map.Entry entry : credentials.getTokens() + .entrySet()) { + Text alias = new Text(entry.getKey()); + Token token = new Token(); + token.decodeFromUrlString(entry.getValue()); + ret.addToken(alias, token); + } + for (Map.Entry entry : credentials.getSecrets() + .entrySet()) { + Text alias = new Text(entry.getKey()); + Base64 decoder = new Base64(0, null, true); + byte[] secret = decoder.decode(entry.getValue()); + ret.addSecretKey(alias, secret); + } + } catch (IOException ie) { + throw new BadRequestException( + "Could not parse credentials data; exception message = " + + ie.getMessage()); + } + return ret; + } + + private static LogAggregationContext createLogAggregationContext( + LogAggregationContextInfo logAggregationContextInfo) { + return LogAggregationContext.newInstance( + logAggregationContextInfo.getIncludePattern(), + logAggregationContextInfo.getExcludePattern(), + logAggregationContextInfo.getRolledLogsIncludePattern(), + logAggregationContextInfo.getRolledLogsExcludePattern(), + logAggregationContextInfo.getLogAggregationPolicyClassName(), + logAggregationContextInfo.getLogAggregationPolicyParameters()); + } + + /** + * Helper method to retrieve the UserGroupInformation from the + * HttpServletRequest. + * + * @param hsr the servlet request + * @param usePrincipal true if we need to use the principal user, remote + * otherwise. + * @return the user group information of the caller. + **/ + public static UserGroupInformation getCallerUserGroupInformation( + HttpServletRequest hsr, boolean usePrincipal) { + + String remoteUser = hsr.getRemoteUser(); + if (usePrincipal) { + Principal princ = hsr.getUserPrincipal(); + remoteUser = princ == null ? null : princ.getName(); + } + + UserGroupInformation callerUGI = null; + if (remoteUser != null) { + callerUGI = UserGroupInformation.createRemoteUser(remoteUser); + } + + return callerUGI; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java index 93ab3de19d8..062ca4c8408 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java @@ -108,7 +108,7 @@ public interface RMWebServiceProtocol { * This method dumps the scheduler logs for the time got in input, and it is * reachable by using {@link RMWSConsts#SCHEDULER_LOGS}. * - * @param time the period of time + * @param time the period of time. It is a FormParam. * @param hsr the servlet request * @return the result of the operation * @throws IOException when it cannot create dump log file @@ -121,7 +121,7 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#NODES}. * * @see ApplicationClientProtocol#getClusterNodes - * @param states the states we want to filter + * @param states the states we want to filter. It is a QueryParam. * @return all nodes in the cluster. 
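getCallerUserGroupInformation above centralizes caller resolution for the web services. A brief hedged sketch of its use (the surrounding class and the fallback name are illustrative):

    import javax.servlet.http.HttpServletRequest;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil;

    /** Illustrative only: resolving the REST caller via the new shared helper. */
    final class CallerExample {
      static String callerShortName(HttpServletRequest hsr) {
        // true => prefer the authenticated principal over getRemoteUser()
        UserGroupInformation ugi =
            RMWebAppUtil.getCallerUserGroupInformation(hsr, true);
        return ugi == null ? "UNKNOWN" : ugi.getShortUserName();
      }
    }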
If the states param is given, returns all * nodes that are in the comma-separated list of states */ @@ -131,7 +131,8 @@ public interface RMWebServiceProtocol { * This method retrieves a specific node information, and it is reachable by * using {@link RMWSConsts#NODES_NODEID}. * - * @param nodeId the node we want to retrieve the information + * @param nodeId the node we want to retrieve the information. It is a + * PathParam. * @return the information about the node in input */ NodeInfo getNode(String nodeId); @@ -142,19 +143,25 @@ public interface RMWebServiceProtocol { * * @see ApplicationClientProtocol#getApplications * @param hsr the servlet request - * @param stateQuery right now the stateQuery is deprecated - * @param statesQuery filter the result by states - * @param finalStatusQuery filter the result by final states - * @param userQuery filter the result by user - * @param queueQuery filter the result by queue - * @param count set a limit of the result - * @param startedBegin filter the result by started begin time - * @param startedEnd filter the result by started end time - * @param finishBegin filter the result by finish begin time - * @param finishEnd filter the result by finish end time - * @param applicationTypes filter the result by types - * @param applicationTags filter the result by tags - * @param unselectedFields De-selected params to avoid from report + * @param stateQuery right now the stateQuery is deprecated. It is a + * QueryParam. + * @param statesQuery filter the result by states. It is a QueryParam. + * @param finalStatusQuery filter the result by final states. It is a + * QueryParam. + * @param userQuery filter the result by user. It is a QueryParam. + * @param queueQuery filter the result by queue. It is a QueryParam. + * @param count set a limit of the result. It is a QueryParam. + * @param startedBegin filter the result by started begin time. It is a + * QueryParam. + * @param startedEnd filter the result by started end time. It is a + * QueryParam. + * @param finishBegin filter the result by finish begin time. It is a + * QueryParam. + * @param finishEnd filter the result by finish end time. It is a QueryParam. + * @param applicationTypes filter the result by types. It is a QueryParam. + * @param applicationTags filter the result by tags. It is a QueryParam. + * @param unselectedFields De-selected params to avoid from report. It is a + * QueryParam. * @return all apps in the cluster */ @SuppressWarnings("checkstyle:parameternumber") @@ -169,7 +176,8 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#SCHEDULER_ACTIVITIES}. * * @param hsr the servlet request - * @param nodeId the node we want to retrieve the activities + * @param nodeId the node we want to retrieve the activities. It is a + * QueryParam. * @return all the activities in the specific node */ ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId); @@ -180,8 +188,10 @@ public interface RMWebServiceProtocol { * {@link RMWSConsts#SCHEDULER_APP_ACTIVITIES}. * * @param hsr the servlet request - * @param appId the applicationId we want to retrieve the activities - * @param time for how long we want to retrieve the activities + * @param appId the applicationId we want to retrieve the activities. It is a + * QueryParam. + * @param time for how long we want to retrieve the activities. It is a + * QueryParam. 
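These javadoc additions tag each parameter as a QueryParam, PathParam, or content param so the protocol reads consistently with the JAX-RS annotations on the implementation. A minimal illustrative resource showing the mapping; the paths and parameter names mirror those in the patch, but the class itself is hypothetical:

    import java.util.Set;
    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.PathParam;
    import javax.ws.rs.QueryParam;

    @Path("/ws/v1/cluster")
    public class ParamKindExample {
      @GET
      @Path("/apps/{appid}")
      public String getApp(@PathParam("appid") String appId,               // "It is a PathParam."
                           @QueryParam("deSelects") Set<String> deSelects) { // "It is a QueryParam."
        return appId + " " + deSelects;
      }
    }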
* @return all the activities about a specific app for a specific time */ AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId, @@ -192,8 +202,8 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#APP_STATISTICS}. * * @param hsr the servlet request - * @param stateQueries filter the result by states - * @param typeQueries filter the result by type names + * @param stateQueries filter the result by states. It is a QueryParam. + * @param typeQueries filter the result by type names. It is a QueryParam. * @return the application's statistics for specific states and types */ ApplicationStatisticsInfo getAppStatistics(HttpServletRequest hsr, @@ -205,8 +215,10 @@ public interface RMWebServiceProtocol { * * @see ApplicationClientProtocol#getApplicationReport * @param hsr the servlet request - * @param appId the Id of the application we want the report - * @param unselectedFields De-selected param list to avoid from report + * @param appId the Id of the application we want the report. It is a + * PathParam. + * @param unselectedFields De-selected param list to avoid from report. It is + * a QueryParam. * @return the app report for a specific application */ AppInfo getApp(HttpServletRequest hsr, String appId, @@ -217,7 +229,8 @@ public interface RMWebServiceProtocol { * using {@link RMWSConsts#APPS_APPID_STATE}. * * @param hsr the servlet request - * @param appId the Id of the application we want the state + * @param appId the Id of the application we want the state. It is a + * PathParam. * @return the state for a specific application * @throws AuthorizationException if the user is not authorized */ @@ -228,9 +241,10 @@ public interface RMWebServiceProtocol { * This method updates the state of the app in input, and it is reachable by * using {@link RMWSConsts#APPS_APPID_STATE}. * - * @param targetState the target state for the app + * @param targetState the target state for the app. It is a content param. * @param hsr the servlet request - * @param appId the Id of the application we want to update the state + * @param appId the Id of the application we want to update the state. It is a + * PathParam. * @return Response containing the status code * @throws AuthorizationException if the user is not authorized to invoke this * method @@ -259,7 +273,7 @@ public interface RMWebServiceProtocol { * cluster, and it is reachable by using {@link RMWSConsts#LABEL_MAPPINGS}. * * @see ApplicationClientProtocol#getLabelsToNodes - * @param labels filter the result by node labels + * @param labels filter the result by node labels. It is a QueryParam. * @return all the nodes within multiple node labels * @throws IOException if an IOException happened */ @@ -270,7 +284,7 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#REPLACE_NODE_TO_LABELS}. * * @see ResourceManagerAdministrationProtocol#replaceLabelsOnNode - * @param newNodeToLabels the list of new labels + * @param newNodeToLabels the list of new labels. It is a content param. * @param hsr the servlet request * @return Response containing the status code * @throws Exception if an exception happened @@ -283,9 +297,10 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#NODES_NODEID_REPLACE_LABELS}. * * @see ResourceManagerAdministrationProtocol#replaceLabelsOnNode - * @param newNodeLabelsName the list of new labels + * @param newNodeLabelsName the list of new labels. It is a QueryParam. 
* @param hsr the servlet request - * @param nodeId the node we want to replace the node labels + * @param nodeId the node we want to replace the node labels. It is a + * PathParam. * @return Response containing the status code * @throws Exception if an exception happened */ @@ -309,7 +324,7 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#ADD_NODE_LABELS}. * * @see ResourceManagerAdministrationProtocol#addToClusterNodeLabels - * @param newNodeLabels the node labels to add + * @param newNodeLabels the node labels to add. It is a content param. * @param hsr the servlet request * @return Response containing the status code * @throws Exception in case of bad request @@ -322,7 +337,7 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#REMOVE_NODE_LABELS}. * * @see ResourceManagerAdministrationProtocol#removeFromClusterNodeLabels - * @param oldNodeLabels the node labels to remove + * @param oldNodeLabels the node labels to remove. It is a QueryParam. * @param hsr the servlet request * @return Response containing the status code * @throws Exception in case of bad request @@ -335,7 +350,8 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#NODES_NODEID_GETLABELS}. * * @param hsr the servlet request - * @param nodeId the node we want to get all the node labels + * @param nodeId the node we want to get all the node labels. It is a + * PathParam. * @return all the labels for a specific node. * @throws IOException if an IOException happened */ @@ -347,7 +363,7 @@ public interface RMWebServiceProtocol { * by using {@link RMWSConsts#APPS_APPID_PRIORITY}. * * @param hsr the servlet request - * @param appId the app we want to get the priority + * @param appId the app we want to get the priority. It is a PathParam. * @return the priority for a specific application * @throws AuthorizationException in case of the user is not authorized */ @@ -358,9 +374,11 @@ public interface RMWebServiceProtocol { * This method updates the priority for a specific application, and it is * reachable by using {@link RMWSConsts#APPS_APPID_PRIORITY}. * - * @param targetPriority the priority we want to set for the app + * @param targetPriority the priority we want to set for the app. It is a + * content param. * @param hsr the servlet request - * @param appId the application we want to update its priority + * @param appId the application we want to update its priority. It is a + * PathParam. * @return Response containing the status code * @throws AuthorizationException if the user is not authenticated * @throws YarnException if the target is null @@ -376,7 +394,8 @@ public interface RMWebServiceProtocol { * using {@link RMWSConsts#APPS_APPID_QUEUE}. * * @param hsr the servlet request - * @param appId the application we want to retrieve its queue + * @param appId the application we want to retrieve its queue. It is a + * PathParam. * @return the Queue for a specific application. * @throws AuthorizationException if the user is not authenticated */ @@ -387,9 +406,10 @@ public interface RMWebServiceProtocol { * This method updates the queue for a specific application, and it is * reachable by using {@link RMWSConsts#APPS_APPID_QUEUE}. * - * @param targetQueue the queue we want to set + * @param targetQueue the queue we want to set. It is a content param. * @param hsr the servlet request - * @param appId the application we want to change its queue + * @param appId the application we want to change its queue. It is a + * PathParam. 
* @return Response containing the status code * @throws AuthorizationException if the user is not authenticated * @throws YarnException if the app is not found @@ -424,7 +444,7 @@ public interface RMWebServiceProtocol { * @see ApplicationClientProtocol#submitApplication * * @param newApp structure containing information to construct the - * ApplicationSubmissionContext + * ApplicationSubmissionContext. It is a content param. * @param hsr the servlet request * @return Response containing the status code * @throws AuthorizationException if the user is not authorized to invoke this @@ -441,7 +461,7 @@ public interface RMWebServiceProtocol { * by using {@link RMWSConsts#DELEGATION_TOKEN}. * * @see ApplicationBaseProtocol#getDelegationToken - * @param tokenData the token to delegate + * @param tokenData the token to delegate. It is a content param. * @param hsr the servlet request * @return Response containing the status code * @throws AuthorizationException if Kerberos auth failed @@ -508,7 +528,7 @@ public interface RMWebServiceProtocol { * @see ApplicationClientProtocol#submitReservation * * @param resContext provides information to construct the - * ReservationSubmissionRequest + * ReservationSubmissionRequest. It is a content param. * @param hsr the servlet request * @return Response containing the status code * @throws AuthorizationException if the user is not authorized to invoke this @@ -527,7 +547,7 @@ public interface RMWebServiceProtocol { * @see ApplicationClientProtocol#updateReservation * * @param resContext provides information to construct the - * ReservationUpdateRequest + * ReservationUpdateRequest. It is a content param. * @param hsr the servlet request * @return Response containing the status code * @throws AuthorizationException if the user is not authorized to invoke this @@ -546,7 +566,7 @@ public interface RMWebServiceProtocol { * @see ApplicationClientProtocol#deleteReservation * * @param resContext provides information to construct the - * ReservationDeleteRequest + * ReservationDeleteRequest. It is a content param. * @param hsr the servlet request * @return Response containing the status code * @throws AuthorizationException when the user group information cannot be @@ -566,12 +586,13 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#RESERVATION_LIST}. * * @see ApplicationClientProtocol#listReservations - * @param queue filter the result by queue - * @param reservationId filter the result by reservationId - * @param startTime filter the result by start time - * @param endTime filter the result by end time + * @param queue filter the result by queue. It is a QueryParam. + * @param reservationId filter the result by reservationId. It is a + * QueryParam. + * @param startTime filter the result by start time. It is a QueryParam. + * @param endTime filter the result by end time. It is a QueryParam. * @param includeResourceAllocations true if the resource allocation should be - * in the result, false otherwise + * in the result, false otherwise. It is a QueryParam. * @param hsr the servlet request * @return Response containing the status code * @throws Exception in case of bad request @@ -586,8 +607,8 @@ public interface RMWebServiceProtocol { * {@link RMWSConsts#APPS_TIMEOUTS_TYPE}. * * @param hsr the servlet request - * @param appId the application we want to get the timeout - * @param type the type of the timeouts + * @param appId the application we want to get the timeout. It is a PathParam. + * @param type the type of the timeouts. 
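Where these javadocs say "content param", the value is the unmarshalled request body rather than part of the URL. A hedged sketch, using the /apps/{appid}/timeout path declared in RMWSConsts; the DTO below is a stand-in, not the real AppTimeoutInfo:

    import javax.ws.rs.Consumes;
    import javax.ws.rs.PUT;
    import javax.ws.rs.Path;
    import javax.ws.rs.PathParam;
    import javax.ws.rs.core.MediaType;
    import javax.ws.rs.core.Response;

    @Path("/ws/v1/cluster")
    public class ContentParamExample {
      /** Stand-in for a JAXB-bound body such as AppTimeoutInfo. */
      public static class TimeoutBody {
        public String type;
        public String expiryTime;
      }

      @PUT
      @Path("/apps/{appid}/timeout")
      @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
      public Response updateTimeout(TimeoutBody body,                 // content param
                                    @PathParam("appid") String appId) {
        return Response.ok(appId + " -> " + body.expiryTime).build();
      }
    }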
It is a PathParam. * @return the timeout for a specific application with a specific type. * @throws AuthorizationException if the user is not authorized */ @@ -599,7 +620,8 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#APPS_TIMEOUTS}. * * @param hsr the servlet request - * @param appId the application we want to get the timeouts + * @param appId the application we want to get the timeouts. It is a + * PathParam. * @return the timeouts for a specific application * @throws AuthorizationException if the user is not authorized */ @@ -611,9 +633,9 @@ public interface RMWebServiceProtocol { * reachable by using {@link RMWSConsts#APPS_TIMEOUT}. * * @see ApplicationClientProtocol#updateApplicationTimeouts - * @param appTimeout the appTimeoutInfo + * @param appTimeout the appTimeoutInfo. It is a content param. * @param hsr the servlet request - * @param appId the application we want to update + * @param appId the application we want to update. It is a PathParam. * @return Response containing the status code * @throws AuthorizationException if the user is not authorized to invoke this * method @@ -631,7 +653,8 @@ public interface RMWebServiceProtocol { * * @see ApplicationBaseProtocol#getApplicationAttempts * @param hsr the servlet request - * @param appId the application we want to get the attempts + * @param appId the application we want to get the attempts. It is a + * PathParam. * @return all the attempts info for a specific application */ AppAttemptsInfo getAppAttempts(HttpServletRequest hsr, String appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 7c053bf3880..6ce47defd89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; import java.io.IOException; import java.lang.reflect.UndeclaredThrowableException; -import java.nio.ByteBuffer; import java.security.AccessControlException; import java.security.Principal; import java.security.PrivilegedExceptionAction; @@ -57,22 +56,18 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; -import org.apache.commons.codec.binary.Base64; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.http.JettyUtils; -import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import 
org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; @@ -104,10 +99,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType; -import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; -import org.apache.hadoop.yarn.api.records.LocalResource; -import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.NodeState; @@ -119,7 +111,6 @@ import org.apache.hadoop.yarn.api.records.ReservationRequest; import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter; import org.apache.hadoop.yarn.api.records.ReservationRequests; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -136,33 +127,34 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo; import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; -import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CredentialsInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo; -import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LocalResourceInfo; -import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LogAggregationContextInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewApplication; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewReservation; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; @@ -185,7 +177,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemInfo; -import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.*; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.server.webapp.WebServices; @@ -445,7 +436,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol { @QueryParam(RMWSConsts.FINISHED_TIME_END) String finishEnd, @QueryParam(RMWSConsts.APPLICATION_TYPES) Set applicationTypes, @QueryParam(RMWSConsts.APPLICATION_TAGS) Set applicationTags, - @QueryParam("deSelects") Set unselectedFields) { + @QueryParam(RMWSConsts.DESELECTS) Set unselectedFields) { boolean checkCount = false; boolean checkStart = false; boolean checkEnd = false; @@ -832,7 +823,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol { @Override public AppInfo getApp(@Context HttpServletRequest hsr, @PathParam(RMWSConsts.APPID) String appId, - @QueryParam("deSelects") Set unselectedFields) { + @QueryParam(RMWSConsts.DESELECTS) Set unselectedFields) { init(); ApplicationId id = WebAppUtils.parseApplicationId(recordFactory, appId); RMApp app = rm.getRMContext().getRMApps().get(id); @@ -1589,7 +1580,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol { } ApplicationSubmissionContext appContext = - createAppSubmissionContext(newApp); + RMWebAppUtil.createAppSubmissionContext(newApp, conf); final SubmitApplicationRequest req = SubmitApplicationRequest.newInstance(appContext); @@ -1640,153 +1631,6 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol { return appId; } - /** - * Create the actual ApplicationSubmissionContext to be submitted to the RM - * from the information provided by the user. 
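With the helper made static on RMWebAppUtil, RMWebServices above now delegates to it, and other web services (such as the RouterWebServices referenced by the new GET_CONTAINER constant) can reuse the same logic. A hedged sketch of such a caller; the class and method names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;

    /** Illustrative only: building a submission request outside RMWebServices. */
    final class SharedSubmissionExample {
      static SubmitApplicationRequest toRequest(
          ApplicationSubmissionContextInfo newApp, Configuration conf)
          throws IOException {
        ApplicationSubmissionContext ctx =
            RMWebAppUtil.createAppSubmissionContext(newApp, conf);
        return SubmitApplicationRequest.newInstance(ctx);
      }
    }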
- * - * @param newApp the information provided by the user - * @return returns the constructed ApplicationSubmissionContext - * @throws IOException - */ - protected ApplicationSubmissionContext createAppSubmissionContext( - ApplicationSubmissionContextInfo newApp) throws IOException { - - // create local resources and app submission context - - ApplicationId appid; - String error = - "Could not parse application id " + newApp.getApplicationId(); - try { - appid = ApplicationId.fromString(newApp.getApplicationId()); - } catch (Exception e) { - throw new BadRequestException(error); - } - ApplicationSubmissionContext appContext = ApplicationSubmissionContext - .newInstance(appid, newApp.getApplicationName(), newApp.getQueue(), - Priority.newInstance(newApp.getPriority()), - createContainerLaunchContext(newApp), newApp.getUnmanagedAM(), - newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(), - createAppSubmissionContextResource(newApp), - newApp.getApplicationType(), - newApp.getKeepContainersAcrossApplicationAttempts(), - newApp.getAppNodeLabelExpression(), - newApp.getAMContainerNodeLabelExpression()); - appContext.setApplicationTags(newApp.getApplicationTags()); - appContext.setAttemptFailuresValidityInterval( - newApp.getAttemptFailuresValidityInterval()); - if (newApp.getLogAggregationContextInfo() != null) { - appContext.setLogAggregationContext( - createLogAggregationContext(newApp.getLogAggregationContextInfo())); - } - String reservationIdStr = newApp.getReservationId(); - if (reservationIdStr != null && !reservationIdStr.isEmpty()) { - ReservationId reservationId = - ReservationId.parseReservationId(reservationIdStr); - appContext.setReservationID(reservationId); - } - return appContext; - } - - protected Resource createAppSubmissionContextResource( - ApplicationSubmissionContextInfo newApp) throws BadRequestException { - if (newApp.getResource().getvCores() > rm.getConfig().getInt( - YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)) { - String msg = "Requested more cores than configured max"; - throw new BadRequestException(msg); - } - if (newApp.getResource().getMemorySize() > rm.getConfig().getInt( - YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)) { - String msg = "Requested more memory than configured max"; - throw new BadRequestException(msg); - } - Resource r = Resource.newInstance(newApp.getResource().getMemorySize(), - newApp.getResource().getvCores()); - return r; - } - - /** - * Create the ContainerLaunchContext required for the - * ApplicationSubmissionContext. 
This function takes the user information and - * generates the ByteBuffer structures required by the ContainerLaunchContext - * - * @param newApp the information provided by the user - * @return created context - * @throws BadRequestException - * @throws IOException - */ - protected ContainerLaunchContext createContainerLaunchContext( - ApplicationSubmissionContextInfo newApp) - throws BadRequestException, IOException { - - // create container launch context - - HashMap hmap = new HashMap(); - for (Map.Entry entry : newApp - .getContainerLaunchContextInfo().getAuxillaryServiceData().entrySet()) { - if (entry.getValue().isEmpty() == false) { - Base64 decoder = new Base64(0, null, true); - byte[] data = decoder.decode(entry.getValue()); - hmap.put(entry.getKey(), ByteBuffer.wrap(data)); - } - } - - HashMap hlr = new HashMap(); - for (Map.Entry entry : newApp - .getContainerLaunchContextInfo().getResources().entrySet()) { - LocalResourceInfo l = entry.getValue(); - LocalResource lr = LocalResource.newInstance(URL.fromURI(l.getUrl()), - l.getType(), l.getVisibility(), l.getSize(), l.getTimestamp()); - hlr.put(entry.getKey(), lr); - } - - DataOutputBuffer out = new DataOutputBuffer(); - Credentials cs = createCredentials( - newApp.getContainerLaunchContextInfo().getCredentials()); - cs.writeTokenStorageToStream(out); - ByteBuffer tokens = ByteBuffer.wrap(out.getData()); - - ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(hlr, - newApp.getContainerLaunchContextInfo().getEnvironment(), - newApp.getContainerLaunchContextInfo().getCommands(), hmap, tokens, - newApp.getContainerLaunchContextInfo().getAcls()); - - return ctx; - } - - /** - * Generate a Credentials object from the information in the CredentialsInfo - * object. - * - * @param credentials the CredentialsInfo provided by the user. 
- * @return - */ - private Credentials createCredentials(CredentialsInfo credentials) { - Credentials ret = new Credentials(); - try { - for (Map.Entry entry : credentials.getTokens() - .entrySet()) { - Text alias = new Text(entry.getKey()); - Token token = new Token(); - token.decodeFromUrlString(entry.getValue()); - ret.addToken(alias, token); - } - for (Map.Entry entry : credentials.getSecrets() - .entrySet()) { - Text alias = new Text(entry.getKey()); - Base64 decoder = new Base64(0, null, true); - byte[] secret = decoder.decode(entry.getValue()); - ret.addSecretKey(alias, secret); - } - } catch (IOException ie) { - throw new BadRequestException( - "Could not parse credentials data; exception message = " - + ie.getMessage()); - } - return ret; - } - private UserGroupInformation createKerberosUserGroupInformation( HttpServletRequest hsr) throws AuthorizationException, YarnException { @@ -1815,17 +1659,6 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol { return callerUGI; } - private LogAggregationContext createLogAggregationContext( - LogAggregationContextInfo logAggregationContextInfo) { - return LogAggregationContext.newInstance( - logAggregationContextInfo.getIncludePattern(), - logAggregationContextInfo.getExcludePattern(), - logAggregationContextInfo.getRolledLogsIncludePattern(), - logAggregationContextInfo.getRolledLogsExcludePattern(), - logAggregationContextInfo.getLogAggregationPolicyClassName(), - logAggregationContextInfo.getLogAggregationPolicyParameters()); - } - @POST @Path(RMWSConsts.DELEGATION_TOKEN) @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java index beb0cca235d..d81e64ddac7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java @@ -26,7 +26,7 @@ import org.apache.hadoop.yarn.webapp.YarnWebParams; * because of a redirection issue. 
*/ public class RedirectionErrorPage extends RmView { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { String aid = $(YarnWebParams.APPLICATION_ID); commonPreHead(html); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java index 1a437f834b7..fc844f9e70f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java @@ -33,7 +33,7 @@ public class RmView extends TwoColumnLayout { static final int MAX_FAST_ROWS = 1000; // inline js array @Override - protected void preHead(Page.HTML<_> html) { + protected void preHead(Page.HTML<__> html) { commonPreHead(html); set(DATATABLES_ID, "apps"); set(initID(DATATABLES, "apps"), initAppsTable()); @@ -45,7 +45,7 @@ public class RmView extends TwoColumnLayout { setTitle(sjoin(reqState, "Applications")); } - protected void commonPreHead(Page.HTML<_> html) { + protected void commonPreHead(Page.HTML<__> html) { set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/SchedulerPageUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/SchedulerPageUtil.java index 99c05656f3a..8c2d2711872 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/SchedulerPageUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/SchedulerPageUtil.java @@ -27,7 +27,7 @@ public class SchedulerPageUtil { private void reopenQueue(Block html) { html. script().$type("text/javascript"). - _("function reopenQueryNodes() {", + __("function reopenQueryNodes() {", " var currentParam = window.location.href.split('?');", " var tmpCurrentParam = currentParam;", " var queryQueuesString = '';", @@ -52,13 +52,13 @@ public class SchedulerPageUtil { " 'open_node.jstree' :function(e, data) { storeExpandedQueue(e, data); },", " 'close_node.jstree':function(e, data) { storeExpandedQueue(e, data); }", " });", - "}")._(); + "}").__(); } private void storeExpandedQueue (Block html) { html. script().$type("text/javascript"). 
- _("function storeExpandedQueue(e, data) {", + __("function storeExpandedQueue(e, data) {", " var OPEN_QUEUES = 'openQueues';", " var ACTION_OPEN = 'open';", " var ACTION_CLOSED = 'closed';", @@ -166,7 +166,7 @@ public class SchedulerPageUtil { " queryString = queryString + '#' + queueName;", " }", " return queryString;", - "}")._(); + "}").__(); } @Override protected void render(Block html) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java index 55bf999b047..82a946e4c9f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java @@ -27,7 +27,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; @XmlRootElement(name = "appAttempt") @@ -106,4 +105,8 @@ public class AppAttemptInfo { public String getLogsLink() { return this.logsLink; } + + public String getAppAttemptId() { + return this.appAttemptId; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java index f083b057065..dc42eb63846 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -65,7 +65,10 @@ public class ClusterMetricsInfo { } // JAXB needs this public ClusterMetricsInfo(final ResourceManager rm) { - ResourceScheduler rs = rm.getResourceScheduler(); + this(rm.getResourceScheduler()); + } + + public ClusterMetricsInfo(final ResourceScheduler rs) { QueueMetrics metrics = rs.getRootQueueMetrics(); ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java index fa14bae9673..913513c52ae 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java @@ -28,6 +28,7 @@ import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlSeeAlso; import javax.xml.bind.annotation.XmlTransient; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue; @@ -47,20 +48,19 @@ public class FairSchedulerQueueInfo { @XmlTransient private float fractionMemFairShare; @XmlTransient - private float fractionMemMinShare; - @XmlTransient private float fractionMemMaxShare; private ResourceInfo minResources; private ResourceInfo maxResources; private ResourceInfo usedResources; + private ResourceInfo amUsedResources; + private ResourceInfo amMaxResources; private ResourceInfo demandResources; private ResourceInfo steadyFairResources; private ResourceInfo fairResources; private ResourceInfo clusterResources; private ResourceInfo reservedResources; - private long pendingContainers; private long allocatedContainers; private long reservedContainers; @@ -82,6 +82,12 @@ public class FairSchedulerQueueInfo { clusterResources = new ResourceInfo(scheduler.getClusterResource()); + amUsedResources = new ResourceInfo(Resource.newInstance( + queue.getMetrics().getAMResourceUsageMB(), + queue.getMetrics().getAMResourceUsageVCores())); + amMaxResources = new ResourceInfo(Resource.newInstance( + queue.getMetrics().getMaxAMShareMB(), + queue.getMetrics().getMaxAMShareVCores())); usedResources = new ResourceInfo(queue.getResourceUsage()); demandResources = new ResourceInfo(queue.getDemand()); fractionMemUsed = (float)usedResources.getMemorySize() / @@ -90,7 +96,6 @@ public class FairSchedulerQueueInfo { steadyFairResources = new ResourceInfo(queue.getSteadyFairShare()); fairResources = new ResourceInfo(queue.getFairShare()); minResources = new ResourceInfo(queue.getMinShare()); - maxResources = new ResourceInfo(queue.getMaxShare()); maxResources = new ResourceInfo( Resources.componentwiseMin(queue.getMaxShare(), scheduler.getClusterResource())); @@ -100,12 +105,10 @@ public class FairSchedulerQueueInfo { (float)steadyFairResources.getMemorySize() / clusterResources.getMemorySize(); fractionMemFairShare = (float) fairResources.getMemorySize() / clusterResources.getMemorySize(); - fractionMemMinShare = (float)minResources.getMemorySize() / clusterResources.getMemorySize(); fractionMemMaxShare = (float)maxResources.getMemorySize() / clusterResources.getMemorySize(); maxApps = queue.getMaxRunningApps(); - pendingContainers = queue.getMetrics().getPendingContainers(); allocatedContainers = queue.getMetrics().getAllocatedContainers(); reservedContainers = queue.getMetrics().getReservedContainers(); @@ -118,10 +121,6 @@ public class FairSchedulerQueueInfo { childQueues = getChildQueues(queue, scheduler); } - public long getPendingContainers() { - return pendingContainers; - } - public long getAllocatedContainers() { return allocatedContainers; } @@ -204,6 +203,20 @@ public class FairSchedulerQueueInfo { return usedResources; } + /** + * @return the am 
used resource of this queue. + */ + public ResourceInfo getAMUsedResources() { + return amUsedResources; + } + + /** + * @return the am max resource of this queue. + */ + public ResourceInfo getAMMaxResources() { + return amMaxResources; + } + /** * @return the demand resource of this queue. */ @@ -211,14 +224,6 @@ public class FairSchedulerQueueInfo { return demandResources; } - /** - * Returns the queue's min share in as a fraction of the entire - * cluster capacity. - */ - public float getMinShareMemoryFraction() { - return fractionMemMinShare; - } - /** * Returns the memory used by this queue as a fraction of the entire * cluster capacity. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index 91170d1ad1c..7f5871103b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -242,7 +242,7 @@ public class MockNodes { } @Override - public void updateNodeHeartbeatResponseForContainersDecreasing( + public void updateNodeHeartbeatResponseForUpdatedContainers( NodeHeartbeatResponse response) { } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java index 2fc4b3f4696..ee974e3389f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java @@ -26,6 +26,8 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest; @@ -306,12 +308,19 @@ public class NodeManager implements ContainerManagementProtocol { } @Override + @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( IncreaseContainersResourceRequest request) throws YarnException, IOException { return null; } + @Override + public ContainerUpdateResponse updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { + return null; + } + public static org.apache.hadoop.yarn.server.api.records.NodeStatus createNodeStatus(NodeId nodeId, List containers) { RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java index c95bcdfca97..4d8b20d69fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java @@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; import java.io.IOException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.ClientBaseWithFixes; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -61,8 +62,8 @@ public abstract class RMHATestBase extends ClientBaseWithFixes{ configuration.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true); configuration.set(YarnConfiguration.RM_STORE, ZKRMStateStore.class.getName()); - configuration.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort); - configuration.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, ZK_TIMEOUT_MS); + configuration.set(CommonConfigurationKeys.ZK_ADDRESS, hostPort); + configuration.setInt(CommonConfigurationKeys.ZK_TIMEOUT_MS, ZK_TIMEOUT_MS); configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false); configuration.set(YarnConfiguration.RM_CLUSTER_ID, "test-yarn-cluster"); int base = 100; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java index b3d4d344294..1acf658b229 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java @@ -42,6 +42,8 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; @@ -156,12 +158,19 @@ public class TestAMAuthorization { return GetContainerStatusesResponse.newInstance(null, null); } + @Deprecated @Override public IncreaseContainersResourceResponse increaseContainersResource(IncreaseContainersResourceRequest request) throws YarnException { return IncreaseContainersResourceResponse.newInstance(null, null); } + @Override + public ContainerUpdateResponse 
updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { + return ContainerUpdateResponse.newInstance(null, null); + } + public Credentials getContainerCredentials() throws IOException { Credentials credentials = new Credentials(); DataInputByteBuffer buf = new DataInputByteBuffer(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java index 9e8401027e9..172993b8176 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterLauncher.java @@ -34,6 +34,8 @@ import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.CommitResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; @@ -73,6 +75,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMaste import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.utils.AMRMClientUtils; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -147,6 +150,7 @@ public class TestApplicationMasterLauncher { } @Override + @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( IncreaseContainersResourceRequest request) throws YarnException { @@ -189,6 +193,12 @@ public class TestApplicationMasterLauncher { throws YarnException, IOException { return null; } + + @Override + public ContainerUpdateResponse updateContainer(ContainerUpdateRequest + request) throws YarnException, IOException { + return null; + } } @Test @@ -338,9 +348,8 @@ public class TestApplicationMasterLauncher { am.registerAppAttempt(false); Assert.fail(); } catch (Exception e) { - Assert.assertEquals("Application Master is already registered : " - + attempt.getAppAttemptId().getApplicationId(), - e.getMessage()); + Assert.assertEquals(AMRMClientUtils.APP_ALREADY_REGISTERED_MESSAGE + + attempt.getAppAttemptId().getApplicationId(), e.getMessage()); } // Simulate an AM that was disconnected and app attempt was removed diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 678daa3e240..9ce02bcc92a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -1612,8 +1612,8 @@ public class TestClientRMService { ReservationRequests reservationRequests = response.getReservationAllocationState().get(0) .getReservationDefinition().getReservationRequests(); - Assert.assertTrue( - reservationRequests.getInterpreter().toString().equals("R_ALL")); + Assert.assertEquals("R_ALL", + reservationRequests.getInterpreter().toString()); Assert.assertTrue(reservationRequests.getReservationResources().get(0) .getDuration() == duration); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java index 6819395db5e..b885118810b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java @@ -43,11 +43,13 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.UpdateContainerRequest; +import org.apache.hadoop.yarn.api.records.UpdatedContainer; import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterDistributedSchedulingAMResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; @@ -122,6 +124,21 @@ public class TestOpportunisticContainerAllocatorAMService { rm.start(); } + public void createAndStartRMWithAutoUpdateContainer() { + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + YarnConfiguration conf = new YarnConfiguration(csConf); + conf.setBoolean(YarnConfiguration.RM_AUTO_UPDATE_CONTAINERS, true); + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + conf.setBoolean( + YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true); + conf.setInt( + 
YarnConfiguration.NM_CONTAINER_QUEUING_SORTING_NODES_INTERVAL_MS, 100); + rm = new MockRM(conf); + rm.start(); + } + @After public void stopRM() { if (rm != null) { @@ -548,6 +565,157 @@ public class TestOpportunisticContainerAllocatorAMService { verifyMetrics(metrics, 7168, 7, 1024, 1, 1); } + @Test(timeout = 600000) + public void testContainerAutoUpdateContainer() throws Exception { + rm.stop(); + createAndStartRMWithAutoUpdateContainer(); + MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService()); + nm1.registerNode(); + + OpportunisticContainerAllocatorAMService amservice = + (OpportunisticContainerAllocatorAMService) rm + .getApplicationMasterService(); + RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default"); + ApplicationAttemptId attemptId = + app1.getCurrentAppAttempt().getAppAttemptId(); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); + ResourceScheduler scheduler = rm.getResourceScheduler(); + RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId()); + + nm1.nodeHeartbeat(true); + + ((RMNodeImpl) rmNode1) + .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100)); + + OpportunisticContainerContext ctxt = + ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId) + .getOpportunisticContainerContext(); + // Send add and update node events to AM Service. + amservice.handle(new NodeAddedSchedulerEvent(rmNode1)); + amservice.handle(new NodeUpdateSchedulerEvent(rmNode1)); + + nm1.nodeHeartbeat(true); + Thread.sleep(1000); + + AllocateResponse allocateResponse = am1.allocate(Arrays.asList( + ResourceRequest.newInstance(Priority.newInstance(1), "*", + Resources.createResource(1 * GB), 2, true, null, + ExecutionTypeRequest + .newInstance(ExecutionType.OPPORTUNISTIC, true))), null); + List allocatedContainers = + allocateResponse.getAllocatedContainers(); + Assert.assertEquals(2, allocatedContainers.size()); + Container container = allocatedContainers.get(0); + // Start Container in NM + nm1.nodeHeartbeat(Arrays.asList(ContainerStatus + .newInstance(container.getId(), ExecutionType.OPPORTUNISTIC, + ContainerState.RUNNING, "", 0)), true); + Thread.sleep(200); + + // Verify that container is actually running wrt the RM.. + RMContainer rmContainer = ((CapacityScheduler) scheduler) + .getApplicationAttempt(container.getId().getApplicationAttemptId()) + .getRMContainer(container.getId()); + Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState()); + + // Send Promotion req... this should result in update error + // Since the container doesn't exist anymore.. 
+ allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList( + UpdateContainerRequest.newInstance(0, container.getId(), + ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, + ExecutionType.GUARANTEED))); + + nm1.nodeHeartbeat(Arrays.asList(ContainerStatus + .newInstance(container.getId(), ExecutionType.OPPORTUNISTIC, + ContainerState.RUNNING, "", 0)), true); + Thread.sleep(200); + // Get the update response on next allocate + allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>()); + // Check the update response from YARNRM + Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size()); + UpdatedContainer uc = allocateResponse.getUpdatedContainers().get(0); + Assert.assertEquals(container.getId(), uc.getContainer().getId()); + Assert.assertEquals(ExecutionType.GUARANTEED, + uc.getContainer().getExecutionType()); + // Check that the container is updated in NM through NM heartbeat response + NodeHeartbeatResponse response = nm1.nodeHeartbeat(true); + Assert.assertEquals(1, response.getContainersToUpdate().size()); + Container containersFromNM = response.getContainersToUpdate().get(0); + Assert.assertEquals(container.getId(), containersFromNM.getId()); + Assert.assertEquals(ExecutionType.GUARANTEED, + containersFromNM.getExecutionType()); + + //Increase resources + allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList( + UpdateContainerRequest.newInstance(1, container.getId(), + ContainerUpdateType.INCREASE_RESOURCE, + Resources.createResource(2 * GB, 1), null))); + response = nm1.nodeHeartbeat(Arrays.asList(ContainerStatus + .newInstance(container.getId(), ExecutionType.GUARANTEED, + ContainerState.RUNNING, "", 0)), true); + + Thread.sleep(200); + if (allocateResponse.getUpdatedContainers().size() == 0) { + allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>()); + } + Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size()); + uc = allocateResponse.getUpdatedContainers().get(0); + Assert.assertEquals(container.getId(), uc.getContainer().getId()); + Assert.assertEquals(Resource.newInstance(2 * GB, 1), + uc.getContainer().getResource()); + + // Check that the container resources are increased in + // NM through NM heartbeat response + if (response.getContainersToUpdate().size() == 0) { + response = nm1.nodeHeartbeat(true); + } + Assert.assertEquals(1, response.getContainersToUpdate().size()); + Assert.assertEquals(Resource.newInstance(2 * GB, 1), + response.getContainersToUpdate().get(0).getResource()); + + //Decrease resources + allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList( + UpdateContainerRequest.newInstance(2, container.getId(), + ContainerUpdateType.DECREASE_RESOURCE, + Resources.createResource(1 * GB, 1), null))); + Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size()); + + // Check that the container resources are decreased + // in NM through NM heartbeat response + response = nm1.nodeHeartbeat(true); + Assert.assertEquals(1, response.getContainersToUpdate().size()); + Assert.assertEquals(Resource.newInstance(1 * GB, 1), + response.getContainersToUpdate().get(0).getResource()); + + nm1.nodeHeartbeat(true); + // DEMOTE the container + allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList( + UpdateContainerRequest.newInstance(3, container.getId(), + ContainerUpdateType.DEMOTE_EXECUTION_TYPE, null, + ExecutionType.OPPORTUNISTIC))); + + response = nm1.nodeHeartbeat(Arrays.asList(ContainerStatus + .newInstance(container.getId(), ExecutionType.GUARANTEED, + ContainerState.RUNNING, 
"", 0)), true); + Thread.sleep(200); + if (allocateResponse.getUpdatedContainers().size() == 0) { + // Get the update response on next allocate + allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>()); + } + // Check the update response from YARNRM + Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size()); + uc = allocateResponse.getUpdatedContainers().get(0); + Assert.assertEquals(ExecutionType.OPPORTUNISTIC, + uc.getContainer().getExecutionType()); + // Check that the container is updated in NM through NM heartbeat response + if (response.getContainersToUpdate().size() == 0) { + response = nm1.nodeHeartbeat(true); + } + Assert.assertEquals(1, response.getContainersToUpdate().size()); + Assert.assertEquals(ExecutionType.OPPORTUNISTIC, + response.getContainersToUpdate().get(0).getExecutionType()); + } + private void verifyMetrics(QueueMetrics metrics, long availableMB, int availableVirtualCores, long allocatedMB, int allocatedVirtualCores, int allocatedContainers) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java index f9d0eae29e2..3130ad1b4d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java @@ -2551,14 +2551,14 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase { } }; - // rm should successfully start with app1 loaded back in FAILED state - // due to node label not enabled but am resource request contains - // node label expression. + // rm should successfully start with app1 loaded back in SUCCESS state + // by pushing app to run default label for am container and let other + // containers to run normally. 
+ try { rm2.start(); Assert.assertTrue("RM start successfully", true); Assert.assertEquals(1, rm2.getRMContext().getRMApps().size()); - rm2.waitForState(app1.getApplicationId(), RMAppState.FAILED); } catch (Exception e) { LOG.debug("Exception on start", e); Assert.fail("RM should start without any issue"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java index d02822791b2..2c37f44e416 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java @@ -1324,11 +1324,11 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase nm1.nodeHeartbeat(true); List conts = am0.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers(); - while (conts.size() == 0) { + while (conts.size() < 2) { nm1.nodeHeartbeat(true); conts.addAll(am0.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers()); - Thread.sleep(500); + Thread.sleep(100); } // am failed,and relaunch it @@ -1426,14 +1426,12 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase nm1.nodeHeartbeat(true); List conts = am0.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers(); - Assert.assertTrue(conts.isEmpty()); - while (conts.size() == 0) { + while (conts.size() < 2) { nm1.nodeHeartbeat(true); conts.addAll(am0.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers()); - Thread.sleep(500); + Thread.sleep(100); } - Assert.assertFalse(conts.isEmpty()); // start new RM rm2 = new MockRM(conf, rm1.getRMStateStore()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java new file mode 100644 index 00000000000..e5e156dcf76 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.federation; + +import java.io.IOException; +import java.io.StringReader; +import java.net.UnknownHostException; + +import javax.xml.bind.JAXBException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ha.HAServiceProtocol; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.FederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest; +import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.sun.jersey.api.json.JSONConfiguration; +import com.sun.jersey.api.json.JSONJAXBContext; +import com.sun.jersey.api.json.JSONUnmarshaller; + +/** + * Unit tests for FederationStateStoreService. 
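+ * Covers sub-cluster registration, heartbeat-driven state transitions, deregistration and RM failover.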
+ */ +public class TestFederationRMStateStoreService { + + private final HAServiceProtocol.StateChangeRequestInfo requestInfo = + new HAServiceProtocol.StateChangeRequestInfo( + HAServiceProtocol.RequestSource.REQUEST_BY_USER); + private final SubClusterId subClusterId = SubClusterId.newInstance("SC-1"); + private final GetSubClusterInfoRequest request = + GetSubClusterInfoRequest.newInstance(subClusterId); + + private Configuration conf; + private FederationStateStore stateStore; + private long lastHearbeatTS = 0; + private JSONJAXBContext jc; + private JSONUnmarshaller unmarshaller; + + @Before + public void setUp() throws IOException, YarnException, JAXBException { + conf = new YarnConfiguration(); + jc = new JSONJAXBContext( + JSONConfiguration.mapped().rootUnwrapping(false).build(), + ClusterMetricsInfo.class); + unmarshaller = jc.createJSONUnmarshaller(); + } + + @After + public void tearDown() throws Exception { + unmarshaller = null; + jc = null; + } + + @Test + public void testFederationStateStoreService() throws Exception { + conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true); + conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId.getId()); + final MockRM rm = new MockRM(conf); + + // Initially there should be no entry for the sub-cluster + rm.init(conf); + stateStore = rm.getFederationStateStoreService().getStateStoreClient(); + GetSubClusterInfoResponse response = stateStore.getSubCluster(request); + Assert.assertNull(response); + + // Validate if sub-cluster is registered + rm.start(); + String capability = checkSubClusterInfo(SubClusterState.SC_NEW); + Assert.assertTrue(capability.isEmpty()); + + // Heartbeat to see if sub-cluster transitions to running + FederationStateStoreHeartbeat storeHeartbeat = + rm.getFederationStateStoreService().getStateStoreHeartbeatThread(); + storeHeartbeat.run(); + capability = checkSubClusterInfo(SubClusterState.SC_RUNNING); + checkClusterMetricsInfo(capability, 0); + + // heartbeat again after adding a node. + rm.registerNode("127.0.0.1:1234", 4 * 1024); + storeHeartbeat.run(); + capability = checkSubClusterInfo(SubClusterState.SC_RUNNING); + checkClusterMetricsInfo(capability, 1); + + // Validate sub-cluster deregistration + rm.getFederationStateStoreService() + .deregisterSubCluster(SubClusterDeregisterRequest + .newInstance(subClusterId, SubClusterState.SC_UNREGISTERED)); + checkSubClusterInfo(SubClusterState.SC_UNREGISTERED); + + // check after failover + explicitFailover(rm); + + capability = checkSubClusterInfo(SubClusterState.SC_NEW); + Assert.assertTrue(capability.isEmpty()); + + // Heartbeat to see if sub-cluster transitions to running + storeHeartbeat = + rm.getFederationStateStoreService().getStateStoreHeartbeatThread(); + storeHeartbeat.run(); + capability = checkSubClusterInfo(SubClusterState.SC_RUNNING); + checkClusterMetricsInfo(capability, 0); + + // heartbeat again after adding a node. 
+ rm.registerNode("127.0.0.1:1234", 4 * 1024); + storeHeartbeat.run(); + capability = checkSubClusterInfo(SubClusterState.SC_RUNNING); + checkClusterMetricsInfo(capability, 1); + + rm.stop(); + } + + private void explicitFailover(MockRM rm) throws IOException { + rm.getAdminService().transitionToStandby(requestInfo); + Assert.assertTrue(rm.getRMContext() + .getHAServiceState() == HAServiceProtocol.HAServiceState.STANDBY); + rm.getAdminService().transitionToActive(requestInfo); + Assert.assertTrue(rm.getRMContext() + .getHAServiceState() == HAServiceProtocol.HAServiceState.ACTIVE); + lastHearbeatTS = 0; + stateStore = rm.getFederationStateStoreService().getStateStoreClient(); + } + + private void checkClusterMetricsInfo(String capability, int numNodes) + throws JAXBException { + ClusterMetricsInfo clusterMetricsInfo = unmarshaller.unmarshalFromJSON( + new StringReader(capability), ClusterMetricsInfo.class); + Assert.assertEquals(numNodes, clusterMetricsInfo.getTotalNodes()); + } + + private String checkSubClusterInfo(SubClusterState state) + throws YarnException, UnknownHostException { + Assert.assertNotNull(stateStore.getSubCluster(request)); + SubClusterInfo response = + stateStore.getSubCluster(request).getSubClusterInfo(); + Assert.assertEquals(state, response.getState()); + Assert.assertTrue(response.getLastHeartBeat() >= lastHearbeatTS); + String expectedAddress = + (response.getClientRMServiceAddress().split(":"))[0]; + Assert.assertEquals(expectedAddress, + (response.getAMRMServiceAddress().split(":"))[0]); + Assert.assertEquals(expectedAddress, + (response.getRMAdminServiceAddress().split(":"))[0]); + Assert.assertEquals(expectedAddress, + (response.getRMWebServiceAddress().split(":"))[0]); + lastHearbeatTS = response.getLastHeartBeat(); + return response.getCapability(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java index ca979145308..06a16ffeffe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java @@ -91,6 +91,8 @@ public class RMStateStoreTestBase { public static final Log LOG = LogFactory.getLog(RMStateStoreTestBase.class); + protected final long epoch = 10L; + static class TestDispatcher implements Dispatcher, EventHandler { ApplicationAttemptId attemptId; @@ -564,13 +566,13 @@ public class RMStateStoreTestBase { store.setRMDispatcher(new TestDispatcher()); long firstTimeEpoch = store.getAndIncrementEpoch(); - Assert.assertEquals(0, firstTimeEpoch); + Assert.assertEquals(epoch, firstTimeEpoch); long secondTimeEpoch = store.getAndIncrementEpoch(); - Assert.assertEquals(1, secondTimeEpoch); + Assert.assertEquals(epoch + 1, secondTimeEpoch); long thirdTimeEpoch = store.getAndIncrementEpoch(); - Assert.assertEquals(2, thirdTimeEpoch); + Assert.assertEquals(epoch + 2, thirdTimeEpoch); } public void testAppDeletion(RMStateStoreHelper stateStoreHelper) diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java index 5eeb528bf44..0738730aabc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java @@ -117,6 +117,7 @@ public class TestFSRMStateStore extends RMStateStoreTestBase { conf.setInt(YarnConfiguration.FS_RM_STATE_STORE_NUM_RETRIES, 8); conf.setLong(YarnConfiguration.FS_RM_STATE_STORE_RETRY_INTERVAL_MS, 900L); + conf.setLong(YarnConfiguration.RM_EPOCH, epoch); if (adminCheckEnable) { conf.setBoolean( YarnConfiguration.YARN_INTERMEDIATE_DATA_ENCRYPTION, true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java index e3d0f9c22ad..afd0c773bd3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestLeveldbRMStateStore.java @@ -82,6 +82,7 @@ public class TestLeveldbRMStateStore extends RMStateStoreTestBase { @Test(timeout = 60000) public void testEpoch() throws Exception { + conf.setLong(YarnConfiguration.RM_EPOCH, epoch); LeveldbStateStoreTester tester = new LeveldbStateStoreTester(); testEpoch(tester); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java index 942e9e88bcf..5ae82391bdf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java @@ -189,6 +189,7 @@ public class TestZKRMStateStore extends RMStateStoreTestBase { conf.set(YarnConfiguration.RM_ZK_ADDRESS, curatorTestingServer.getConnectString()); conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode); + conf.setLong(YarnConfiguration.RM_EPOCH, epoch); this.store = new TestZKRMStateStoreInternal(conf, workingZnode); return this.store; } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java index 76dbc368705..90a681d2e9e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java @@ -119,9 +119,8 @@ public class TestReservationInputValidator { } catch (YarnException e) { Assert.assertNull(plan); String message = e.getMessage(); - Assert - .assertTrue(message - .equals("The queue is not specified. Please try again with a valid reservable queue.")); + Assert.assertEquals("The queue is not specified. Please try again with a " + + "valid reservable queue.", message); LOG.info(message); } } @@ -161,9 +160,8 @@ public class TestReservationInputValidator { } catch (YarnException e) { Assert.assertNull(plan); String message = e.getMessage(); - Assert - .assertTrue(message - .equals("Missing reservation definition. Please try again by specifying a reservation definition.")); + Assert.assertEquals("Missing reservation definition. Please try again by " + + "specifying a reservation definition.", message); LOG.info(message); } } @@ -305,6 +303,7 @@ public class TestReservationInputValidator { @Test public void testSubmitReservationInvalidRecurrenceExpression() { + // first check recurrence expression ReservationSubmissionRequest request = createSimpleReservationSubmissionRequest(1, 1, 1, 5, 3, "123abc"); plan = null; @@ -320,6 +319,23 @@ public class TestReservationInputValidator { .startsWith("Invalid period ")); LOG.info(message); } + + // now check duration + request = + createSimpleReservationSubmissionRequest(1, 1, 1, 50, 3, "10"); + plan = null; + try { + plan = + rrValidator.validateReservationSubmissionRequest(rSystem, request, + ReservationSystemTestUtil.getNewReservationId()); + Assert.fail(); + } catch (YarnException e) { + Assert.assertNull(plan); + String message = e.getMessage(); + Assert.assertTrue(message + .startsWith("Duration of the requested reservation:")); + LOG.info(message); + } } @Test @@ -501,6 +517,73 @@ public class TestReservationInputValidator { } } + @Test + public void testUpdateReservationValidRecurrenceExpression() { + ReservationUpdateRequest request = + createSimpleReservationUpdateRequest(1, 1, 1, 5, 3, "600000"); + plan = null; + try { + plan = + rrValidator.validateReservationUpdateRequest(rSystem, request); + } catch (YarnException e) { + Assert.fail(e.getMessage()); + } + Assert.assertNotNull(plan); + } + + @Test + public void testUpdateReservationNegativeRecurrenceExpression() { + ReservationUpdateRequest request = + createSimpleReservationUpdateRequest(1, 1, 1, 5, 3, "-1234"); + plan = null; + try { + plan = + rrValidator.validateReservationUpdateRequest(rSystem, request); + Assert.fail(); + } catch (YarnException e) { + Assert.assertNull(plan); + String message = e.getMessage(); + Assert.assertTrue(message + .startsWith("Negative Period : ")); + LOG.info(message); + } + } + + @Test + public void 
testUpdateReservationInvalidRecurrenceExpression() { + // first check recurrence expression + ReservationUpdateRequest request = + createSimpleReservationUpdateRequest(1, 1, 1, 5, 3, "123abc"); + plan = null; + try { + plan = + rrValidator.validateReservationUpdateRequest(rSystem, request); + Assert.fail(); + } catch (YarnException e) { + Assert.assertNull(plan); + String message = e.getMessage(); + Assert.assertTrue(message + .startsWith("Invalid period ")); + LOG.info(message); + } + + // now check duration + request = + createSimpleReservationUpdateRequest(1, 1, 1, 50, 3, "10"); + plan = null; + try { + plan = + rrValidator.validateReservationUpdateRequest(rSystem, request); + Assert.fail(); + } catch (YarnException e) { + Assert.assertNull(plan); + String message = e.getMessage(); + Assert.assertTrue(message + .startsWith("Duration of the requested reservation:")); + LOG.info(message); + } + } + @Test public void testDeleteReservationNormal() { ReservationDeleteRequest request = new ReservationDeleteRequestPBImpl(); @@ -712,11 +795,19 @@ public class TestReservationInputValidator { private ReservationUpdateRequest createSimpleReservationUpdateRequest( int numRequests, int numContainers, long arrival, long deadline, long duration) { + return createSimpleReservationUpdateRequest(numRequests, numContainers, + arrival, deadline, duration, "0"); + } + + private ReservationUpdateRequest createSimpleReservationUpdateRequest( + int numRequests, int numContainers, long arrival, long deadline, + long duration, String recurrence) { // create a request with a single atomic ask ReservationUpdateRequest request = new ReservationUpdateRequestPBImpl(); ReservationDefinition rDef = new ReservationDefinitionPBImpl(); rDef.setArrival(arrival); rDef.setDeadline(deadline); + rDef.setRecurrenceExpression(recurrence); if (numRequests > 0) { ReservationRequests reqs = new ReservationRequestsPBImpl(); rDef.setReservationRequests(reqs); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index 854a4f3b305..cdc67ed60ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -760,12 +760,12 @@ public class TestSchedulerUtils { mock(Priority.class), ResourceRequest.ANY, resource, 1); SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext); - Assert.assertTrue(resReq.getNodeLabelExpression().equals("x")); + Assert.assertEquals("x", resReq.getNodeLabelExpression()); resReq.setNodeLabelExpression(" y "); SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext); - Assert.assertTrue(resReq.getNodeLabelExpression().equals("y")); + Assert.assertEquals("y", resReq.getNodeLabelExpression()); } catch (InvalidResourceRequestException e) { e.printStackTrace(); fail("Should be valid when request labels is a subset of queue labels"); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 0642cd937b4..a52622246c3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -42,7 +43,6 @@ import java.util.Set; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; -import com.google.common.base.Supplier; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -167,6 +167,7 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import com.google.common.base.Supplier; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; @@ -233,6 +234,17 @@ public class TestCapacityScheduler { } } + private NodeManager registerNode(ResourceManager rm, String hostName, + int containerManagerPort, int httpPort, String rackName, + Resource capability) throws IOException, YarnException { + NodeManager nm = new NodeManager(hostName, + containerManagerPort, httpPort, rackName, capability, rm); + NodeAddedSchedulerEvent nodeAddEvent1 = + new NodeAddedSchedulerEvent(rm.getRMContext().getRMNodes() + .get(nm.getNodeId())); + rm.getResourceScheduler().handle(nodeAddEvent1); + return nm; + } @Test (timeout = 30000) public void testConfValidation() throws Exception { @@ -267,12 +279,12 @@ public class TestCapacityScheduler { } } - private org.apache.hadoop.yarn.server.resourcemanager.NodeManager + private NodeManager registerNode(String hostName, int containerManagerPort, int httpPort, String rackName, Resource capability) throws IOException, YarnException { - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm = - new org.apache.hadoop.yarn.server.resourcemanager.NodeManager( + NodeManager nm = + new NodeManager( hostName, containerManagerPort, httpPort, rackName, capability, resourceManager); NodeAddedSchedulerEvent nodeAddEvent1 = @@ -400,8 +412,216 @@ public class TestCapacityScheduler { LOG.info("--- END: testCapacityScheduler ---"); } - private void nodeUpdate( - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm) { + @Test + public void testNotAssignMultiple() throws Exception { + LOG.info("--- START: testNotAssignMultiple ---"); + ResourceManager rm = new ResourceManager() { + @Override + protected RMNodeLabelsManager createNodeLabelManager() { + RMNodeLabelsManager mgr = new NullRMNodeLabelsManager(); + mgr.init(getConfig()); + return mgr; + } + }; + CapacitySchedulerConfiguration csConf = + 
new CapacitySchedulerConfiguration(); + csConf.setBoolean( + CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false); + setupQueueConfiguration(csConf); + YarnConfiguration conf = new YarnConfiguration(csConf); + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + rm.init(conf); + rm.getRMContext().getContainerTokenSecretManager().rollMasterKey(); + rm.getRMContext().getNMTokenSecretManager().rollMasterKey(); + ((AsyncDispatcher) rm.getRMContext().getDispatcher()).start(); + RMContext mC = mock(RMContext.class); + when(mC.getConfigurationProvider()).thenReturn( + new LocalConfigurationProvider()); + + // Register node1 + String host0 = "host_0"; + NodeManager nm0 = + registerNode(rm, host0, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(10 * GB, 10)); + + // ResourceRequest priorities + Priority priority0 = Priority.newInstance(0); + Priority priority1 = Priority.newInstance(1); + + // Submit an application + Application application0 = new Application("user_0", "a1", rm); + application0.submit(); + application0.addNodeManager(host0, 1234, nm0); + + Resource capability00 = Resources.createResource(1 * GB, 1); + application0.addResourceRequestSpec(priority0, capability00); + + Resource capability01 = Resources.createResource(2 * GB, 1); + application0.addResourceRequestSpec(priority1, capability01); + + Task task00 = + new Task(application0, priority0, new String[] {host0}); + Task task01 = + new Task(application0, priority1, new String[] {host0}); + application0.addTask(task00); + application0.addTask(task01); + + // Submit another application + Application application1 = new Application("user_1", "b2", rm); + application1.submit(); + application1.addNodeManager(host0, 1234, nm0); + + Resource capability10 = Resources.createResource(3 * GB, 1); + application1.addResourceRequestSpec(priority0, capability10); + + Resource capability11 = Resources.createResource(4 * GB, 1); + application1.addResourceRequestSpec(priority1, capability11); + + Task task10 = new Task(application1, priority0, new String[] {host0}); + Task task11 = new Task(application1, priority1, new String[] {host0}); + application1.addTask(task10); + application1.addTask(task11); + + // Send resource requests to the scheduler + application0.schedule(); + + application1.schedule(); + + // Send a heartbeat to kick the tires on the Scheduler + LOG.info("Kick!"); + + // task00, used=1G + nodeUpdate(rm, nm0); + + // Get allocations from the scheduler + application0.schedule(); + application1.schedule(); + // 1 Task per heart beat should be scheduled + checkNodeResourceUsage(3 * GB, nm0); // task00 (1G) + checkApplicationResourceUsage(0 * GB, application0); + checkApplicationResourceUsage(3 * GB, application1); + + // Another heartbeat + nodeUpdate(rm, nm0); + application0.schedule(); + checkApplicationResourceUsage(1 * GB, application0); + application1.schedule(); + checkApplicationResourceUsage(3 * GB, application1); + checkNodeResourceUsage(4 * GB, nm0); + LOG.info("--- START: testNotAssignMultiple ---"); + } + + @Test + public void testAssignMultiple() throws Exception { + LOG.info("--- START: testAssignMultiple ---"); + ResourceManager rm = new ResourceManager() { + @Override + protected RMNodeLabelsManager createNodeLabelManager() { + RMNodeLabelsManager mgr = new NullRMNodeLabelsManager(); + mgr.init(getConfig()); + return mgr; + } + }; + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + csConf.setBoolean( + 
CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true); + // Each heartbeat will assign 2 containers at most + csConf.setInt(CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT, 2); + setupQueueConfiguration(csConf); + YarnConfiguration conf = new YarnConfiguration(csConf); + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + rm.init(conf); + rm.getRMContext().getContainerTokenSecretManager().rollMasterKey(); + rm.getRMContext().getNMTokenSecretManager().rollMasterKey(); + ((AsyncDispatcher) rm.getRMContext().getDispatcher()).start(); + RMContext mC = mock(RMContext.class); + when(mC.getConfigurationProvider()).thenReturn( + new LocalConfigurationProvider()); + + // Register node1 + String host0 = "host_0"; + NodeManager nm0 = + registerNode(rm, host0, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(10 * GB, 10)); + + // ResourceRequest priorities + Priority priority0 = Priority.newInstance(0); + Priority priority1 = Priority.newInstance(1); + + // Submit an application + Application application0 = new Application("user_0", "a1", rm); + application0.submit(); + application0.addNodeManager(host0, 1234, nm0); + + Resource capability00 = Resources.createResource(1 * GB, 1); + application0.addResourceRequestSpec(priority0, capability00); + + Resource capability01 = Resources.createResource(2 * GB, 1); + application0.addResourceRequestSpec(priority1, capability01); + + Task task00 = new Task(application0, priority0, new String[] {host0}); + Task task01 = new Task(application0, priority1, new String[] {host0}); + application0.addTask(task00); + application0.addTask(task01); + + // Submit another application + Application application1 = new Application("user_1", "b2", rm); + application1.submit(); + application1.addNodeManager(host0, 1234, nm0); + + Resource capability10 = Resources.createResource(3 * GB, 1); + application1.addResourceRequestSpec(priority0, capability10); + + Resource capability11 = Resources.createResource(4 * GB, 1); + application1.addResourceRequestSpec(priority1, capability11); + + Task task10 = + new Task(application1, priority0, new String[] {host0}); + Task task11 = + new Task(application1, priority1, new String[] {host0}); + application1.addTask(task10); + application1.addTask(task11); + + // Send resource requests to the scheduler + application0.schedule(); + + application1.schedule(); + + // Send a heartbeat to kick the tires on the Scheduler + LOG.info("Kick!"); + + // task_0_0, used=1G + nodeUpdate(rm, nm0); + + // Get allocations from the scheduler + application0.schedule(); + application1.schedule(); + // 1 Task per heart beat should be scheduled + checkNodeResourceUsage(4 * GB, nm0); // task00 (1G) + checkApplicationResourceUsage(1 * GB, application0); + checkApplicationResourceUsage(3 * GB, application1); + + // Another heartbeat + nodeUpdate(rm, nm0); + application0.schedule(); + checkApplicationResourceUsage(3 * GB, application0); + application1.schedule(); + checkApplicationResourceUsage(7 * GB, application1); + checkNodeResourceUsage(10 * GB, nm0); + LOG.info("--- START: testAssignMultiple ---"); + } + + private void nodeUpdate(ResourceManager rm, NodeManager nm) { + RMNode node = rm.getRMContext().getRMNodes().get(nm.getNodeId()); + // Send a heartbeat to kick the tires on the Scheduler + NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node); + rm.getResourceScheduler().handle(nodeUpdate); + } + + private void nodeUpdate(NodeManager nm) { RMNode node = 
resourceManager.getRMContext().getRMNodes().get(nm.getNodeId()); // Send a heartbeat to kick the tires on the Scheduler NodeUpdateSchedulerEvent nodeUpdate = new NodeUpdateSchedulerEvent(node); @@ -446,6 +666,36 @@ public class TestCapacityScheduler { return conf; } + /** + * @param conf, to be modified + * @return, CS configuration which has deleted all childred of queue(b) + * root + * / \ + * a b + * / \ + * a1 a2 + */ + private CapacitySchedulerConfiguration setupQueueConfWithOutChildrenOfB( + CapacitySchedulerConfiguration conf) { + + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, + new String[] {"a","b"}); + + conf.setCapacity(A, A_CAPACITY); + conf.setCapacity(B, B_CAPACITY); + + // Define 2nd-level queues + conf.setQueues(A, new String[] {"a1","a2"}); + conf.setCapacity(A1, A1_CAPACITY); + conf.setUserLimitFactor(A1, 100.0f); + conf.setCapacity(A2, A2_CAPACITY); + conf.setUserLimitFactor(A2, 100.0f); + + LOG.info("Setup top-level queues a and b (without children)"); + return conf; + } + /** * @param conf, to be modified * @return, CS configuration which has deleted a queue(b1) @@ -699,8 +949,7 @@ public class TestCapacityScheduler { Assert.assertEquals(expected, application.getUsedResources().getMemorySize()); } - private void checkNodeResourceUsage(int expected, - org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) { + private void checkNodeResourceUsage(int expected, NodeManager node) { Assert.assertEquals(expected, node.getUsed().getMemorySize()); node.checkResourceUsage(); } @@ -1529,7 +1778,7 @@ public class TestCapacityScheduler { String queue = scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue() .getQueueName(); - Assert.assertTrue(queue.equals("a1")); + Assert.assertEquals("a1", queue); List appsInA = scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); @@ -1554,7 +1803,7 @@ public class TestCapacityScheduler { queue = scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue() .getQueueName(); - Assert.assertTrue(queue.equals("b1")); + Assert.assertEquals("b1", queue); appsInB = scheduler.getAppsInQueue("b"); assertTrue(appsInB.contains(appAttemptId)); @@ -1591,7 +1840,7 @@ public class TestCapacityScheduler { String queue = scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue() .getQueueName(); - Assert.assertTrue(queue.equals("a1")); + Assert.assertEquals("a1", queue); List appsInA = scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); @@ -1613,7 +1862,7 @@ public class TestCapacityScheduler { queue = scheduler.getApplicationAttempt(appsInA2.get(0)).getQueue() .getQueueName(); - Assert.assertTrue(queue.equals("a2")); + Assert.assertEquals("a2", queue); appsInA1 = scheduler.getAppsInQueue("a1"); assertTrue(appsInA1.isEmpty()); @@ -2111,7 +2360,7 @@ public class TestCapacityScheduler { String queue = scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue() .getQueueName(); - Assert.assertTrue(queue.equals("a1")); + Assert.assertEquals("a1", queue); List appsInRoot = scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); @@ -2133,7 +2382,7 @@ public class TestCapacityScheduler { queue = scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue() .getQueueName(); - Assert.assertTrue(queue.equals("b1")); + Assert.assertEquals("b1", queue); appsInB = scheduler.getAppsInQueue("b"); assertTrue(appsInB.contains(appAttemptId)); @@ -2489,7 +2738,7 @@ public class TestCapacityScheduler { String queue = 
scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue() .getQueueName(); - Assert.assertTrue(queue.equals("a1")); + Assert.assertEquals("a1", queue); List appsInRoot = scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); @@ -4425,6 +4674,10 @@ public class TestCapacityScheduler { try { cs.reinitialize(conf, mockContext); } catch (IOException e) { + LOG.error( + "Expected to NOT throw exception when refresh queue tries to delete" + + " a queue WITHOUT running apps", + e); fail("Expected to NOT throw exception when refresh queue tries to delete" + " a queue WITHOUT running apps"); } @@ -4494,6 +4747,83 @@ public class TestCapacityScheduler { cs.stop(); } + /** + * Test for all child queue deletion and thus making parent queue a child. + * @throws Exception + */ + @Test + public void testRefreshQueuesWithAllChildQueuesDeleted() throws Exception { + CapacityScheduler cs = new CapacityScheduler(); + CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); + RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, + null, new RMContainerTokenSecretManager(conf), + new NMTokenSecretManagerInRM(conf), + new ClientToAMTokenSecretManagerInRM(), null); + setupQueueConfiguration(conf); + cs.setConf(new YarnConfiguration()); + cs.setRMContext(resourceManager.getRMContext()); + cs.init(conf); + cs.start(); + cs.reinitialize(conf, rmContext); + checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY); + + // test delete all leaf queues when there is no application running. + Map queues = + cs.getCapacitySchedulerQueueManager().getQueues(); + + CSQueue bQueue = Mockito.spy((LeafQueue) queues.get("b1")); + when(bQueue.getState()).thenReturn(QueueState.RUNNING) + .thenReturn(QueueState.STOPPED); + queues.put("b1", bQueue); + + bQueue = Mockito.spy((LeafQueue) queues.get("b2")); + when(bQueue.getState()).thenReturn(QueueState.STOPPED); + queues.put("b2", bQueue); + + bQueue = Mockito.spy((LeafQueue) queues.get("b3")); + when(bQueue.getState()).thenReturn(QueueState.STOPPED); + queues.put("b3", bQueue); + + conf = new CapacitySchedulerConfiguration(); + setupQueueConfWithOutChildrenOfB(conf); + + // test convert parent queue to leaf queue(root.b) when there is no + // application running. + try { + cs.reinitialize(conf, mockContext); + fail("Expected to throw exception when refresh queue tries to make parent" + + " queue a child queue when one of its children is still running."); + } catch (IOException e) { + //do not do anything, expected exception + } + + // test delete leaf queues(root.b.b1,b2,b3) when there is no application + // running. 
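A note on the precondition this test leans on: the Capacity Scheduler only removes a queue during reinitialize() once that queue is STOPPED, which is why the spies above report QueueState.STOPPED before the refresh. A minimal sketch of the equivalent configuration-driven flow, assuming the standard yarn.scheduler.capacity.<queue-path>.state property layout (queue names b1/b2/b3 as in the test; this is illustrative, not the test's mocked wiring):

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

// Step 1: mark the leaves STOPPED and refresh so they stop accepting apps.
CapacitySchedulerConfiguration stopConf = new CapacitySchedulerConfiguration();
stopConf.set("yarn.scheduler.capacity.root.b.b1.state", "STOPPED");
stopConf.set("yarn.scheduler.capacity.root.b.b2.state", "STOPPED");
stopConf.set("yarn.scheduler.capacity.root.b.b3.state", "STOPPED");
// Step 2: refresh again with a config that declares root.b as a leaf
// (no root.b.queues entry), letting reinitialize() drop b1, b2 and b3.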
+ try { + cs.reinitialize(conf, mockContext); + } catch (IOException e) { + e.printStackTrace(); + fail("Expected to NOT throw exception when refresh queue tries to delete" + + " all children of a parent queue(without running apps)."); + } + CSQueue rootQueue = cs.getRootQueue(); + CSQueue queueB = findQueue(rootQueue, B); + assertNotNull("Parent Queue B should not be deleted", queueB); + Assert.assertTrue("As Queue'B children are not deleted", + queueB instanceof LeafQueue); + + String message = + "Refresh needs to support delete of all children of Parent queue."; + assertNull(message, + cs.getCapacitySchedulerQueueManager().getQueues().get("b3")); + assertNull(message, + cs.getCapacitySchedulerQueueManager().getQueues().get("b1")); + assertNull(message, + cs.getCapacitySchedulerQueueManager().getQueues().get("b2")); + + cs.stop(); + } + /** * Test if we can convert a leaf queue to a parent queue * @throws Exception diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java index 0eb89d7e3d5..0c3130dc2f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java @@ -20,7 +20,10 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -41,20 +44,26 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnSched import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.Assert; 
import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; public class TestCapacitySchedulerAsyncScheduling { private final int GB = 1024; @@ -257,6 +266,144 @@ public class TestCapacitySchedulerAsyncScheduling { rm.stop(); } + // Testcase for YARN-6678 + @Test(timeout = 30000) + public void testCommitOutdatedReservedProposal() throws Exception { + // disable async-scheduling for simulating complex since scene + Configuration disableAsyncConf = new Configuration(conf); + disableAsyncConf.setBoolean( + CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false); + + // init RM & NMs & Nodes + final MockRM rm = new MockRM(disableAsyncConf); + rm.start(); + final MockNM nm1 = rm.registerNode("h1:1234", 9 * GB); + final MockNM nm2 = rm.registerNode("h2:2234", 9 * GB); + + // init scheduler nodes + int waitTime = 1000; + while (waitTime > 0 && + ((AbstractYarnScheduler) rm.getRMContext().getScheduler()) + .getNodeTracker().nodeCount() < 2) { + waitTime -= 10; + Thread.sleep(10); + } + Assert.assertEquals(2, + ((AbstractYarnScheduler) rm.getRMContext().getScheduler()) + .getNodeTracker().nodeCount()); + + YarnScheduler scheduler = rm.getRMContext().getScheduler(); + final SchedulerNode sn1 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm1.getNodeId()); + final SchedulerNode sn2 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm2.getNodeId()); + + // submit app1, am1 is running on nm1 + RMApp app = rm.submitApp(200, "app", "user", null, "default"); + final MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); + // submit app2, am2 is running on nm1 + RMApp app2 = rm.submitApp(200, "app", "user", null, "default"); + final MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1); + + // allocate and launch 2 containers for app1 + allocateAndLaunchContainers(am, nm1, rm, 1, + Resources.createResource(5 * GB), 0, 2); + allocateAndLaunchContainers(am, nm2, rm, 1, + Resources.createResource(5 * GB), 0, 3); + + // nm1 runs 3 containers(app1-container_01/AM, app1-container_02, + // app2-container_01/AM) + // nm2 runs 1 container(app1-container_03) + Assert.assertEquals(3, sn1.getNumContainers()); + Assert.assertEquals(1, sn2.getNumContainers()); + + // reserve 1 container(app1-container_04) for app1 on nm1 + ResourceRequest rr2 = ResourceRequest + .newInstance(Priority.newInstance(0), "*", + Resources.createResource(5 * GB), 1); + am.allocate(Arrays.asList(rr2), null); + nm1.nodeHeartbeat(true); + // wait app1-container_04 reserved on nm1 + waitTime = 1000; + while (waitTime > 0 && sn1.getReservedContainer() == null) { + waitTime -= 10; + Thread.sleep(10); + } + Assert.assertNotNull(sn1.getReservedContainer()); + + final CapacityScheduler cs = (CapacityScheduler) scheduler; + final CapacityScheduler spyCs = Mockito.spy(cs); + final AtomicBoolean isFirstReserve = new AtomicBoolean(true); + final AtomicBoolean isChecked = new AtomicBoolean(false); + // handle CapacityScheduler#tryCommit, + // reproduce the process that can raise IllegalStateException before + Mockito.doAnswer(new Answer() { + public Object answer(InvocationOnMock invocation) throws Exception { + ResourceCommitRequest request = + (ResourceCommitRequest) invocation.getArguments()[1]; + if (request.getContainersToReserve().size() > 0 && isFirstReserve + .compareAndSet(true, false)) { + // 
release app1-container_03 on nm2 + RMContainer killableContainer = + sn2.getCopiedListOfRunningContainers().get(0); + cs.completedContainer(killableContainer, ContainerStatus + .newInstance(killableContainer.getContainerId(), + ContainerState.COMPLETE, "", + ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), + RMContainerEventType.KILL); + Assert.assertEquals(0, sn2.getCopiedListOfRunningContainers().size()); + // unreserve app1-container_04 on nm1 + // and allocate app1-container_05 on nm2 + cs.handle(new NodeUpdateSchedulerEvent(sn2.getRMNode())); + int waitTime = 1000; + while (waitTime > 0 + && sn2.getCopiedListOfRunningContainers().size() == 0) { + waitTime -= 10; + Thread.sleep(10); + } + Assert.assertEquals(1, sn2.getCopiedListOfRunningContainers().size()); + Assert.assertNull(sn1.getReservedContainer()); + + // reserve app2-container_02 on nm1 + ResourceRequest rr3 = ResourceRequest + .newInstance(Priority.newInstance(0), "*", + Resources.createResource(5 * GB), 1); + am2.allocate(Arrays.asList(rr3), null); + cs.handle(new NodeUpdateSchedulerEvent(sn1.getRMNode())); + waitTime = 1000; + while (waitTime > 0 && sn1.getReservedContainer() == null) { + waitTime -= 10; + Thread.sleep(10); + } + Assert.assertNotNull(sn1.getReservedContainer()); + + // call real apply + try { + cs.tryCommit((Resource) invocation.getArguments()[0], + (ResourceCommitRequest) invocation.getArguments()[1]); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + isChecked.set(true); + } else { + cs.tryCommit((Resource) invocation.getArguments()[0], + (ResourceCommitRequest) invocation.getArguments()[1]); + } + return null; + } + }).when(spyCs).tryCommit(Mockito.any(Resource.class), + Mockito.any(ResourceCommitRequest.class)); + + spyCs.handle(new NodeUpdateSchedulerEvent(sn1.getRMNode())); + + waitTime = 1000; + while (waitTime > 0 && !isChecked.get()) { + waitTime -= 10; + Thread.sleep(10); + } + rm.stop(); + } private void allocateAndLaunchContainers(MockAM am, MockNM nm, MockRM rm, int nContainer, Resource resource, int priority, int startContainerId) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerDynamicBehavior.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerDynamicBehavior.java index 483ba1bc95d..9aba30c2e88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerDynamicBehavior.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerDynamicBehavior.java @@ -222,7 +222,7 @@ public class TestCapacitySchedulerDynamicBehavior { String queue = scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue() .getQueueName(); - Assert.assertTrue(queue.equals("b1")); + Assert.assertEquals("b1", queue); List appsInRoot = scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java index b4b05ed2bb0..291a74ed599 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java @@ -51,8 +51,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerStat import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler - .SchedulerApplicationAttempt; + import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica @@ -60,11 +59,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.Assert; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; public class TestContainerResizing { @@ -205,7 +202,7 @@ public class TestContainerResizing { RMNodeImpl rmNode = (RMNodeImpl) rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); Collection decreasedContainers = - rmNode.getToBeDecreasedContainers(); + rmNode.getToBeUpdatedContainers(); boolean rmNodeReceivedDecreaseContainer = false; for (Container c : decreasedContainers) { if (c.getId().equals(containerId1) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java index 184e8547324..a76ed6414f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java @@ -319,7 +319,7 @@ public class TestIncreaseAllocationExpirer { verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 16 * GB); // Verify NM receives the decrease message (3G) List containersToDecrease = - nm1.nodeHeartbeat(true).getContainersToDecrease(); + nm1.nodeHeartbeat(true).getContainersToUpdate(); Assert.assertEquals(1, 
containersToDecrease.size()); Assert.assertEquals( 3 * GB, containersToDecrease.get(0).getResource().getMemorySize()); @@ -435,7 +435,7 @@ public class TestIncreaseAllocationExpirer { .getAllocatedResource().getMemorySize()); // Verify NM receives 2 decrease message List containersToDecrease = - nm1.nodeHeartbeat(true).getContainersToDecrease(); + nm1.nodeHeartbeat(true).getContainersToUpdate(); Assert.assertEquals(2, containersToDecrease.size()); // Sort the list to make sure containerId3 is the first Collections.sort(containersToDecrease); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 2864d7fd7f2..d45f756a2e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -820,8 +820,6 @@ public class TestLeafQueue { applyCSAssignment(clusterResource, assign, b, nodes, apps); } while (assign.getResource().getMemorySize() > 0 && assign.getAssignmentInformation().getNumReservations() == 0); - //LOG.info("user_0: " + queueUser0.getUsed()); - //LOG.info("user_1: " + queueUser1.getUsed()); assertTrue("Verify user_0 got resources ", queueUser0.getUsed() .getMemorySize() > 0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerNode.java index 3927b00f68e..0e3d3445b25 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerNode.java @@ -31,6 +31,7 @@ import java.util.Collections; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -68,6 +69,16 @@ public class TestFSSchedulerNode { when(container.getExecutionType()).thenReturn(ExecutionType.GUARANTEED); when(container.getAllocatedResource()). 
thenReturn(Resources.clone(request)); + when(container.compareTo(any())).thenAnswer(new Answer() { + public Integer answer(InvocationOnMock invocation) { + return + Long.compare( + ((RMContainer)invocation.getMock()).getContainerId() + .getContainerId(), + ((RMContainer)invocation.getArguments()[0]).getContainerId() + .getContainerId()); + } + }); containers.add(container); return container; } @@ -224,6 +235,47 @@ public class TestFSSchedulerNode { finalValidation(schedulerNode); } + /** + * Allocate a single container twice and release. + */ + @Test + public void testDuplicatePreemption() { + RMNode node = createNode(); + FSSchedulerNode schedulerNode = new FSSchedulerNode(node, false); + + // Launch containers and saturate the cluster + saturateCluster(schedulerNode); + assertEquals("Container should be allocated", + Resources.multiply(containers.get(0).getContainer().getResource(), + containers.size()), + schedulerNode.getAllocatedResource()); + + // Request preemption twice + FSAppAttempt starvingApp = createStarvingApp(schedulerNode, + Resource.newInstance(1024, 1)); + schedulerNode.addContainersForPreemption( + Collections.singletonList(containers.get(0)), starvingApp); + schedulerNode.addContainersForPreemption( + Collections.singletonList(containers.get(0)), starvingApp); + assertEquals( + "No resource amount should be reserved for preemptees", + containers.get(0).getAllocatedResource(), + schedulerNode.getTotalReserved()); + + // Preemption occurs release one container + schedulerNode.releaseContainer(containers.get(0).getContainerId(), true); + allocateContainers(schedulerNode); + assertEquals("Container should be allocated", + schedulerNode.getTotalResource(), + schedulerNode.getAllocatedResource()); + + // Release all remaining containers + for (int i = 1; i < containers.size(); ++i) { + schedulerNode.releaseContainer(containers.get(i).getContainerId(), true); + } + finalValidation(schedulerNode); + } + /** * Allocate and release three containers requested by two apps. */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 0d54c3322c8..941c215df4c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -2107,49 +2107,49 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); + int minReqSize = + FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB; + // First ask, queue1 requests 1 large (minReqSize * 2). 
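A short aside on minReqSize: the Fair Scheduler rounds every memory ask up to a multiple of the increment allocation (DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 1024 MB), so the test sizes its requests in whole increments to keep the expected numbers exact. Illustrative arithmetic only, not the scheduler's actual code path:

// Hypothetical raw ask of 1500 MB normalized up to the next 1024 MB increment.
int incrementMb = 1024;   // FairSchedulerConfiguration default increment
int askedMb = 1500;
int normalizedMb = ((askedMb + incrementMb - 1) / incrementMb) * incrementMb;
System.out.println(normalizedMb);  // prints 2048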
ApplicationAttemptId id11 = createAppAttemptId(1, 1); createMockRMApp(id11); - scheduler.addApplication(id11.getApplicationId(), "root.queue1", "user1", false); + scheduler.addApplication(id11.getApplicationId(), + "root.queue1", "user1", false); scheduler.addApplicationAttempt(id11, false, false); - ApplicationAttemptId id21 = createAppAttemptId(2, 1); - createMockRMApp(id21); - scheduler.addApplication(id21.getApplicationId(), "root.queue2", "user1", false); - scheduler.addApplicationAttempt(id21, false, false); - ApplicationAttemptId id22 = createAppAttemptId(2, 2); - createMockRMApp(id22); - - scheduler.addApplication(id22.getApplicationId(), "root.queue2", "user1", false); - scheduler.addApplicationAttempt(id22, false, false); - - int minReqSize = - FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB; - - // First ask, queue1 requests 1 large (minReqSize * 2). List ask1 = new ArrayList(); - ResourceRequest request1 = - createResourceRequest(minReqSize * 2, ResourceRequest.ANY, 1, 1, true); + ResourceRequest request1 = createResourceRequest(minReqSize * 2, + ResourceRequest.ANY, 1, 1, true); ask1.add(request1); scheduler.allocate(id11, ask1, new ArrayList(), null, null, NULL_UPDATE_REQUESTS); // Second ask, queue2 requests 1 large. + ApplicationAttemptId id21 = createAppAttemptId(2, 1); + createMockRMApp(id21); + scheduler.addApplication(id21.getApplicationId(), + "root.queue2", "user1", false); + scheduler.addApplicationAttempt(id21, false, false); List ask2 = new ArrayList(); - ResourceRequest request2 = createResourceRequest(2 * minReqSize, "foo", 1, 1, - false); + ResourceRequest request2 = createResourceRequest(2 * minReqSize, + "foo", 1, 1, false); ResourceRequest request3 = createResourceRequest(2 * minReqSize, - ResourceRequest.ANY, 1, 1, false); + ResourceRequest.ANY, 1, 1, false); ask2.add(request2); ask2.add(request3); scheduler.allocate(id21, ask2, new ArrayList(), null, null, NULL_UPDATE_REQUESTS); // Third ask, queue2 requests 2 small (minReqSize). 
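The createResourceRequest helper used here comes from FairSchedulerTestBase; outside the test base the same kind of ask can be expressed with the public records API. A hedged sketch (priority, size and container count mirror the third ask below, but the snippet is not the helper's implementation):

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

// Two 1 GB containers at priority 2, with locality relaxed so the ask can
// fall back from a named resource ("bar" in the test) to ANY.
ResourceRequest anyAsk = ResourceRequest.newInstance(
    Priority.newInstance(2), ResourceRequest.ANY,
    Resource.newInstance(1024, 1), 2, true);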
+ ApplicationAttemptId id22 = createAppAttemptId(2, 2); + createMockRMApp(id22); + scheduler.addApplication(id22.getApplicationId(), + "root.queue2", "user1", false); + scheduler.addApplicationAttempt(id22, false, false); List ask3 = new ArrayList(); - ResourceRequest request4 = createResourceRequest(minReqSize, "bar", 2, 2, - true); + ResourceRequest request4 = createResourceRequest(minReqSize, + "bar", 2, 2, true); ResourceRequest request5 = createResourceRequest(minReqSize, - ResourceRequest.ANY, 2, 2, true); + ResourceRequest.ANY, 2, 2, true); ask3.add(request4); ask3.add(request5); scheduler.allocate(id22, ask3, new ArrayList(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithXFSFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithXFSFilter.java index bcf2b21f884..2f7ecde2fb7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithXFSFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/webapp/TestRMWithXFSFilter.java @@ -39,8 +39,8 @@ import org.junit.Test; import java.util.HashMap; import java.util.Map; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; /** * Used TestRMWebServices as an example of web invocations of RM and added @@ -72,9 +72,9 @@ public class TestRMWithXFSFilter extends JerseyTestBase { ClientResponse response = r.path("ws").path("v1").path("cluster") .path("info").accept("application/xml") .get(ClientResponse.class); - assertTrue("Should have received DENY x-frame options header", - response.getHeaders().get(XFrameOptionsFilter.X_FRAME_OPTIONS).get(0) - .equals("DENY")); + assertEquals("Should have received DENY x-frame options header", + "DENY", + response.getHeaders().get(XFrameOptionsFilter.X_FRAME_OPTIONS).get(0)); } protected void createInjector(String headerValue) { @@ -123,9 +123,9 @@ public class TestRMWithXFSFilter extends JerseyTestBase { ClientResponse response = r.path("ws").path("v1").path("cluster") .path("info").accept("application/xml") .get(ClientResponse.class); - assertTrue("Should have received SAMEORIGIN x-frame options header", - response.getHeaders().get(XFrameOptionsFilter.X_FRAME_OPTIONS).get(0) - .equals("SAMEORIGIN")); + assertEquals("Should have received SAMEORIGIN x-frame options header", + "SAMEORIGIN", + response.getHeaders().get(XFrameOptionsFilter.X_FRAME_OPTIONS).get(0)); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml new file mode 100644 index 00000000000..e8b4d565a81 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml @@ -0,0 +1,110 @@ + + + + + hadoop-yarn-server + org.apache.hadoop + 3.0.0-beta1-SNAPSHOT + + 4.0.0 + org.apache.hadoop + hadoop-yarn-server-router + 3.0.0-beta1-SNAPSHOT + Apache Hadoop YARN Router + + + + ${project.parent.parent.basedir} + + + + + org.apache.hadoop + hadoop-yarn-api + + + + org.apache.hadoop + hadoop-common + + + + org.apache.hadoop + hadoop-yarn-common + + + + org.apache.hadoop + hadoop-yarn-server-common + + + + org.apache.hadoop + 
hadoop-yarn-server-common + test-jar + test + + + + junit + junit + test + + + + + org.apache.hadoop + hadoop-common + test-jar + test + + + + org.apache.hadoop + hadoop-yarn-server-resourcemanager + + + + org.apache.hadoop + hadoop-yarn-server-nodemanager + test + + + + org.mockito + mockito-all + test + + + + com.google.inject + guice + + + + + + + + org.apache.rat + apache-rat-plugin + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java new file mode 100644 index 00000000000..121e5344fdb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil; +import org.apache.hadoop.yarn.server.router.clientrm.RouterClientRMService; +import org.apache.hadoop.yarn.server.router.rmadmin.RouterRMAdminService; +import org.apache.hadoop.yarn.server.router.webapp.RouterWebApp; +import org.apache.hadoop.yarn.webapp.WebApp; +import org.apache.hadoop.yarn.webapp.WebApps; +import org.apache.hadoop.yarn.webapp.WebApps.Builder; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * The router is a stateless YARN component which is the entry point to the + * cluster. It can be deployed on multiple nodes behind a Virtual IP (VIP) with + * a LoadBalancer. + * + * The Router exposes the ApplicationClientProtocol (RPC and REST) to the + * outside world, transparently hiding the presence of ResourceManager(s), which + * allows users to request and update reservations, submit and kill + * applications, and request status on running applications. + * + * In addition, it exposes the ResourceManager Admin API. 
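Because the Router exposes the same ApplicationClientProtocol, an existing YarnClient can be pointed at it purely through configuration. A sketch under the assumption that the Router's client-facing service listens at router-host:8050 (a placeholder address, not a shipped default):

import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

YarnConfiguration conf = new YarnConfiguration();
// Placeholder endpoint: direct client RPC at the Router instead of an RM.
conf.set(YarnConfiguration.RM_ADDRESS, "router-host:8050");
YarnClient client = YarnClient.createYarnClient();
client.init(conf);
client.start();
// The client then submits, kills and queries applications exactly as before;
// the Router forwards each call through its interceptor pipeline.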
+ * + * This provides a placeholder for throttling mis-behaving clients (YARN-1546) + * and masks the access to multiple RMs (YARN-3659). + */ +public class Router extends CompositeService { + + private static final Logger LOG = LoggerFactory.getLogger(Router.class); + private static CompositeServiceShutdownHook routerShutdownHook; + private Configuration conf; + private AtomicBoolean isStopping = new AtomicBoolean(false); + private RouterClientRMService clientRMProxyService; + private RouterRMAdminService rmAdminProxyService; + private WebApp webApp; + @VisibleForTesting + protected String webAppAddress; + + /** + * Priority of the Router shutdown hook. + */ + public static final int SHUTDOWN_HOOK_PRIORITY = 30; + + public Router() { + super(Router.class.getName()); + } + + protected void doSecureLogin() throws IOException { + // TODO YARN-6539 Create SecureLogin inside Router + } + + @Override + protected void serviceInit(Configuration config) throws Exception { + this.conf = config; + // ClientRM Proxy + clientRMProxyService = createClientRMProxyService(); + addService(clientRMProxyService); + // RMAdmin Proxy + rmAdminProxyService = createRMAdminProxyService(); + addService(rmAdminProxyService); + // WebService + webAppAddress = WebAppUtils.getWebAppBindURL(this.conf, + YarnConfiguration.ROUTER_BIND_HOST, + WebAppUtils.getRouterWebAppURLWithoutScheme(this.conf)); + super.serviceInit(conf); + } + + @Override + protected void serviceStart() throws Exception { + try { + doSecureLogin(); + } catch (IOException e) { + throw new YarnRuntimeException("Failed Router login", e); + } + startWepApp(); + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + if (webApp != null) { + webApp.stop(); + } + if (isStopping.getAndSet(true)) { + return; + } + super.serviceStop(); + } + + protected void shutDown() { + new Thread() { + @Override + public void run() { + Router.this.stop(); + } + }.start(); + } + + protected RouterClientRMService createClientRMProxyService() { + return new RouterClientRMService(); + } + + protected RouterRMAdminService createRMAdminProxyService() { + return new RouterRMAdminService(); + } + + @Private + public WebApp getWebapp() { + return this.webApp; + } + + @VisibleForTesting + public void startWepApp() { + + RMWebAppUtil.setupSecurityAndFilters(conf, null); + + Builder builder = + WebApps.$for("cluster", null, null, "ws").with(conf).at(webAppAddress); + webApp = builder.start(new RouterWebApp(this)); + } + + public static void main(String[] argv) { + Configuration conf = new YarnConfiguration(); + Thread + .setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); + StringUtils.startupShutdownMessage(Router.class, argv, LOG); + Router router = new Router(); + try { + + // Remove the old hook if we are rebooting. 
+ if (null != routerShutdownHook) { + ShutdownHookManager.get().removeShutdownHook(routerShutdownHook); + } + + routerShutdownHook = new CompositeServiceShutdownHook(router); + ShutdownHookManager.get().addShutdownHook(routerShutdownHook, + SHUTDOWN_HOOK_PRIORITY); + + router.init(conf); + router.start(); + } catch (Throwable t) { + LOG.error("Error starting Router", t); + System.exit(-1); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterServerUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterServerUtil.java new file mode 100644 index 00000000000..cc96da62331 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterServerUtil.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Common utility methods used by the Router server. + * + */ +@Private +@Unstable +public final class RouterServerUtil { + + /** Disable constructor. */ + private RouterServerUtil() { + } + + public static final Logger LOG = + LoggerFactory.getLogger(RouterServerUtil.class); + + /** + * Throws an exception due to an error. + * + * @param errMsg the error message + * @param t the throwable raised in the called class. 
+ * @throws YarnException on failure + */ + @Public + @Unstable + public static void logAndThrowException(String errMsg, Throwable t) + throws YarnException { + if (t != null) { + LOG.error(errMsg, t); + throw new YarnException(errMsg, t); + } else { + LOG.error(errMsg); + throw new YarnException(errMsg); + } + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/AbstractClientRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/AbstractClientRequestInterceptor.java new file mode 100644 index 00000000000..01ba3bdcadf --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/AbstractClientRequestInterceptor.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Implements the {@link ClientRequestInterceptor} interface and provides common + * functionality which can can be used and/or extended by other concrete + * intercepter classes. + * + */ +public abstract class AbstractClientRequestInterceptor + implements ClientRequestInterceptor { + + private static final Logger LOG = + LoggerFactory.getLogger(AbstractClientRequestInterceptor.class); + + private Configuration conf; + private ClientRequestInterceptor nextInterceptor; + + @SuppressWarnings("checkstyle:visibilitymodifier") + protected UserGroupInformation user = null; + + /** + * Sets the {@link ClientRequestInterceptor} in the chain. + */ + @Override + public void setNextInterceptor(ClientRequestInterceptor nextInterceptor) { + this.nextInterceptor = nextInterceptor; + } + + /** + * Sets the {@link Configuration}. + */ + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + if (this.nextInterceptor != null) { + this.nextInterceptor.setConf(conf); + } + } + + /** + * Gets the {@link Configuration}. + */ + @Override + public Configuration getConf() { + return this.conf; + } + + /** + * Initializes the {@link ClientRequestInterceptor}. + */ + @Override + public void init(String userName) { + setupUser(userName); + if (this.nextInterceptor != null) { + this.nextInterceptor.init(userName); + } + } + + /** + * Disposes the {@link ClientRequestInterceptor}. 
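The chaining contract is easiest to see with a concrete example. A hedged sketch of a custom interceptor that inspects one call and otherwise behaves like the default, final link (the class name is illustrative and it extends the DefaultClientRequestInterceptor introduced later in this patch; it is not part of the patch itself):

import java.io.IOException;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.router.clientrm.DefaultClientRequestInterceptor;

public class LoggingClientRequestInterceptor
    extends DefaultClientRequestInterceptor {
  @Override
  public GetNewApplicationResponse getNewApplication(
      GetNewApplicationRequest request) throws YarnException, IOException {
    // Inspect, log or throttle here, then let the default implementation
    // forward the call to the ResourceManager.
    return super.getNewApplication(request);
  }
}

Such a class would then be named in the Router's interceptor pipeline configuration so that RouterClientRMService instantiates it in place of the default.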
+ */ + @Override + public void shutdown() { + if (this.nextInterceptor != null) { + this.nextInterceptor.shutdown(); + } + } + + /** + * Gets the next {@link ClientRequestInterceptor} in the chain. + */ + @Override + public ClientRequestInterceptor getNextInterceptor() { + return this.nextInterceptor; + } + + private void setupUser(String userName) { + + try { + // Do not create a proxy user if user name matches the user name on + // current UGI + if (userName.equalsIgnoreCase( + UserGroupInformation.getCurrentUser().getUserName())) { + user = UserGroupInformation.getCurrentUser(); + } else { + user = UserGroupInformation.createProxyUser(userName, + UserGroupInformation.getCurrentUser()); + } + } catch (IOException e) { + String message = "Error while creating Router ClientRM Service for user:"; + if (user != null) { + message += ", user: " + user; + } + + LOG.info(message); + throw new YarnRuntimeException(message, e); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/ClientRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/ClientRequestInterceptor.java new file mode 100644 index 00000000000..2f8fb936345 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/ClientRequestInterceptor.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; + +/** + * Defines the contract to be implemented by the request intercepter classes, + * that can be used to intercept and inspect messages sent from the client to + * the resource manager. + */ +public interface ClientRequestInterceptor + extends ApplicationClientProtocol, Configurable { + /** + * This method is called for initializing the intercepter. This is guaranteed + * to be called only once in the lifetime of this instance. + * + * @param user the name of the client + */ + void init(String user); + + /** + * This method is called to release the resources held by the intercepter. + * This will be called when the application pipeline is being destroyed. The + * concrete implementations should dispose the resources and forward the + * request to the next intercepter, if any. + */ + void shutdown(); + + /** + * Sets the next intercepter in the pipeline. The concrete implementation of + * this interface should always pass the request to the nextInterceptor after + * inspecting the message. 
The last intercepter in the chain is responsible to + * send the messages to the resource manager service and so the last + * intercepter will not receive this method call. + * + * @param nextInterceptor the ClientRequestInterceptor to set in the pipeline + */ + void setNextInterceptor(ClientRequestInterceptor nextInterceptor); + + /** + * Returns the next intercepter in the chain. + * + * @return the next intercepter in the chain + */ + ClientRequestInterceptor getNextInterceptor(); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java new file mode 100644 index 00000000000..71de6b470e9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java @@ -0,0 +1,311 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; +import 
org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; +import org.apache.hadoop.yarn.client.ClientRMProxy; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Extends the {@code AbstractRequestInterceptorClient} class and provides an + * implementation that simply forwards the client requests to the cluster + * resource manager. + * + */ +public class DefaultClientRequestInterceptor + extends AbstractClientRequestInterceptor { + private ApplicationClientProtocol clientRMProxy; + + @Override + public void init(String userName) { + super.init(userName); + + final Configuration conf = this.getConf(); + try { + clientRMProxy = + user.doAs(new PrivilegedExceptionAction() { + @Override + public ApplicationClientProtocol run() throws Exception { + return ClientRMProxy.createRMProxy(conf, + ApplicationClientProtocol.class); + } + }); + } catch (Exception e) { + throw new YarnRuntimeException( + "Unable to create the interface to reach the YarnRM", e); + } + } + + @Override + public void setNextInterceptor(ClientRequestInterceptor next) { + throw new YarnRuntimeException( + "setNextInterceptor is being called on DefaultRequestInterceptor," + + "which should be the last one in the chain " + + "Check if the interceptor pipeline configuration is correct"); + } + + @Override + public GetNewApplicationResponse getNewApplication( + GetNewApplicationRequest request) throws YarnException, IOException { + return clientRMProxy.getNewApplication(request); + } + + @Override + public SubmitApplicationResponse submitApplication( + SubmitApplicationRequest request) throws YarnException, IOException { + return clientRMProxy.submitApplication(request); + } + + @Override + public KillApplicationResponse forceKillApplication( + KillApplicationRequest request) throws YarnException, IOException { + return clientRMProxy.forceKillApplication(request); + } + + @Override + public GetClusterMetricsResponse getClusterMetrics( + GetClusterMetricsRequest request) throws YarnException, IOException { + return clientRMProxy.getClusterMetrics(request); + } + + @Override + public GetClusterNodesResponse 
getClusterNodes(GetClusterNodesRequest request) + throws YarnException, IOException { + return clientRMProxy.getClusterNodes(request); + } + + @Override + public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) + throws YarnException, IOException { + return clientRMProxy.getQueueInfo(request); + } + + @Override + public GetQueueUserAclsInfoResponse getQueueUserAcls( + GetQueueUserAclsInfoRequest request) throws YarnException, IOException { + return clientRMProxy.getQueueUserAcls(request); + } + + @Override + public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues( + MoveApplicationAcrossQueuesRequest request) + throws YarnException, IOException { + return clientRMProxy.moveApplicationAcrossQueues(request); + } + + @Override + public GetNewReservationResponse getNewReservation( + GetNewReservationRequest request) throws YarnException, IOException { + return clientRMProxy.getNewReservation(request); + } + + @Override + public ReservationSubmissionResponse submitReservation( + ReservationSubmissionRequest request) throws YarnException, IOException { + return clientRMProxy.submitReservation(request); + } + + @Override + public ReservationListResponse listReservations( + ReservationListRequest request) throws YarnException, IOException { + return clientRMProxy.listReservations(request); + } + + @Override + public ReservationUpdateResponse updateReservation( + ReservationUpdateRequest request) throws YarnException, IOException { + return clientRMProxy.updateReservation(request); + } + + @Override + public ReservationDeleteResponse deleteReservation( + ReservationDeleteRequest request) throws YarnException, IOException { + return clientRMProxy.deleteReservation(request); + } + + @Override + public GetNodesToLabelsResponse getNodeToLabels( + GetNodesToLabelsRequest request) throws YarnException, IOException { + return clientRMProxy.getNodeToLabels(request); + } + + @Override + public GetLabelsToNodesResponse getLabelsToNodes( + GetLabelsToNodesRequest request) throws YarnException, IOException { + return clientRMProxy.getLabelsToNodes(request); + } + + @Override + public GetClusterNodeLabelsResponse getClusterNodeLabels( + GetClusterNodeLabelsRequest request) throws YarnException, IOException { + return clientRMProxy.getClusterNodeLabels(request); + } + + @Override + public GetApplicationReportResponse getApplicationReport( + GetApplicationReportRequest request) throws YarnException, IOException { + return clientRMProxy.getApplicationReport(request); + } + + @Override + public GetApplicationsResponse getApplications(GetApplicationsRequest request) + throws YarnException, IOException { + return clientRMProxy.getApplications(request); + } + + @Override + public GetApplicationAttemptReportResponse getApplicationAttemptReport( + GetApplicationAttemptReportRequest request) + throws YarnException, IOException { + return clientRMProxy.getApplicationAttemptReport(request); + } + + @Override + public GetApplicationAttemptsResponse getApplicationAttempts( + GetApplicationAttemptsRequest request) throws YarnException, IOException { + return clientRMProxy.getApplicationAttempts(request); + } + + @Override + public GetContainerReportResponse getContainerReport( + GetContainerReportRequest request) throws YarnException, IOException { + return clientRMProxy.getContainerReport(request); + } + + @Override + public GetContainersResponse getContainers(GetContainersRequest request) + throws YarnException, IOException { + return clientRMProxy.getContainers(request); + } + + @Override + 
public GetDelegationTokenResponse getDelegationToken( + GetDelegationTokenRequest request) throws YarnException, IOException { + return clientRMProxy.getDelegationToken(request); + } + + @Override + public RenewDelegationTokenResponse renewDelegationToken( + RenewDelegationTokenRequest request) throws YarnException, IOException { + return clientRMProxy.renewDelegationToken(request); + } + + @Override + public CancelDelegationTokenResponse cancelDelegationToken( + CancelDelegationTokenRequest request) throws YarnException, IOException { + return clientRMProxy.cancelDelegationToken(request); + } + + @Override + public FailApplicationAttemptResponse failApplicationAttempt( + FailApplicationAttemptRequest request) throws YarnException, IOException { + return clientRMProxy.failApplicationAttempt(request); + } + + @Override + public UpdateApplicationPriorityResponse updateApplicationPriority( + UpdateApplicationPriorityRequest request) + throws YarnException, IOException { + return clientRMProxy.updateApplicationPriority(request); + } + + @Override + public SignalContainerResponse signalToContainer( + SignalContainerRequest request) throws YarnException, IOException { + return clientRMProxy.signalToContainer(request); + } + + @Override + public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( + UpdateApplicationTimeoutsRequest request) + throws YarnException, IOException { + return clientRMProxy.updateApplicationTimeouts(request); + } + + @VisibleForTesting + public void setRMClient(ApplicationClientProtocol clientRM) { + this.clientRMProxy = clientRM; + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java new file mode 100644 index 00000000000..7268ebd949a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java @@ -0,0 +1,677 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.commons.lang.NotImplementedException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; +import 
org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.failover.FederationProxyProviderUtil; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.policies.RouterPolicyFacade; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.router.RouterServerUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Extends the {@code AbstractRequestInterceptorClient} class and provides an + * implementation for federation of YARN RM and scaling an application across + * multiple YARN SubClusters. All the federation specific implementation is + * encapsulated in this class. This is always the last intercepter in the chain. + */ +public class FederationClientInterceptor + extends AbstractClientRequestInterceptor { + + /* + * TODO YARN-6740 Federation Router (hiding multiple RMs for + * ApplicationClientProtocol) phase 2. + * + * The current implementation finalized the main 4 calls (getNewApplication, + * submitApplication, forceKillApplication and getApplicationReport). Those + * allow us to execute applications E2E. 
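As context for the four finalized calls, a minimal client-side sketch of that E2E path follows; it is illustrative only (the wrapper class, host name, and port are assumptions) and simply points a standard YarnClient at the Router's client-facing address:

```java
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RouterE2ESketch {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    // Point the client at the Router instead of an individual RM
    // (hypothetical address; use the Router's configured clientrm address).
    conf.set(YarnConfiguration.RM_ADDRESS, "router.example.com:8050");

    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    try {
      // getNewApplication + submitApplication go through the interceptor chain.
      YarnClientApplication app = client.createApplication();
      ApplicationSubmissionContext ctx = app.getApplicationSubmissionContext();
      ApplicationId appId = ctx.getApplicationId();
      // AM container spec, resource and queue setup elided for brevity.
      client.submitApplication(ctx);

      // getApplicationReport is routed to the home SubCluster of the app.
      ApplicationReport report = client.getApplicationReport(appId);
      System.out.println(appId + " is in state "
          + report.getYarnApplicationState());

      // forceKillApplication is exposed as killApplication on YarnClient.
      client.killApplication(appId);
    } finally {
      client.stop();
    }
  }
}
```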
+ */
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(FederationClientInterceptor.class);
+
+ private int numSubmitRetries;
+ private Map<SubClusterId, ApplicationClientProtocol> clientRMProxies;
+ private FederationStateStoreFacade federationFacade;
+ private Random rand;
+ private RouterPolicyFacade policyFacade;
+
+ @Override
+ public void init(String userName) {
+ super.init(userName);
+
+ federationFacade = FederationStateStoreFacade.getInstance();
+ rand = new Random(System.currentTimeMillis());
+
+ final Configuration conf = this.getConf();
+
+ try {
+ policyFacade = new RouterPolicyFacade(conf, federationFacade,
+ this.federationFacade.getSubClusterResolver(), null);
+ } catch (FederationPolicyInitializationException e) {
+ LOG.error(e.getMessage());
+ }
+
+ numSubmitRetries =
+ conf.getInt(YarnConfiguration.ROUTER_CLIENTRM_SUBMIT_RETRY,
+ YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_SUBMIT_RETRY);
+
+ clientRMProxies =
+ new ConcurrentHashMap<SubClusterId, ApplicationClientProtocol>();
+
+ }
+
+ @Override
+ public void setNextInterceptor(ClientRequestInterceptor next) {
+ throw new YarnRuntimeException("setNextInterceptor is being called on "
+ + "FederationClientInterceptor, which should be the last one "
+ + "in the chain. Check if the interceptor pipeline configuration "
+ + "is correct");
+ }
+
+ @VisibleForTesting
+ protected ApplicationClientProtocol getClientRMProxyForSubCluster(
+ SubClusterId subClusterId) throws YarnException {
+
+ if (clientRMProxies.containsKey(subClusterId)) {
+ return clientRMProxies.get(subClusterId);
+ }
+
+ ApplicationClientProtocol clientRMProxy = null;
+ try {
+ clientRMProxy = FederationProxyProviderUtil.createRMProxy(getConf(),
+ ApplicationClientProtocol.class, subClusterId, user);
+ } catch (Exception e) {
+ RouterServerUtil.logAndThrowException(
+ "Unable to create the interface to reach the SubCluster "
+ + subClusterId,
+ e);
+ }
+
+ clientRMProxies.put(subClusterId, clientRMProxy);
+ return clientRMProxy;
+ }
+
+ private SubClusterId getRandomActiveSubCluster(
+ Map<SubClusterId, SubClusterInfo> activeSubclusters)
+ throws YarnException {
+
+ if (activeSubclusters == null || activeSubclusters.size() < 1) {
+ RouterServerUtil.logAndThrowException(
+ FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE, null);
+ }
+ List<SubClusterId> list = new ArrayList<>(activeSubclusters.keySet());
+
+ return list.get(rand.nextInt(list.size()));
+ }
+
+ /**
+ * The Yarn Router forwards every getNewApplication request to one of the
+ * active RMs. During this operation there is no communication with the
+ * State Store. The Router forwards the request to any SubCluster and retries
+ * on up to #numSubmitRetries different SubClusters. The SubClusters are
+ * randomly chosen from the active ones.
+ *
+ * Possible failures and behaviors:
+ *
+ * Client: identical behavior as {@code ClientRMService}.
+ *
+ * Router: the Client will time out and resubmit.
+ *
+ * ResourceManager: the Router times out and contacts another RM.
+ *
+ * StateStore: not in the execution.
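A small configuration sketch of the retry budget described above, reusing the YarnConfiguration constants that init() reads; the standalone class is purely illustrative:

```java
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RouterRetryConfigSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Allow up to 5 attempts on distinct, randomly chosen active SubClusters
    // before getNewApplication/submitApplication give up.
    conf.setInt(YarnConfiguration.ROUTER_CLIENTRM_SUBMIT_RETRY, 5);

    int retries = conf.getInt(YarnConfiguration.ROUTER_CLIENTRM_SUBMIT_RETRY,
        YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_SUBMIT_RETRY);
    System.out.println("Router submit retries: " + retries);
  }
}
```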
+ */
+ @Override
+ public GetNewApplicationResponse getNewApplication(
+ GetNewApplicationRequest request) throws YarnException, IOException {
+ Map<SubClusterId, SubClusterInfo> subClustersActive =
+ federationFacade.getSubClusters(true);
+
+ for (int i = 0; i < numSubmitRetries; ++i) {
+ SubClusterId subClusterId = getRandomActiveSubCluster(subClustersActive);
+ LOG.debug(
+ "getNewApplication try #" + i + " on SubCluster " + subClusterId);
+ ApplicationClientProtocol clientRMProxy =
+ getClientRMProxyForSubCluster(subClusterId);
+ GetNewApplicationResponse response = null;
+ try {
+ response = clientRMProxy.getNewApplication(request);
+ } catch (Exception e) {
+ LOG.warn("Unable to create a new ApplicationId in SubCluster "
+ + subClusterId.getId(), e);
+ }
+
+ if (response != null) {
+ return response;
+ } else {
+ // Empty response from the ResourceManager.
+ // Blacklist this SubCluster for this request.
+ subClustersActive.remove(subClusterId);
+ }
+
+ }
+
+ String errMsg = "Failed to create a new application.";
+ LOG.error(errMsg);
+ throw new YarnException(errMsg);
+ }
+
+ /**
+ * Today, YARN does not perform any check on the applicationId that is
+ * submitted.
+ *
+ * Base scenario:
+ *
+ * The Client submits an application to the Router. • The Router selects one
+ * SubCluster to forward the request. • The Router inserts a tuple into the
+ * State Store with the selected SubCluster (e.g. SC1) and the appId. • The
+ * State Store replies with the selected SubCluster (e.g. SC1). • The Router
+ * submits the request to the selected SubCluster.
+ *
+ * In case of State Store failure:
+ *
+ * The Client submits an application to the Router. • The Router selects one
+ * SubCluster to forward the request. • The Router inserts a tuple into the
+ * State Store with the selected SubCluster (e.g. SC1) and the appId. •
+ * Because the State Store is down, the Router times out and retries
+ * depending on the FederationFacade settings. • The Router replies to the
+ * Client with an error message.
+ *
+ * If the State Store fails after inserting the tuple: identical behavior as
+ * {@code ClientRMService}.
+ *
+ * In case of Router failure:
+ *
+ * Scenario 1 – Crash before submission to the ResourceManager
+ *
+ * • The Client submits an application to the Router. • The Router selects one
+ * SubCluster to forward the request. • The Router inserts a tuple into the
+ * State Store with the selected SubCluster (e.g. SC1) and the appId. • The
+ * Router crashes. • The Client times out and resubmits the application. •
+ * The Router selects one SubCluster to forward the request. • The Router
+ * inserts a tuple into the State Store with the selected SubCluster (e.g.
+ * SC2) and the appId. • Because the tuple is already in the State Store, it
+ * returns the previously selected SubCluster (e.g. SC1). • The Router submits
+ * the request to the selected SubCluster (e.g. SC1).
+ *
+ * Scenario 2 – Crash after submission to the ResourceManager
+ *
+ * • The Client submits an application to the Router. • The Router selects one
+ * SubCluster to forward the request. • The Router inserts a tuple into the
+ * State Store with the selected SubCluster (e.g. SC1) and the appId. • The
+ * Router submits the request to the selected SubCluster. • The Router
+ * crashes. • The Client times out and resubmits the application. • The
+ * Router selects one SubCluster to forward the request. • The Router inserts
+ * a tuple into the State Store with the selected SubCluster (e.g. SC2) and
+ * the appId. • The State Store replies with the selected SubCluster (e.g. SC1).
• The Router submits
+ * the request to the selected SubCluster (e.g. SC1). When a Client re-submits
+ * the same application to the same RM, the RM does not raise an exception and
+ * replies with an operation successful message.
+ *
+ * In case of Client failure: identical behavior as {@code ClientRMService}.
+ *
+ * In case of ResourceManager failure:
+ *
+ * • The Client submits an application to the Router. • The Router selects one
+ * SubCluster to forward the request. • The Router inserts a tuple into the
+ * State Store with the selected SubCluster (e.g. SC1) and the appId. • The
+ * Router submits the request to the selected SubCluster. • The entire
+ * SubCluster is down – all the RMs in HA, or the master RM, are not
+ * reachable. • The Router times out. • The Router selects a new SubCluster
+ * to forward the request. • The Router updates the tuple in the State Store
+ * with the selected SubCluster (e.g. SC2) and the appId. • The State Store
+ * replies with an OK answer. • The Router submits the request to the selected
+ * SubCluster (e.g. SC2).
+ */
+ @Override
+ public SubmitApplicationResponse submitApplication(
+ SubmitApplicationRequest request) throws YarnException, IOException {
+ if (request == null || request.getApplicationSubmissionContext() == null
+ || request.getApplicationSubmissionContext()
+ .getApplicationId() == null) {
+ RouterServerUtil
+ .logAndThrowException("Missing submitApplication request or "
+ + "ApplicationSubmissionContext information.", null);
+ }
+
+ ApplicationId applicationId =
+ request.getApplicationSubmissionContext().getApplicationId();
+
+ List<SubClusterId> blacklist = new ArrayList<>();
+
+ for (int i = 0; i < numSubmitRetries; ++i) {
+
+ SubClusterId subClusterId = policyFacade.getHomeSubcluster(
+ request.getApplicationSubmissionContext(), blacklist);
+ LOG.info("submitApplication appId " + applicationId + " try #" + i
+ + " on SubCluster " + subClusterId);
+
+ ApplicationHomeSubCluster appHomeSubCluster =
+ ApplicationHomeSubCluster.newInstance(applicationId, subClusterId);
+
+ if (i == 0) {
+ try {
+ // Persist the mapping of the applicationId and the subClusterId that
+ // has been selected as its home.
+ subClusterId =
+ federationFacade.addApplicationHomeSubCluster(appHomeSubCluster);
+ } catch (YarnException e) {
+ String message = "Unable to insert the ApplicationId " + applicationId
+ + " into the FederationStateStore";
+ RouterServerUtil.logAndThrowException(message, e);
+ }
+ } else {
+ try {
+ // Update the mapping of the applicationId and the home subClusterId
+ // to the new subClusterId we have selected.
+ federationFacade.updateApplicationHomeSubCluster(appHomeSubCluster);
+ } catch (YarnException e) {
+ String message = "Unable to update the ApplicationId " + applicationId
+ + " in the FederationStateStore";
+ SubClusterId subClusterIdInStateStore =
+ federationFacade.getApplicationHomeSubCluster(applicationId);
+ if (subClusterId.equals(subClusterIdInStateStore)) {
+ LOG.info("Application " + applicationId
+ + " already submitted on SubCluster " + subClusterId);
+ } else {
+ RouterServerUtil.logAndThrowException(message, e);
+ }
+ }
+ }
+
+ ApplicationClientProtocol clientRMProxy =
+ getClientRMProxyForSubCluster(subClusterId);
+
+ SubmitApplicationResponse response = null;
+ try {
+ response = clientRMProxy.submitApplication(request);
+ } catch (Exception e) {
+ LOG.warn("Unable to submit the application " + applicationId
+ + " to SubCluster " + subClusterId.getId(), e);
+ }
+
+ if (response != null) {
+ LOG.info("Application "
+ + request.getApplicationSubmissionContext().getApplicationName()
+ + " with appId " + applicationId + " submitted on " + subClusterId);
+ return response;
+ } else {
+ // Empty response from the ResourceManager.
+ // Blacklist this SubCluster for this request.
+ blacklist.add(subClusterId);
+ }
+ }
+
+ String errMsg = "Application "
+ + request.getApplicationSubmissionContext().getApplicationName()
+ + " with appId " + applicationId + " failed to be submitted.";
+ LOG.error(errMsg);
+ throw new YarnException(errMsg);
+ }
+
+ /**
+ * The Yarn Router forwards the request to the Yarn RM of the application's
+ * home SubCluster, i.e. the SubCluster in which the AM is running.
+ *
+ * Possible failures and behaviors:
+ *
+ * Client: identical behavior as {@code ClientRMService}.
+ *
+ * Router: the Client will time out and resubmit the request.
+ *
+ * ResourceManager: the Router times out and the call fails.
+ *
+ * State Store: the Router times out and retries depending on the
+ * FederationFacade settings - if the failure happened before the select
+ * operation.
+ */
+ @Override
+ public KillApplicationResponse forceKillApplication(
+ KillApplicationRequest request) throws YarnException, IOException {
+
+ if (request == null || request.getApplicationId() == null) {
+ RouterServerUtil.logAndThrowException(
+ "Missing forceKillApplication request or ApplicationId.", null);
+ }
+ ApplicationId applicationId = request.getApplicationId();
+ SubClusterId subClusterId = null;
+
+ try {
+ subClusterId = federationFacade
+ .getApplicationHomeSubCluster(request.getApplicationId());
+ } catch (YarnException e) {
+ RouterServerUtil.logAndThrowException("Application " + applicationId
+ + " does not exist in FederationStateStore", e);
+ }
+
+ ApplicationClientProtocol clientRMProxy =
+ getClientRMProxyForSubCluster(subClusterId);
+
+ KillApplicationResponse response = null;
+ try {
+ LOG.info("forceKillApplication " + applicationId + " on SubCluster "
+ + subClusterId);
+ response = clientRMProxy.forceKillApplication(request);
+ } catch (Exception e) {
+ LOG.error("Unable to kill the application "
+ + request.getApplicationId() + " on SubCluster "
+ + subClusterId.getId(), e);
+ throw e;
+ }
+
+ if (response == null) {
+ LOG.error("No response when attempting to kill the application "
+ + applicationId + " on SubCluster " + subClusterId.getId());
+ }
+
+ return response;
+ }
+
+ /**
+ * The Yarn Router forwards the request to the Yarn RM of the application's
+ * home SubCluster, i.e. the SubCluster in which the AM is running.
+ *
+ * Possible failures and behaviors:
+ *
+ * Client: identical behavior as {@code ClientRMService}.
+ *
+ * Router: the Client will time out and resubmit the request.
+ *
+ * ResourceManager: the Router times out and the call fails.
+ *
+ * State Store: the Router times out and retries depending on the
+ * FederationFacade settings - if the failure happened before the select
+ * operation.
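Both methods above resolve the application's home SubCluster from the State Store before forwarding; the following is a minimal sketch of that lookup, using the same FederationStateStoreFacade calls that appear in this patch (the helper class itself is hypothetical):

```java
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;

public final class HomeSubClusterLookupSketch {
  private HomeSubClusterLookupSketch() {
  }

  // Resolves the home SubCluster recorded for an application; this is the
  // same lookup the interceptor performs before forwarding kill/report calls.
  public static SubClusterId lookupHome(ApplicationId appId)
      throws YarnException {
    FederationStateStoreFacade facade =
        FederationStateStoreFacade.getInstance();
    return facade.getApplicationHomeSubCluster(appId);
  }
}
```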
+ */ + @Override + public GetApplicationReportResponse getApplicationReport( + GetApplicationReportRequest request) throws YarnException, IOException { + + if (request == null || request.getApplicationId() == null) { + RouterServerUtil.logAndThrowException( + "Missing getApplicationReport request or applicationId information.", + null); + } + + SubClusterId subClusterId = null; + + try { + subClusterId = federationFacade + .getApplicationHomeSubCluster(request.getApplicationId()); + } catch (YarnException e) { + RouterServerUtil + .logAndThrowException("Application " + request.getApplicationId() + + " does not exist in FederationStateStore", e); + } + + ApplicationClientProtocol clientRMProxy = + getClientRMProxyForSubCluster(subClusterId); + + GetApplicationReportResponse response = null; + try { + response = clientRMProxy.getApplicationReport(request); + } catch (Exception e) { + LOG.error("Unable to get the application report for " + + request.getApplicationId() + "to SubCluster " + + subClusterId.getId(), e); + throw e; + } + + if (response == null) { + LOG.error("No response when attempting to retrieve the report of " + + "the application " + request.getApplicationId() + " to SubCluster " + + subClusterId.getId()); + } + + return response; + } + + @Override + public GetApplicationsResponse getApplications(GetApplicationsRequest request) + throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetClusterMetricsResponse getClusterMetrics( + GetClusterMetricsRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request) + throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) + throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetQueueUserAclsInfoResponse getQueueUserAcls( + GetQueueUserAclsInfoRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues( + MoveApplicationAcrossQueuesRequest request) + throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetNewReservationResponse getNewReservation( + GetNewReservationRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public ReservationSubmissionResponse submitReservation( + ReservationSubmissionRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public ReservationListResponse listReservations( + ReservationListRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public ReservationUpdateResponse updateReservation( + ReservationUpdateRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public ReservationDeleteResponse deleteReservation( + ReservationDeleteRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetNodesToLabelsResponse getNodeToLabels( + GetNodesToLabelsRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetLabelsToNodesResponse getLabelsToNodes( + 
GetLabelsToNodesRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetClusterNodeLabelsResponse getClusterNodeLabels( + GetClusterNodeLabelsRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetApplicationAttemptReportResponse getApplicationAttemptReport( + GetApplicationAttemptReportRequest request) + throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetApplicationAttemptsResponse getApplicationAttempts( + GetApplicationAttemptsRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetContainerReportResponse getContainerReport( + GetContainerReportRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetContainersResponse getContainers(GetContainersRequest request) + throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetDelegationTokenResponse getDelegationToken( + GetDelegationTokenRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public RenewDelegationTokenResponse renewDelegationToken( + RenewDelegationTokenRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public CancelDelegationTokenResponse cancelDelegationToken( + CancelDelegationTokenRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public FailApplicationAttemptResponse failApplicationAttempt( + FailApplicationAttemptRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public UpdateApplicationPriorityResponse updateApplicationPriority( + UpdateApplicationPriorityRequest request) + throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public SignalContainerResponse signalToContainer( + SignalContainerRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( + UpdateApplicationTimeoutsRequest request) + throws YarnException, IOException { + throw new NotImplementedException(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java new file mode 100644 index 00000000000..fd2c610c7fe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java @@ -0,0 +1,546 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; 
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.util.LRUCacheHashMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * RouterClientRMService is a service that runs on each router that can be used + * to intercept and inspect {@link ApplicationClientProtocol} messages from + * client to the cluster resource manager. It listens + * {@link ApplicationClientProtocol} messages from the client and creates a + * request intercepting pipeline instance for each client. The pipeline is a + * chain of {@link ClientRequestInterceptor} instances that can inspect and + * modify the request/response as needed. The main difference with + * AMRMProxyService is the protocol they implement. + */ +public class RouterClientRMService extends AbstractService + implements ApplicationClientProtocol { + + private static final Logger LOG = + LoggerFactory.getLogger(RouterClientRMService.class); + + private Server server; + private InetSocketAddress listenerEndpoint; + + // For each user we store an interceptors' pipeline. + // For performance issue we use LRU cache to keep in memory the newest ones + // and remove the oldest used ones. 
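The per-user pipeline and its LRU cache described above are driven by configuration; a hedged sketch of how a deployment might set them, reusing the YarnConfiguration constants referenced in serviceStart() below (the audit interceptor class name is hypothetical):

```java
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RouterPipelineConfigSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Comma-separated chain, built first-to-last; the terminal interceptor
    // (e.g. FederationClientInterceptor) must be the last element because it
    // rejects setNextInterceptor.
    conf.set(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE,
        "com.example.AuditClientRequestInterceptor,"          // hypothetical
            + "org.apache.hadoop.yarn.server.router.clientrm"
            + ".FederationClientInterceptor");
    // Keep at most 32 per-user pipelines in the LRU cache described above.
    conf.setInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE, 32);
  }
}
```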
+ private Map<String, RequestInterceptorChainWrapper> userPipelineMap;
+
+ public RouterClientRMService() {
+ super(RouterClientRMService.class.getName());
+ }
+
+ @Override
+ protected void serviceStart() throws Exception {
+ LOG.info("Starting Router ClientRMService");
+ Configuration conf = getConfig();
+ YarnRPC rpc = YarnRPC.create(conf);
+ UserGroupInformation.setConfiguration(conf);
+
+ this.listenerEndpoint =
+ conf.getSocketAddr(YarnConfiguration.ROUTER_BIND_HOST,
+ YarnConfiguration.ROUTER_CLIENTRM_ADDRESS,
+ YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_ADDRESS,
+ YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_PORT);
+
+ int maxCacheSize =
+ conf.getInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE,
+ YarnConfiguration.DEFAULT_ROUTER_PIPELINE_CACHE_MAX_SIZE);
+ this.userPipelineMap = Collections.synchronizedMap(
+ new LRUCacheHashMap<String, RequestInterceptorChainWrapper>(
+ maxCacheSize, true));
+
+ Configuration serverConf = new Configuration(conf);
+
+ int numWorkerThreads =
+ serverConf.getInt(YarnConfiguration.RM_CLIENT_THREAD_COUNT,
+ YarnConfiguration.DEFAULT_RM_CLIENT_THREAD_COUNT);
+
+ this.server = rpc.getServer(ApplicationClientProtocol.class, this,
+ listenerEndpoint, serverConf, null, numWorkerThreads);
+
+ this.server.start();
+ LOG.info("Router ClientRMService listening on address: "
+ + this.server.getListenerAddress());
+ super.serviceStart();
+ }
+
+ @Override
+ protected void serviceStop() throws Exception {
+ LOG.info("Stopping Router ClientRMService");
+ if (this.server != null) {
+ this.server.stop();
+ }
+ userPipelineMap.clear();
+ super.serviceStop();
+ }
+
+ /**
+ * Returns the comma-separated interceptor class names from the
+ * configuration.
+ *
+ * @param conf the configuration to read the pipeline definition from
+ * @return the interceptor class names as a list
+ */
+ private List<String> getInterceptorClassNames(Configuration conf) {
+ String configuredInterceptorClassNames =
+ conf.get(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE,
+ YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_INTERCEPTOR_CLASS);
+
+ List<String> interceptorClassNames = new ArrayList<>();
+ Collection<String> tempList =
+ StringUtils.getStringCollection(configuredInterceptorClassNames);
+ for (String item : tempList) {
+ interceptorClassNames.add(item.trim());
+ }
+
+ return interceptorClassNames;
+ }
+
+ @Override
+ public GetNewApplicationResponse getNewApplication(
+ GetNewApplicationRequest request) throws YarnException, IOException {
+ RequestInterceptorChainWrapper pipeline = getInterceptorChain();
+ return pipeline.getRootInterceptor().getNewApplication(request);
+ }
+
+ @Override
+ public SubmitApplicationResponse submitApplication(
+ SubmitApplicationRequest request) throws YarnException, IOException {
+ RequestInterceptorChainWrapper pipeline = getInterceptorChain();
+ return pipeline.getRootInterceptor().submitApplication(request);
+ }
+
+ @Override
+ public KillApplicationResponse forceKillApplication(
+ KillApplicationRequest request) throws YarnException, IOException {
+ RequestInterceptorChainWrapper pipeline = getInterceptorChain();
+ return pipeline.getRootInterceptor().forceKillApplication(request);
+ }
+
+ @Override
+ public GetClusterMetricsResponse getClusterMetrics(
+ GetClusterMetricsRequest request) throws YarnException, IOException {
+ RequestInterceptorChainWrapper pipeline = getInterceptorChain();
+ return pipeline.getRootInterceptor().getClusterMetrics(request);
+ }
+
+ @Override
+ public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request)
+ throws YarnException, IOException {
+ RequestInterceptorChainWrapper pipeline = getInterceptorChain();
+ return
pipeline.getRootInterceptor().getClusterNodes(request); + } + + @Override + public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) + throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getQueueInfo(request); + } + + @Override + public GetQueueUserAclsInfoResponse getQueueUserAcls( + GetQueueUserAclsInfoRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getQueueUserAcls(request); + } + + @Override + public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues( + MoveApplicationAcrossQueuesRequest request) + throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().moveApplicationAcrossQueues(request); + } + + @Override + public GetNewReservationResponse getNewReservation( + GetNewReservationRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getNewReservation(request); + } + + @Override + public ReservationSubmissionResponse submitReservation( + ReservationSubmissionRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().submitReservation(request); + } + + @Override + public ReservationListResponse listReservations( + ReservationListRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().listReservations(request); + } + + @Override + public ReservationUpdateResponse updateReservation( + ReservationUpdateRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().updateReservation(request); + } + + @Override + public ReservationDeleteResponse deleteReservation( + ReservationDeleteRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().deleteReservation(request); + } + + @Override + public GetNodesToLabelsResponse getNodeToLabels( + GetNodesToLabelsRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getNodeToLabels(request); + } + + @Override + public GetLabelsToNodesResponse getLabelsToNodes( + GetLabelsToNodesRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getLabelsToNodes(request); + } + + @Override + public GetClusterNodeLabelsResponse getClusterNodeLabels( + GetClusterNodeLabelsRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getClusterNodeLabels(request); + } + + @Override + public GetApplicationReportResponse getApplicationReport( + GetApplicationReportRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getApplicationReport(request); + } + + @Override + public GetApplicationsResponse getApplications(GetApplicationsRequest request) + throws 
YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getApplications(request); + } + + @Override + public GetApplicationAttemptReportResponse getApplicationAttemptReport( + GetApplicationAttemptReportRequest request) + throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getApplicationAttemptReport(request); + } + + @Override + public GetApplicationAttemptsResponse getApplicationAttempts( + GetApplicationAttemptsRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getApplicationAttempts(request); + } + + @Override + public GetContainerReportResponse getContainerReport( + GetContainerReportRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getContainerReport(request); + } + + @Override + public GetContainersResponse getContainers(GetContainersRequest request) + throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getContainers(request); + } + + @Override + public GetDelegationTokenResponse getDelegationToken( + GetDelegationTokenRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getDelegationToken(request); + } + + @Override + public RenewDelegationTokenResponse renewDelegationToken( + RenewDelegationTokenRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().renewDelegationToken(request); + } + + @Override + public CancelDelegationTokenResponse cancelDelegationToken( + CancelDelegationTokenRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().cancelDelegationToken(request); + } + + @Override + public FailApplicationAttemptResponse failApplicationAttempt( + FailApplicationAttemptRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().failApplicationAttempt(request); + } + + @Override + public UpdateApplicationPriorityResponse updateApplicationPriority( + UpdateApplicationPriorityRequest request) + throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().updateApplicationPriority(request); + } + + @Override + public SignalContainerResponse signalToContainer( + SignalContainerRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().signalToContainer(request); + } + + @Override + public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( + UpdateApplicationTimeoutsRequest request) + throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().updateApplicationTimeouts(request); + } + + private RequestInterceptorChainWrapper getInterceptorChain() + throws IOException { + String user = UserGroupInformation.getCurrentUser().getUserName(); + if 
(!userPipelineMap.containsKey(user)) {
+ initializePipeline(user);
+ }
+ return userPipelineMap.get(user);
+ }
+
+ /**
+ * Gets the request interceptor chains for all the users.
+ *
+ * @return the request interceptor chains.
+ */
+ @VisibleForTesting
+ protected Map<String, RequestInterceptorChainWrapper> getPipelines() {
+ return this.userPipelineMap;
+ }
+
+ /**
+ * This method creates and returns a reference to the first interceptor in
+ * the chain of request interceptor instances.
+ *
+ * @return the reference to the first interceptor in the chain
+ */
+ @VisibleForTesting
+ protected ClientRequestInterceptor createRequestInterceptorChain() {
+ Configuration conf = getConfig();
+
+ List<String> interceptorClassNames = getInterceptorClassNames(conf);
+
+ ClientRequestInterceptor pipeline = null;
+ ClientRequestInterceptor current = null;
+ for (String interceptorClassName : interceptorClassNames) {
+ try {
+ Class<?> interceptorClass = conf.getClassByName(interceptorClassName);
+ if (ClientRequestInterceptor.class.isAssignableFrom(interceptorClass)) {
+ ClientRequestInterceptor interceptorInstance =
+ (ClientRequestInterceptor) ReflectionUtils
+ .newInstance(interceptorClass, conf);
+ if (pipeline == null) {
+ pipeline = interceptorInstance;
+ current = interceptorInstance;
+ continue;
+ } else {
+ current.setNextInterceptor(interceptorInstance);
+ current = interceptorInstance;
+ }
+ } else {
+ throw new YarnRuntimeException(
+ "Class: " + interceptorClassName + " not instance of "
+ + ClientRequestInterceptor.class.getCanonicalName());
+ }
+ } catch (ClassNotFoundException e) {
+ throw new YarnRuntimeException(
+ "Could not instantiate ClientRequestInterceptor: "
+ + interceptorClassName,
+ e);
+ }
+ }
+
+ if (pipeline == null) {
+ throw new YarnRuntimeException(
+ "RequestInterceptor pipeline is not configured in the system");
+ }
+ return pipeline;
+ }
+
+ /**
+ * Initializes the request interceptor pipeline for the specified user.
+ *
+ * @param user the user for whom the pipeline is created
+ */
+ private void initializePipeline(String user) {
+ RequestInterceptorChainWrapper chainWrapper = null;
+ synchronized (this.userPipelineMap) {
+ if (this.userPipelineMap.containsKey(user)) {
+ LOG.info("A pipeline for user {} already exists, so the request to "
+ + "create one is ignored.", user);
+ return;
+ }
+
+ chainWrapper = new RequestInterceptorChainWrapper();
+ this.userPipelineMap.put(user, chainWrapper);
+ }
+
+ // We register the pipeline instance in the map first and initialize it
+ // later, because chain initialization can be expensive and we would like
+ // to release the lock as soon as possible to prevent other users from
+ // blocking while one user's chain is initializing.
+ LOG.info("Initializing request processing pipeline for the user: {}",
+ user);
+
+ try {
+ ClientRequestInterceptor interceptorChain =
+ this.createRequestInterceptorChain();
+ interceptorChain.init(user);
+ chainWrapper.init(interceptorChain);
+ } catch (Exception e) {
+ synchronized (this.userPipelineMap) {
+ this.userPipelineMap.remove(user);
+ }
+ throw e;
+ }
+ }
+
+ /**
+ * Private structure for encapsulating the RequestInterceptor and user
+ * instances.
+ *
+ */
+ @Private
+ public static class RequestInterceptorChainWrapper {
+ private ClientRequestInterceptor rootInterceptor;
+
+ /**
+ * Initializes the wrapper with the specified parameters.
+ * + * @param interceptor the first interceptor in the pipeline + */ + public synchronized void init(ClientRequestInterceptor interceptor) { + this.rootInterceptor = interceptor; + } + + /** + * Gets the root request intercepter. + * + * @return the root request intercepter + */ + public synchronized ClientRequestInterceptor getRootInterceptor() { + return rootInterceptor; + } + + /** + * Shutdown the chain of interceptors when the object is destroyed. + */ + @Override + protected void finalize() { + rootInterceptor.shutdown(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/package-info.java new file mode 100644 index 00000000000..7d1dadd373b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Router ClientRM Proxy Service package. **/ +package org.apache.hadoop.yarn.server.router.clientrm; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/package-info.java new file mode 100644 index 00000000000..bca1f640104 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Router Server package. 
**/ +package org.apache.hadoop.yarn.server.router; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/AbstractRMAdminRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/AbstractRMAdminRequestInterceptor.java new file mode 100644 index 00000000000..a4972fcb9ad --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/AbstractRMAdminRequestInterceptor.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.rmadmin; + +import org.apache.hadoop.conf.Configuration; + +/** + * Implements the {@link RMAdminRequestInterceptor} interface and provides + * common functionality which can can be used and/or extended by other concrete + * intercepter classes. + * + */ +public abstract class AbstractRMAdminRequestInterceptor + implements RMAdminRequestInterceptor { + private Configuration conf; + private RMAdminRequestInterceptor nextInterceptor; + + /** + * Sets the {@link RMAdminRequestInterceptor} in the chain. + */ + @Override + public void setNextInterceptor(RMAdminRequestInterceptor nextInterceptor) { + this.nextInterceptor = nextInterceptor; + } + + /** + * Sets the {@link Configuration}. + */ + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + if (this.nextInterceptor != null) { + this.nextInterceptor.setConf(conf); + } + } + + /** + * Gets the {@link Configuration}. + */ + @Override + public Configuration getConf() { + return this.conf; + } + + /** + * Initializes the {@link RMAdminRequestInterceptor}. + */ + @Override + public void init(String user) { + if (this.nextInterceptor != null) { + this.nextInterceptor.init(user); + } + } + + /** + * Disposes the {@link RMAdminRequestInterceptor}. + */ + @Override + public void shutdown() { + if (this.nextInterceptor != null) { + this.nextInterceptor.shutdown(); + } + } + + /** + * Gets the next {@link RMAdminRequestInterceptor} in the chain. 
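As a usage sketch of this base class (hypothetical names; only refreshQueues is shown, and the class is declared abstract so the remaining ResourceManagerAdministrationProtocol methods can be left out), an interceptor built on it would typically inspect a request and then delegate to getNextInterceptor():

package com.example.router;   // hypothetical package

import java.io.IOException;

import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
import org.apache.hadoop.yarn.server.router.rmadmin.AbstractRMAdminRequestInterceptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Hypothetical auditing interceptor for the Router RMAdmin pipeline. */
public abstract class AuditingRMAdminInterceptor
    extends AbstractRMAdminRequestInterceptor {

  private static final Logger LOG =
      LoggerFactory.getLogger(AuditingRMAdminInterceptor.class);

  @Override
  public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
      throws YarnException, IOException {
    // Record the call, then forward it; a concrete subclass would delegate
    // the other admin protocol methods to getNextInterceptor() the same way.
    LOG.info("refreshQueues received, forwarding to the next interceptor");
    return getNextInterceptor().refreshQueues(request);
  }
}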
+ */ + @Override + public RMAdminRequestInterceptor getNextInterceptor() { + return this.nextInterceptor; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/DefaultRMAdminRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/DefaultRMAdminRequestInterceptor.java new file mode 100644 index 00000000000..7e6a1ff28d3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/DefaultRMAdminRequestInterceptor.java @@ -0,0 +1,215 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.rmadmin; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.client.ClientRMProxy; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; +import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Extends the {@link AbstractRMAdminRequestInterceptor} class and provides an + * implementation that simply forwards the client requests to the cluster + * resource manager. + * + */ +public class DefaultRMAdminRequestInterceptor + extends AbstractRMAdminRequestInterceptor { + private static final Logger LOG = + LoggerFactory.getLogger(DefaultRMAdminRequestInterceptor.class); + private ResourceManagerAdministrationProtocol rmAdminProxy; + private UserGroupInformation user = null; + + @Override + public void init(String userName) { + super.init(userName); + try { + // Do not create a proxy user if user name matches the user name on + // current UGI + if (userName.equalsIgnoreCase( + UserGroupInformation.getCurrentUser().getUserName())) { + user = UserGroupInformation.getCurrentUser(); + } else { + user = UserGroupInformation.createProxyUser(userName, + UserGroupInformation.getCurrentUser()); + } + + final Configuration conf = this.getConf(); + + rmAdminProxy = user.doAs( + new PrivilegedExceptionAction() { + @Override + public ResourceManagerAdministrationProtocol run() + throws Exception { + return ClientRMProxy.createRMProxy(conf, + ResourceManagerAdministrationProtocol.class); + } + }); + } catch (IOException e) { + String message = "Error while creating Router RMAdmin Service for user:"; + if (user != null) { + message += ", user: " + user; + } + + LOG.info(message); + throw new YarnRuntimeException(message, e); + } catch (Exception e) { + throw new YarnRuntimeException(e); + } + } + + @Override + public void setNextInterceptor(RMAdminRequestInterceptor next) { + throw new YarnRuntimeException("setNextInterceptor is being called on " + + "DefaultRMAdminRequestInterceptor, which should be the last one " + + "in the chain. 
Check if the interceptor pipeline configuration " + + "is correct"); + } + + @VisibleForTesting + public void setRMAdmin(ResourceManagerAdministrationProtocol rmAdmin) { + this.rmAdminProxy = rmAdmin; + + } + + @Override + public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) + throws StandbyException, YarnException, IOException { + return rmAdminProxy.refreshQueues(request); + } + + @Override + public RefreshNodesResponse refreshNodes(RefreshNodesRequest request) + throws StandbyException, YarnException, IOException { + return rmAdminProxy.refreshNodes(request); + } + + @Override + public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration( + RefreshSuperUserGroupsConfigurationRequest request) + throws StandbyException, YarnException, IOException { + return rmAdminProxy.refreshSuperUserGroupsConfiguration(request); + } + + @Override + public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( + RefreshUserToGroupsMappingsRequest request) + throws StandbyException, YarnException, IOException { + return rmAdminProxy.refreshUserToGroupsMappings(request); + } + + @Override + public RefreshAdminAclsResponse refreshAdminAcls( + RefreshAdminAclsRequest request) throws YarnException, IOException { + return rmAdminProxy.refreshAdminAcls(request); + } + + @Override + public RefreshServiceAclsResponse refreshServiceAcls( + RefreshServiceAclsRequest request) throws YarnException, IOException { + return rmAdminProxy.refreshServiceAcls(request); + } + + @Override + public UpdateNodeResourceResponse updateNodeResource( + UpdateNodeResourceRequest request) throws YarnException, IOException { + return rmAdminProxy.updateNodeResource(request); + } + + @Override + public RefreshNodesResourcesResponse refreshNodesResources( + RefreshNodesResourcesRequest request) throws YarnException, IOException { + return rmAdminProxy.refreshNodesResources(request); + } + + @Override + public AddToClusterNodeLabelsResponse addToClusterNodeLabels( + AddToClusterNodeLabelsRequest request) throws YarnException, IOException { + return rmAdminProxy.addToClusterNodeLabels(request); + } + + @Override + public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels( + RemoveFromClusterNodeLabelsRequest request) + throws YarnException, IOException { + return rmAdminProxy.removeFromClusterNodeLabels(request); + } + + @Override + public ReplaceLabelsOnNodeResponse replaceLabelsOnNode( + ReplaceLabelsOnNodeRequest request) throws YarnException, IOException { + return rmAdminProxy.replaceLabelsOnNode(request); + } + + @Override + public CheckForDecommissioningNodesResponse checkForDecommissioningNodes( + CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest) + throws YarnException, IOException { + return rmAdminProxy + .checkForDecommissioningNodes(checkForDecommissioningNodesRequest); + } + + @Override + public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority( + RefreshClusterMaxPriorityRequest request) + throws YarnException, IOException { + return rmAdminProxy.refreshClusterMaxPriority(request); + } + + @Override + public String[] getGroupsForUser(String userName) throws IOException { + return rmAdminProxy.getGroupsForUser(userName); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RMAdminRequestInterceptor.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RMAdminRequestInterceptor.java new file mode 100644 index 00000000000..dc4bda01b90 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RMAdminRequestInterceptor.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.rmadmin; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; + +/** + * Defines the contract to be implemented by the request intercepter classes, + * that can be used to intercept and inspect messages sent from the client to + * the resource manager. + */ +public interface RMAdminRequestInterceptor + extends ResourceManagerAdministrationProtocol, Configurable { + /** + * This method is called for initializing the intercepter. This is guaranteed + * to be called only once in the lifetime of this instance. + * + * @param user the name of the client + */ + void init(String user); + + /** + * This method is called to release the resources held by the intercepter. + * This will be called when the application pipeline is being destroyed. The + * concrete implementations should dispose the resources and forward the + * request to the next intercepter, if any. + */ + void shutdown(); + + /** + * Sets the next intercepter in the pipeline. The concrete implementation of + * this interface should always pass the request to the nextInterceptor after + * inspecting the message. The last intercepter in the chain is responsible to + * send the messages to the resource manager service and so the last + * intercepter will not receive this method call. + * + * @param nextInterceptor the RMAdminRequestInterceptor to set in the pipeline + */ + void setNextInterceptor(RMAdminRequestInterceptor nextInterceptor); + + /** + * Returns the next intercepter in the chain. 
+ * + * @return the next intercepter in the chain + */ + RMAdminRequestInterceptor getNextInterceptor(); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java new file mode 100644 index 00000000000..b8b7ad818f3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java @@ -0,0 +1,423 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.rmadmin; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; +import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; +import org.apache.hadoop.yarn.util.LRUCacheHashMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * RouterRMAdminService is a service that runs on each router that can be used + * to intercept and inspect {@code ResourceManagerAdministrationProtocol} + * messages from client to the cluster resource manager. It listens + * {@code ResourceManagerAdministrationProtocol} messages from the client and + * creates a request intercepting pipeline instance for each client. The + * pipeline is a chain of intercepter instances that can inspect and modify the + * request/response as needed. The main difference with AMRMProxyService is the + * protocol they implement. + */ +public class RouterRMAdminService extends AbstractService + implements ResourceManagerAdministrationProtocol { + + private static final Logger LOG = + LoggerFactory.getLogger(RouterRMAdminService.class); + + private Server server; + private InetSocketAddress listenerEndpoint; + + // For each user we store an interceptors' pipeline. + // For performance issue we use LRU cache to keep in memory the newest ones + // and remove the oldest used ones. 
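The cache behaviour this comment describes can be sketched in isolation. The construction below mirrors the one in serviceStart(); the LinkedHashMap-style access-order eviction is assumed from the (maxCacheSize, true) constructor used there, and the tiny capacity is only for the demo.

import java.util.Collections;
import java.util.Map;

import org.apache.hadoop.yarn.util.LRUCacheHashMap;

public class PipelineCacheSketch {
  public static void main(String[] args) {
    // Bounded, access-ordered map wrapped to make it thread safe.
    Map<String, String> cache = Collections.synchronizedMap(
        new LRUCacheHashMap<String, String>(2, true));
    cache.put("alice", "pipeline-alice");
    cache.put("bob", "pipeline-bob");
    cache.get("alice");                   // "alice" becomes most recently used
    cache.put("carol", "pipeline-carol"); // exceeds the bound, "bob" is evicted
    System.out.println(cache.keySet());   // expected: [alice, carol]
  }
}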
+ private Map userPipelineMap; + + public RouterRMAdminService() { + super(RouterRMAdminService.class.getName()); + } + + @Override + protected void serviceStart() throws Exception { + LOG.info("Starting Router RMAdmin Service"); + Configuration conf = getConfig(); + YarnRPC rpc = YarnRPC.create(conf); + UserGroupInformation.setConfiguration(conf); + + this.listenerEndpoint = + conf.getSocketAddr(YarnConfiguration.ROUTER_BIND_HOST, + YarnConfiguration.ROUTER_RMADMIN_ADDRESS, + YarnConfiguration.DEFAULT_ROUTER_RMADMIN_ADDRESS, + YarnConfiguration.DEFAULT_ROUTER_RMADMIN_PORT); + + int maxCacheSize = + conf.getInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE, + YarnConfiguration.DEFAULT_ROUTER_PIPELINE_CACHE_MAX_SIZE); + this.userPipelineMap = Collections.synchronizedMap( + new LRUCacheHashMap( + maxCacheSize, true)); + + Configuration serverConf = new Configuration(conf); + + int numWorkerThreads = + serverConf.getInt(YarnConfiguration.RM_ADMIN_CLIENT_THREAD_COUNT, + YarnConfiguration.DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT); + + this.server = rpc.getServer(ResourceManagerAdministrationProtocol.class, + this, listenerEndpoint, serverConf, null, numWorkerThreads); + + this.server.start(); + LOG.info("Router RMAdminService listening on address: " + + this.server.getListenerAddress()); + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + LOG.info("Stopping Router RMAdminService"); + if (this.server != null) { + this.server.stop(); + } + userPipelineMap.clear(); + super.serviceStop(); + } + + /** + * Returns the comma separated intercepter class names from the configuration. + * + * @param conf + * @return the intercepter class names as an instance of ArrayList + */ + private List getInterceptorClassNames(Configuration conf) { + String configuredInterceptorClassNames = + conf.get(YarnConfiguration.ROUTER_RMADMIN_INTERCEPTOR_CLASS_PIPELINE, + YarnConfiguration.DEFAULT_ROUTER_RMADMIN_INTERCEPTOR_CLASS); + + List interceptorClassNames = new ArrayList(); + Collection tempList = + StringUtils.getStringCollection(configuredInterceptorClassNames); + for (String item : tempList) { + interceptorClassNames.add(item.trim()); + } + + return interceptorClassNames; + } + + private RequestInterceptorChainWrapper getInterceptorChain() + throws IOException { + String user = UserGroupInformation.getCurrentUser().getUserName(); + if (!userPipelineMap.containsKey(user)) { + initializePipeline(user); + } + return userPipelineMap.get(user); + } + + /** + * Gets the Request intercepter chains for all the users. + * + * @return the request intercepter chains. + */ + @VisibleForTesting + protected Map getPipelines() { + return this.userPipelineMap; + } + + /** + * This method creates and returns reference of the first intercepter in the + * chain of request intercepter instances. 
+ * + * @return the reference of the first intercepter in the chain + */ + @VisibleForTesting + protected RMAdminRequestInterceptor createRequestInterceptorChain() { + Configuration conf = getConfig(); + + List interceptorClassNames = getInterceptorClassNames(conf); + + RMAdminRequestInterceptor pipeline = null; + RMAdminRequestInterceptor current = null; + for (String interceptorClassName : interceptorClassNames) { + try { + Class interceptorClass = conf.getClassByName(interceptorClassName); + if (RMAdminRequestInterceptor.class + .isAssignableFrom(interceptorClass)) { + RMAdminRequestInterceptor interceptorInstance = + (RMAdminRequestInterceptor) ReflectionUtils + .newInstance(interceptorClass, conf); + if (pipeline == null) { + pipeline = interceptorInstance; + current = interceptorInstance; + continue; + } else { + current.setNextInterceptor(interceptorInstance); + current = interceptorInstance; + } + } else { + throw new YarnRuntimeException( + "Class: " + interceptorClassName + " not instance of " + + RMAdminRequestInterceptor.class.getCanonicalName()); + } + } catch (ClassNotFoundException e) { + throw new YarnRuntimeException( + "Could not instantiate RMAdminRequestInterceptor: " + + interceptorClassName, + e); + } + } + + if (pipeline == null) { + throw new YarnRuntimeException( + "RequestInterceptor pipeline is not configured in the system"); + } + return pipeline; + } + + /** + * Initializes the request intercepter pipeline for the specified user. + * + * @param user + */ + private void initializePipeline(String user) { + RequestInterceptorChainWrapper chainWrapper = null; + synchronized (this.userPipelineMap) { + if (this.userPipelineMap.containsKey(user)) { + LOG.info("Request to start an already existing user: {}" + + " was received, so ignoring.", user); + return; + } + + chainWrapper = new RequestInterceptorChainWrapper(); + this.userPipelineMap.put(user, chainWrapper); + } + + // We register the pipeline instance in the map first and then initialize it + // later because chain initialization can be expensive and we would like to + // release the lock as soon as possible to prevent other applications from + // blocking when one application's chain is initializing + LOG.info("Initializing request processing pipeline for the user: {}", user); + + try { + RMAdminRequestInterceptor interceptorChain = + this.createRequestInterceptorChain(); + interceptorChain.init(user); + chainWrapper.init(interceptorChain); + } catch (Exception e) { + synchronized (this.userPipelineMap) { + this.userPipelineMap.remove(user); + } + throw e; + } + } + + /** + * Private structure for encapsulating RequestInterceptor and user instances. + * + */ + @Private + public static class RequestInterceptorChainWrapper { + private RMAdminRequestInterceptor rootInterceptor; + + /** + * Initializes the wrapper with the specified parameters. + * + * @param interceptor the first interceptor in the pipeline + */ + public synchronized void init(RMAdminRequestInterceptor interceptor) { + this.rootInterceptor = interceptor; + } + + /** + * Gets the root request intercepter. + * + * @return the root request intercepter + */ + public synchronized RMAdminRequestInterceptor getRootInterceptor() { + return rootInterceptor; + } + + /** + * Shutdown the chain of interceptors when the object is destroyed. 
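Wiring this pipeline together could look roughly like the sketch below, assuming a concrete interceptor such as the hypothetical AuditingRMAdminInterceptor sketched earlier is on the classpath. The property is read as a comma-separated list and instantiated head to tail, so the default interceptor should stay last in order for calls to reach the ResourceManager.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.router.rmadmin.RouterRMAdminService;

public class RouterRMAdminWiringSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.ROUTER_RMADMIN_INTERCEPTOR_CLASS_PIPELINE,
        "com.example.router.AuditingRMAdminInterceptor,"   // hypothetical class
            + "org.apache.hadoop.yarn.server.router.rmadmin."
            + "DefaultRMAdminRequestInterceptor");

    RouterRMAdminService rmAdminService = new RouterRMAdminService();
    rmAdminService.init(conf);   // AbstractService lifecycle
    rmAdminService.start();      // binds the RPC endpoint; pipelines are built lazily per user
  }
}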
+ */ + @Override + protected void finalize() { + rootInterceptor.shutdown(); + } + } + + @Override + public String[] getGroupsForUser(String user) throws IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getGroupsForUser(user); + } + + @Override + public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) + throws StandbyException, YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().refreshQueues(request); + + } + + @Override + public RefreshNodesResponse refreshNodes(RefreshNodesRequest request) + throws StandbyException, YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().refreshNodes(request); + + } + + @Override + public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration( + RefreshSuperUserGroupsConfigurationRequest request) + throws StandbyException, YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor() + .refreshSuperUserGroupsConfiguration(request); + + } + + @Override + public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( + RefreshUserToGroupsMappingsRequest request) + throws StandbyException, YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().refreshUserToGroupsMappings(request); + + } + + @Override + public RefreshAdminAclsResponse refreshAdminAcls( + RefreshAdminAclsRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().refreshAdminAcls(request); + + } + + @Override + public RefreshServiceAclsResponse refreshServiceAcls( + RefreshServiceAclsRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().refreshServiceAcls(request); + + } + + @Override + public UpdateNodeResourceResponse updateNodeResource( + UpdateNodeResourceRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().updateNodeResource(request); + + } + + @Override + public RefreshNodesResourcesResponse refreshNodesResources( + RefreshNodesResourcesRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().refreshNodesResources(request); + + } + + @Override + public AddToClusterNodeLabelsResponse addToClusterNodeLabels( + AddToClusterNodeLabelsRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().addToClusterNodeLabels(request); + + } + + @Override + public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels( + RemoveFromClusterNodeLabelsRequest request) + throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().removeFromClusterNodeLabels(request); + + } + + @Override + public ReplaceLabelsOnNodeResponse replaceLabelsOnNode( + ReplaceLabelsOnNodeRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + 
return pipeline.getRootInterceptor().replaceLabelsOnNode(request); + + } + + @Override + public CheckForDecommissioningNodesResponse checkForDecommissioningNodes( + CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest) + throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor() + .checkForDecommissioningNodes(checkForDecommissioningNodesRequest); + } + + @Override + public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority( + RefreshClusterMaxPriorityRequest request) + throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().refreshClusterMaxPriority(request); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/package-info.java new file mode 100644 index 00000000000..98a7ed0841c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Router RM Admin Proxy Service package. **/ +package org.apache.hadoop.yarn.server.router.rmadmin; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AbstractRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AbstractRESTRequestInterceptor.java new file mode 100644 index 00000000000..a2d78a479ab --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AbstractRESTRequestInterceptor.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import org.apache.hadoop.conf.Configuration; + +/** + * Extends the RequestInterceptor class and provides common functionality which + * can be used and/or extended by other concrete intercepter classes. + */ +public abstract class AbstractRESTRequestInterceptor + implements RESTRequestInterceptor { + + private Configuration conf; + private RESTRequestInterceptor nextInterceptor; + + /** + * Sets the {@link RESTRequestInterceptor} in the chain. + */ + @Override + public void setNextInterceptor(RESTRequestInterceptor nextInterceptor) { + this.nextInterceptor = nextInterceptor; + } + + /** + * Sets the {@link Configuration}. + */ + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + if (this.nextInterceptor != null) { + this.nextInterceptor.setConf(conf); + } + } + + /** + * Gets the {@link Configuration}. + */ + @Override + public Configuration getConf() { + return this.conf; + } + + /** + * Initializes the {@link RESTRequestInterceptor}. + */ + @Override + public void init(String user) { + if (this.nextInterceptor != null) { + this.nextInterceptor.init(user); + } + } + + /** + * Disposes the {@link RESTRequestInterceptor}. + */ + @Override + public void shutdown() { + if (this.nextInterceptor != null) { + this.nextInterceptor.shutdown(); + } + } + + /** + * Gets the next {@link RESTRequestInterceptor} in the chain. + */ + @Override + public RESTRequestInterceptor getNextInterceptor() { + return this.nextInterceptor; + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java new file mode 100644 index 00000000000..abd8ca6ec10 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java @@ -0,0 +1,510 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.core.Response; + +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; + +/** + * Extends the AbstractRequestInterceptorClient class and provides an + * implementation that simply forwards the client requests to the resource + * manager. 
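Because every method in this class is a thin forwarding call, subclassing it is a convenient way to change behaviour for a single endpoint. A minimal sketch (hypothetical class; the five-second TTL is an arbitrary choice) that memoizes cluster metrics before falling back to the default forwarding:

package com.example.router;   // hypothetical package

import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
import org.apache.hadoop.yarn.server.router.webapp.DefaultRequestInterceptorREST;

/** Hypothetical REST interceptor that caches cluster metrics briefly. */
public class CachingRequestInterceptorREST
    extends DefaultRequestInterceptorREST {

  private static final long TTL_MS = 5000;

  private volatile ClusterMetricsInfo cached;
  private volatile long cachedAtMs;

  @Override
  public ClusterMetricsInfo getClusterMetricsInfo() {
    long now = System.currentTimeMillis();
    if (cached == null || now - cachedAtMs > TTL_MS) {
      // Falls through to the forwarding implementation, which issues the
      // HTTP GET against the RM web service.
      cached = super.getClusterMetricsInfo();
      cachedAtMs = now;
    }
    return cached;
  }
}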
+ */ +public class DefaultRequestInterceptorREST + extends AbstractRESTRequestInterceptor { + + private String webAppAddress; + private SubClusterId subClusterId = null; + + public void setWebAppAddress(String webAppAddress) { + this.webAppAddress = webAppAddress; + } + + protected void setSubClusterId(SubClusterId scId) { + this.subClusterId = scId; + } + + protected SubClusterId getSubClusterId() { + return this.subClusterId; + } + + @Override + public void init(String user) { + webAppAddress = WebAppUtils.getRMWebAppURLWithScheme(getConf()); + } + + @Override + public ClusterInfo get() { + return getClusterInfo(); + } + + @Override + public ClusterInfo getClusterInfo() { + return RouterWebServiceUtil.genericForward(webAppAddress, null, + ClusterInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.INFO, null, null); + } + + @Override + public ClusterMetricsInfo getClusterMetricsInfo() { + return RouterWebServiceUtil.genericForward(webAppAddress, null, + ClusterMetricsInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.METRICS, null, null); + } + + @Override + public SchedulerTypeInfo getSchedulerInfo() { + return RouterWebServiceUtil.genericForward(webAppAddress, null, + SchedulerTypeInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.SCHEDULER, null, null); + } + + @Override + public String dumpSchedulerLogs(String time, HttpServletRequest hsr) + throws IOException { + // time is specified inside hsr + return RouterWebServiceUtil.genericForward(webAppAddress, null, + String.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.SCHEDULER_LOGS, null, null); + } + + @Override + public NodesInfo getNodes(String states) { + // states will be part of additionalParam + Map additionalParam = new HashMap(); + additionalParam.put(RMWSConsts.STATES, new String[] {states}); + return RouterWebServiceUtil.genericForward(webAppAddress, null, + NodesInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.NODES, null, + additionalParam); + } + + @Override + public NodeInfo getNode(String nodeId) { + return RouterWebServiceUtil.genericForward(webAppAddress, null, + NodeInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.NODES + "/" + nodeId, null, + null); + } + + @Override + public AppsInfo getApps(HttpServletRequest hsr, String stateQuery, + Set statesQuery, String finalStatusQuery, String userQuery, + String queueQuery, String count, String startedBegin, String startedEnd, + String finishBegin, String finishEnd, Set applicationTypes, + Set applicationTags, Set unselectedFields) { + // all the params are specified inside hsr + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + AppsInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS, null, null); + } + + @Override + public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId) { + // nodeId is specified inside hsr + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + ActivitiesInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.SCHEDULER_ACTIVITIES, null, + null); + } + + @Override + public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, + String appId, String time) { + // time and appId are specified inside hsr + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + AppActivitiesInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.SCHEDULER_APP_ACTIVITIES, + null, null); + } + + @Override + 
public ApplicationStatisticsInfo getAppStatistics(HttpServletRequest hsr, + Set stateQueries, Set typeQueries) { + // stateQueries and typeQueries are specified inside hsr + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + ApplicationStatisticsInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APP_STATISTICS, null, null); + } + + @Override + public AppInfo getApp(HttpServletRequest hsr, String appId, + Set unselectedFields) { + // unselectedFields is specified inside hsr + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + AppInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId, null, + null); + } + + @Override + public AppState getAppState(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + AppState.class, HTTPMethods.GET, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.STATE, + null, null); + } + + @Override + public Response updateAppState(AppState targetState, HttpServletRequest hsr, + String appId) throws AuthorizationException, YarnException, + InterruptedException, IOException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.PUT, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.STATE, + targetState, null); + } + + @Override + public NodeToLabelsInfo getNodeToLabels(HttpServletRequest hsr) + throws IOException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + NodeToLabelsInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.GET_NODE_TO_LABELS, null, + null); + } + + @Override + public LabelsToNodesInfo getLabelsToNodes(Set labels) + throws IOException { + // labels will be part of additionalParam + Map additionalParam = new HashMap(); + additionalParam.put(RMWSConsts.LABELS, + labels.toArray(new String[labels.size()])); + return RouterWebServiceUtil.genericForward(webAppAddress, null, + LabelsToNodesInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.LABEL_MAPPINGS, null, + additionalParam); + } + + @Override + public Response replaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels, + HttpServletRequest hsr) throws IOException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.REPLACE_NODE_TO_LABELS, + newNodeToLabels, null); + } + + @Override + public Response replaceLabelsOnNode(Set newNodeLabelsName, + HttpServletRequest hsr, String nodeId) throws Exception { + // newNodeLabelsName is specified inside hsr + return RouterWebServiceUtil + .genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.NODES + "/" + nodeId + "/replace-labels", + null, null); + } + + @Override + public NodeLabelsInfo getClusterNodeLabels(HttpServletRequest hsr) + throws IOException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + NodeLabelsInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.GET_NODE_LABELS, null, + null); + } + + @Override + public Response addToClusterNodeLabels(NodeLabelsInfo newNodeLabels, + HttpServletRequest hsr) throws Exception { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.ADD_NODE_LABELS, + newNodeLabels, null); + } + + 
@Override + public Response removeFromCluserNodeLabels(Set oldNodeLabels, + HttpServletRequest hsr) throws Exception { + // oldNodeLabels is specified inside hsr + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.REMOVE_NODE_LABELS, null, + null); + } + + @Override + public NodeLabelsInfo getLabelsOnNode(HttpServletRequest hsr, String nodeId) + throws IOException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + NodeLabelsInfo.class, HTTPMethods.GET, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.NODES + "/" + nodeId + "/get-labels", + null, null); + } + + @Override + public AppPriority getAppPriority(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + AppPriority.class, HTTPMethods.GET, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.PRIORITY, + null, null); + } + + @Override + public Response updateApplicationPriority(AppPriority targetPriority, + HttpServletRequest hsr, String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.PUT, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.PRIORITY, + targetPriority, null); + } + + @Override + public AppQueue getAppQueue(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + AppQueue.class, HTTPMethods.GET, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.QUEUE, + null, null); + } + + @Override + public Response updateAppQueue(AppQueue targetQueue, HttpServletRequest hsr, + String appId) throws AuthorizationException, YarnException, + InterruptedException, IOException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.PUT, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.QUEUE, + targetQueue, null); + } + + @Override + public Response createNewApplication(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS_NEW_APPLICATION, null, + null); + } + + @Override + public Response submitApplication(ApplicationSubmissionContextInfo newApp, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS, newApp, null); + } + + @Override + public Response postDelegationToken(DelegationToken tokenData, + HttpServletRequest hsr) throws AuthorizationException, IOException, + InterruptedException, Exception { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.DELEGATION_TOKEN, tokenData, + null); + } + + @Override + public Response postDelegationTokenExpiration(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException, + Exception { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + 
RMWSConsts.DELEGATION_TOKEN_EXPIRATION, + null, null); + } + + @Override + public Response cancelDelegationToken(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException, + Exception { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.DELETE, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.DELEGATION_TOKEN, null, + null); + } + + @Override + public Response createNewReservation(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_NEW, null, + null); + } + + @Override + public Response submitReservation(ReservationSubmissionRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_SUBMIT, + resContext, null); + } + + @Override + public Response updateReservation(ReservationUpdateRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_UPDATE, + resContext, null); + } + + @Override + public Response deleteReservation(ReservationDeleteRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.POST, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_DELETE, + resContext, null); + } + + @Override + public Response listReservation(String queue, String reservationId, + long startTime, long endTime, boolean includeResourceAllocations, + HttpServletRequest hsr) throws Exception { + // queue, reservationId, startTime, endTime, includeResourceAllocations are + // specified inside hsr + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_LIST, null, + null); + } + + @Override + public AppTimeoutInfo getAppTimeout(HttpServletRequest hsr, String appId, + String type) throws AuthorizationException { + return RouterWebServiceUtil + .genericForward(webAppAddress, hsr, AppTimeoutInfo.class, + HTTPMethods.GET, RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + + "/" + appId + "/" + RMWSConsts.TIMEOUTS + "/" + type, + null, null); + } + + @Override + public AppTimeoutsInfo getAppTimeouts(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + AppTimeoutsInfo.class, HTTPMethods.GET, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.TIMEOUTS, + null, null); + } + + @Override + public Response updateApplicationTimeout(AppTimeoutInfo appTimeout, + HttpServletRequest hsr, String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + Response.class, HTTPMethods.PUT, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.TIMEOUT, + appTimeout, null); + } + + @Override + public AppAttemptsInfo 
getAppAttempts(HttpServletRequest hsr, String appId) { + return RouterWebServiceUtil.genericForward(webAppAddress, hsr, + AppAttemptsInfo.class, HTTPMethods.GET, RMWSConsts.RM_WEB_SERVICE_PATH + + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.APPATTEMPTS, + null, null); + } + + @Override + public AppAttemptInfo getAppAttempt(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + return RouterWebServiceUtil.genericForward(webAppAddress, req, + AppAttemptInfo.class, + HTTPMethods.GET, RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + + appId + "/" + RMWSConsts.APPATTEMPTS + "/" + appAttemptId, + null, null); + } + + @Override + public ContainersInfo getContainers(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + return RouterWebServiceUtil.genericForward(webAppAddress, req, + ContainersInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId + "/" + + RMWSConsts.APPATTEMPTS + "/" + appAttemptId + "/" + + RMWSConsts.CONTAINERS, + null, null); + } + + @Override + public ContainerInfo getContainer(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId, + String containerId) { + return RouterWebServiceUtil.genericForward(webAppAddress, req, + ContainerInfo.class, HTTPMethods.GET, + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId + "/" + + RMWSConsts.APPATTEMPTS + "/" + appAttemptId + "/" + + RMWSConsts.CONTAINERS + "/" + containerId, + null, null); + } + + @Override + public void setNextInterceptor(RESTRequestInterceptor next) { + throw new YarnRuntimeException("setNextInterceptor is being called on " + + "DefaultRequestInterceptorREST, which should be the last one " + + "in the chain. Check if the interceptor pipeline configuration " + + "is correct"); + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java new file mode 100644 index 00000000000..8ecc19dacbe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java @@ -0,0 +1,750 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang.NotImplementedException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.policies.RouterPolicyFacade; +import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException; +import org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.*; +import org.apache.hadoop.yarn.server.router.RouterServerUtil; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; +import java.io.IOException; +import java.util.*; + +/** + * Extends the {@code AbstractRESTRequestInterceptor} class and provides an + * implementation for federation of YARN RM and scaling an application across + * multiple YARN SubClusters. All the federation specific implementation is + * encapsulated in this class. This is always the last intercepter in the chain. 
+ */ +public class FederationInterceptorREST extends AbstractRESTRequestInterceptor { + + private static final Logger LOG = + LoggerFactory.getLogger(FederationInterceptorREST.class); + + private int numSubmitRetries; + private FederationStateStoreFacade federationFacade; + private Random rand; + private RouterPolicyFacade policyFacade; + + private Map interceptors; + + @Override + public void init(String user) { + federationFacade = FederationStateStoreFacade.getInstance(); + rand = new Random(System.currentTimeMillis()); + + final Configuration conf = this.getConf(); + + try { + policyFacade = new RouterPolicyFacade(conf, federationFacade, + this.federationFacade.getSubClusterResolver(), null); + } catch (FederationPolicyInitializationException e) { + LOG.error(e.getMessage()); + } + + numSubmitRetries = + conf.getInt(YarnConfiguration.ROUTER_CLIENTRM_SUBMIT_RETRY, + YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_SUBMIT_RETRY); + + interceptors = new HashMap(); + } + + private SubClusterId getRandomActiveSubCluster( + Map activeSubclusters, + List blackListSubClusters) throws YarnException { + + if (activeSubclusters == null || activeSubclusters.size() < 1) { + RouterServerUtil.logAndThrowException( + FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE, null); + } + List list = new ArrayList<>(activeSubclusters.keySet()); + + FederationPolicyUtils.validateSubClusterAvailability(list, + blackListSubClusters); + + if (blackListSubClusters != null) { + + // Remove from the active SubClusters from StateStore the blacklisted ones + for (SubClusterId scId : blackListSubClusters) { + list.remove(scId); + } + } + + return list.get(rand.nextInt(list.size())); + } + + @VisibleForTesting + protected DefaultRequestInterceptorREST getInterceptorForSubCluster( + SubClusterId subClusterId) { + if (interceptors.containsKey(subClusterId)) { + return interceptors.get(subClusterId); + } else { + LOG.error("The interceptor for SubCluster " + subClusterId + + " does not exist in the cache."); + return null; + } + } + + private DefaultRequestInterceptorREST createInterceptorForSubCluster( + SubClusterId subClusterId, String webAppAddress) { + + final Configuration conf = this.getConf(); + + String interceptorClassName = + conf.get(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS, + YarnConfiguration.DEFAULT_ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS); + DefaultRequestInterceptorREST interceptorInstance = null; + try { + Class interceptorClass = conf.getClassByName(interceptorClassName); + if (DefaultRequestInterceptorREST.class + .isAssignableFrom(interceptorClass)) { + interceptorInstance = (DefaultRequestInterceptorREST) ReflectionUtils + .newInstance(interceptorClass, conf); + + } else { + throw new YarnRuntimeException( + "Class: " + interceptorClassName + " not instance of " + + DefaultRequestInterceptorREST.class.getCanonicalName()); + } + } catch (ClassNotFoundException e) { + throw new YarnRuntimeException( + "Could not instantiate ApplicationMasterRequestInterceptor: " + + interceptorClassName, + e); + } + + interceptorInstance.setWebAppAddress(webAppAddress); + interceptorInstance.setSubClusterId(subClusterId); + interceptors.put(subClusterId, interceptorInstance); + return interceptorInstance; + } + + @VisibleForTesting + protected DefaultRequestInterceptorREST getOrCreateInterceptorForSubCluster( + SubClusterId subClusterId, String webAppAddress) { + DefaultRequestInterceptorREST interceptor = + getInterceptorForSubCluster(subClusterId); + if (interceptor == null) { + interceptor = 
createInterceptorForSubCluster(subClusterId, webAppAddress); + } + return interceptor; + } + + /** + * Yarn Router forwards every getNewApplication requests to any RM. During + * this operation there will be no communication with the State Store. The + * Router will forward the requests to any SubCluster. The Router will retry + * to submit the request on #numSubmitRetries different SubClusters. The + * SubClusters are randomly chosen from the active ones. + *

+ * Possible failures and behaviors: + *

+ * Client: identical behavior as {@code RMWebServices}. + *

+ * Router: the Client will time out and resubmit. + *

+ * ResourceManager: the Router will time out and contact another RM. + *

+ * StateStore: not in the execution. + */ + @Override + public Response createNewApplication(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + Map subClustersActive; + try { + subClustersActive = federationFacade.getSubClusters(true); + } catch (YarnException e) { + return Response.status(Status.INTERNAL_SERVER_ERROR) + .entity(e.getLocalizedMessage()).build(); + } + + List blacklist = new ArrayList(); + + for (int i = 0; i < numSubmitRetries; ++i) { + + SubClusterId subClusterId; + try { + subClusterId = getRandomActiveSubCluster(subClustersActive, blacklist); + } catch (YarnException e) { + return Response.status(Status.SERVICE_UNAVAILABLE) + .entity(e.getLocalizedMessage()).build(); + } + + LOG.debug( + "getNewApplication try #" + i + " on SubCluster " + subClusterId); + + DefaultRequestInterceptorREST interceptor = + getOrCreateInterceptorForSubCluster(subClusterId, + subClustersActive.get(subClusterId).getRMWebServiceAddress()); + Response response = null; + try { + response = interceptor.createNewApplication(hsr); + } catch (Exception e) { + LOG.warn("Unable to create a new ApplicationId in SubCluster " + + subClusterId.getId(), e); + } + + if (response != null && response.getStatus() == 200) { + return response; + } else { + // Empty response from the ResourceManager. + // Blacklist this subcluster for this request. + blacklist.add(subClusterId); + } + } + + String errMsg = "Fail to create a new application."; + LOG.error(errMsg); + return Response.status(Status.INTERNAL_SERVER_ERROR).entity(errMsg).build(); + } + + /** + * Today, in YARN there are no checks of any applicationId submitted. + *

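Before the scenario walkthrough below, a minimal client-side sketch (editorial, not part of this patch) of the two-step submission flow the Router mirrors from the RM REST API under /ws/v1/cluster. The Router host and port are assumptions, and the Jersey 1.x client is the same library this patch uses in RouterWebServiceUtil.

import javax.ws.rs.core.MediaType;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;

public class RouterSubmitFlowSketch {
  public static void main(String[] args) {
    // Assumed Router web address; in a real deployment it comes from the
    // Router webapp address configuration.
    WebResource cluster =
        Client.create().resource("http://router-host:8089").path("ws/v1/cluster");
    // Step 1: ask for a new ApplicationId. The Router forwards this to a
    // randomly chosen active SubCluster RM, retrying on another one on failure.
    ClientResponse newApp = cluster.path("apps/new-application")
        .accept(MediaType.APPLICATION_XML)
        .post(ClientResponse.class);
    System.out.println("new-application -> HTTP " + newApp.getStatus());
    // Step 2 (omitted for brevity): POST an ApplicationSubmissionContextInfo
    // document to ws/v1/cluster/apps; the Router records the chosen home
    // SubCluster in the FederationStateStore before forwarding, as described
    // in the scenarios that follow.
  }
}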
+ * Base scenarios: + *

+ * The Client submits an application to the Router. • The Router selects one + * SubCluster to forward the request. • The Router inserts a tuple into + * StateStore with the selected SubCluster (e.g. SC1) and the appId. • The + * State Store replies with the selected SubCluster (e.g. SC1). • The Router + * submits the request to the selected SubCluster. + *

+ * In case of State Store failure: + *

+ * The client submits an application to the Router. • The Router selects one + * SubCluster to forward the request. • The Router inserts a tuple into State + * Store with the selected SubCluster (e.g. SC1) and the appId. • Because the + * State Store is down, the Router times out and will retry depending on the + * FederationFacade settings. • The Router replies to the client with an error + * message. + *

+ * If State Store fails after inserting the tuple: identical behavior as + * {@code RMWebServices}. + *

+ * In case of Router failure: + *

+ * Scenario 1 – Crash before submission to the ResourceManager + *

+ * The Client submits an application to the Router. • The Router selects one + * SubCluster to forward the request. • The Router inserts a tuple into State + * Store with the selected SubCluster (e.g. SC1) and the appId. • The Router + * crashes. • The Client times out and resubmits the application. • The Router + * selects one SubCluster to forward the request. • The Router inserts a tuple + * into State Store with the selected SubCluster (e.g. SC2) and the appId. • + * Because the tuple is already inserted in the State Store, it returns the + * previously selected SubCluster (e.g. SC1). • The Router submits the request + * to the selected SubCluster (e.g. SC1). + *

+ * Scenario 2 – Crash after submission to the ResourceManager + *

+ * • The Client submits an application to the Router. • The Router selects one + * SubCluster to forward the request. • The Router inserts a tuple into State + * Store with the selected SubCluster (e.g. SC1) and the appId. • The Router + * submits the request to the selected SubCluster. • The Router crashes. • The + * Client times out and resubmits the application. • The Router selects one + * SubCluster to forward the request. • The Router inserts a tuple into State + * Store with the selected SubCluster (e.g. SC2) and the appId. • The State + * Store replies with the selected SubCluster (e.g. SC1). • The Router submits + * the request to the selected SubCluster (e.g. SC1). When a client re-submits + * the same application to the same RM, the RM does not raise an exception and + * replies with a success message. + *

+ * In case of Client failure: identical behavior as {@code RMWebServices}. + *

+ * In case of ResourceManager failure: + *

+ * The Client submits an application to the Router. • The Router selects one + * SubCluster to forward the request. • The Router inserts a tuple into State + * Store with the selected SubCluster (e.g. SC1) and the appId. • The Router + * submits the request to the selected SubCluster. • The entire SubCluster is + * down – all the RMs in HA or the master RM is not reachable. • The Router + * times out. • The Router selects a new SubCluster to forward the request. • + * The Router update a tuple into State Store with the selected SubCluster + * (e.g. SC2) and the appId. • The State Store replies with OK answer. • The + * Router submits the request to the selected SubCluster (e.g. SC2). + */ + @Override + public Response submitApplication(ApplicationSubmissionContextInfo newApp, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + if (newApp == null || newApp.getApplicationId() == null) { + String errMsg = "Missing ApplicationSubmissionContextInfo or " + + "applicationSubmissionContex information."; + return Response.status(Status.BAD_REQUEST).entity(errMsg).build(); + } + + ApplicationId applicationId = null; + try { + applicationId = ApplicationId.fromString(newApp.getApplicationId()); + } catch (IllegalArgumentException e) { + return Response.status(Status.BAD_REQUEST).entity(e.getLocalizedMessage()) + .build(); + } + + List blacklist = new ArrayList(); + + for (int i = 0; i < numSubmitRetries; ++i) { + + ApplicationSubmissionContext context = + RMWebAppUtil.createAppSubmissionContext(newApp, this.getConf()); + + SubClusterId subClusterId = null; + try { + subClusterId = policyFacade.getHomeSubcluster(context, blacklist); + } catch (YarnException e) { + return Response.status(Status.SERVICE_UNAVAILABLE) + .entity(e.getLocalizedMessage()).build(); + } + LOG.info("submitApplication appId" + applicationId + " try #" + i + + " on SubCluster " + subClusterId); + + ApplicationHomeSubCluster appHomeSubCluster = + ApplicationHomeSubCluster.newInstance(applicationId, subClusterId); + + if (i == 0) { + try { + // persist the mapping of applicationId and the subClusterId which has + // been selected as its home + subClusterId = + federationFacade.addApplicationHomeSubCluster(appHomeSubCluster); + } catch (YarnException e) { + String errMsg = "Unable to insert the ApplicationId " + applicationId + + " into the FederationStateStore"; + return Response.status(Status.SERVICE_UNAVAILABLE) + .entity(errMsg + " " + e.getLocalizedMessage()).build(); + } + } else { + try { + // update the mapping of applicationId and the home subClusterId to + // the new subClusterId we have selected + federationFacade.updateApplicationHomeSubCluster(appHomeSubCluster); + } catch (YarnException e) { + String errMsg = "Unable to update the ApplicationId " + applicationId + + " into the FederationStateStore"; + SubClusterId subClusterIdInStateStore; + try { + subClusterIdInStateStore = + federationFacade.getApplicationHomeSubCluster(applicationId); + } catch (YarnException e1) { + return Response.status(Status.SERVICE_UNAVAILABLE) + .entity(e1.getLocalizedMessage()).build(); + } + if (subClusterId == subClusterIdInStateStore) { + LOG.info("Application " + applicationId + + " already submitted on SubCluster " + subClusterId); + } else { + return Response.status(Status.SERVICE_UNAVAILABLE).entity(errMsg) + .build(); + } + } + } + + SubClusterInfo subClusterInfo; + try { + subClusterInfo = federationFacade.getSubCluster(subClusterId); + } catch (YarnException e) { + return 
Response.status(Status.SERVICE_UNAVAILABLE) + .entity(e.getLocalizedMessage()).build(); + } + + Response response = null; + try { + response = getOrCreateInterceptorForSubCluster(subClusterId, + subClusterInfo.getRMWebServiceAddress()).submitApplication(newApp, + hsr); + } catch (Exception e) { + LOG.warn("Unable to submit the application " + applicationId + + "to SubCluster " + subClusterId.getId(), e); + } + + if (response != null && response.getStatus() == 202) { + LOG.info("Application " + context.getApplicationName() + " with appId " + + applicationId + " submitted on " + subClusterId); + return response; + } else { + // Empty response from the ResourceManager. + // Blacklist this subcluster for this request. + blacklist.add(subClusterId); + } + } + + String errMsg = "Application " + newApp.getApplicationName() + + " with appId " + applicationId + " failed to be submitted."; + LOG.error(errMsg); + return Response.status(Status.SERVICE_UNAVAILABLE).entity(errMsg).build(); + } + + /** + * The Yarn Router will forward to the respective Yarn RM in which the AM is + * running. + *

+ * Possible failure: + *

+ * Client: identical behavior as {@code RMWebServices}. + *

+ * Router: the Client will time out and resubmit the request. + *

+ * ResourceManager: the Router will time out and the call will fail. + *

+ * State Store: the Router will timeout and it will retry depending on the + * FederationFacade settings - if the failure happened before the select + * operation. + */ + @Override + public AppInfo getApp(HttpServletRequest hsr, String appId, + Set unselectedFields) { + + ApplicationId applicationId = null; + try { + applicationId = ApplicationId.fromString(appId); + } catch (IllegalArgumentException e) { + return null; + } + + SubClusterInfo subClusterInfo = null; + SubClusterId subClusterId = null; + try { + subClusterId = + federationFacade.getApplicationHomeSubCluster(applicationId); + if (subClusterId == null) { + return null; + } + subClusterInfo = federationFacade.getSubCluster(subClusterId); + } catch (YarnException e) { + return null; + } + + return getOrCreateInterceptorForSubCluster(subClusterId, + subClusterInfo.getRMWebServiceAddress()).getApp(hsr, appId, + unselectedFields); + } + + /** + * The Yarn Router will forward to the respective Yarn RM in which the AM is + * running. + *

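As a concrete illustration of the per-application forwarding described here, a sketch (editorial, not part of this patch) of a client-side state-update call: the Router looks up the home SubCluster in the FederationStateStore and forwards the same PUT an RM would serve. The host, port, and application id below are placeholders.

import javax.ws.rs.core.MediaType;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;

public class RouterKillAppSketch {
  public static void main(String[] args) {
    String appId = "application_1500000000000_0001";   // placeholder id
    ClientResponse resp = Client.create()
        .resource("http://router-host:8089")            // assumed Router address
        .path("ws/v1/cluster/apps").path(appId).path("state")
        .entity("{\"state\":\"KILLED\"}", MediaType.APPLICATION_JSON)
        .accept(MediaType.APPLICATION_JSON)
        .put(ClientResponse.class);
    // A 2xx status means the home SubCluster RM accepted the state change.
    System.out.println("update state -> HTTP " + resp.getStatus());
  }
}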
+ * Possible failures and behaviors: + *

+ * Client: identical behavior as {@code RMWebServices}. + *

+ * Router: the Client will time out and resubmit the request. + *

+ * ResourceManager: the Router will time out and the call will fail. + *

+ * State Store: the Router will timeout and it will retry depending on the + * FederationFacade settings - if the failure happened before the select + * operation. + */ + @Override + public Response updateAppState(AppState targetState, HttpServletRequest hsr, + String appId) throws AuthorizationException, YarnException, + InterruptedException, IOException { + + ApplicationId applicationId = null; + try { + applicationId = ApplicationId.fromString(appId); + } catch (IllegalArgumentException e) { + return Response.status(Status.BAD_REQUEST).entity(e.getLocalizedMessage()) + .build(); + } + + SubClusterId subClusterId = + federationFacade.getApplicationHomeSubCluster(applicationId); + + SubClusterInfo subClusterInfo = + federationFacade.getSubCluster(subClusterId); + + return getOrCreateInterceptorForSubCluster(subClusterId, + subClusterInfo.getRMWebServiceAddress()).updateAppState(targetState, + hsr, appId); + } + + @Override + public ClusterInfo get() { + return getClusterInfo(); + } + + @Override + public ClusterInfo getClusterInfo() { + throw new NotImplementedException(); + } + + @Override + public ClusterMetricsInfo getClusterMetricsInfo() { + throw new NotImplementedException(); + } + + @Override + public SchedulerTypeInfo getSchedulerInfo() { + throw new NotImplementedException(); + } + + @Override + public String dumpSchedulerLogs(String time, HttpServletRequest hsr) + throws IOException { + throw new NotImplementedException(); + } + + @Override + public NodesInfo getNodes(String states) { + throw new NotImplementedException(); + } + + @Override + public NodeInfo getNode(String nodeId) { + throw new NotImplementedException(); + } + + @Override + public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId) { + throw new NotImplementedException(); + } + + @Override + public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, + String appId, String time) { + throw new NotImplementedException(); + } + + @Override + public ApplicationStatisticsInfo getAppStatistics(HttpServletRequest hsr, + Set stateQueries, Set typeQueries) { + throw new NotImplementedException(); + } + + @Override + public AppsInfo getApps(HttpServletRequest hsr, String stateQuery, + Set statesQuery, String finalStatusQuery, String userQuery, + String queueQuery, String count, String startedBegin, String startedEnd, + String finishBegin, String finishEnd, Set applicationTypes, + Set applicationTags, Set unselectedFields) { + throw new NotImplementedException(); + } + + @Override + public AppState getAppState(HttpServletRequest hsr, String appId) + throws AuthorizationException { + throw new NotImplementedException(); + } + + @Override + public NodeToLabelsInfo getNodeToLabels(HttpServletRequest hsr) + throws IOException { + throw new NotImplementedException(); + } + + @Override + public LabelsToNodesInfo getLabelsToNodes(Set labels) + throws IOException { + throw new NotImplementedException(); + } + + @Override + public Response replaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels, + HttpServletRequest hsr) throws IOException { + throw new NotImplementedException(); + } + + @Override + public Response replaceLabelsOnNode(Set newNodeLabelsName, + HttpServletRequest hsr, String nodeId) throws Exception { + throw new NotImplementedException(); + } + + @Override + public NodeLabelsInfo getClusterNodeLabels(HttpServletRequest hsr) + throws IOException { + throw new NotImplementedException(); + } + + @Override + public Response addToClusterNodeLabels(NodeLabelsInfo newNodeLabels, + 
HttpServletRequest hsr) throws Exception { + throw new NotImplementedException(); + } + + @Override + public Response removeFromCluserNodeLabels(Set oldNodeLabels, + HttpServletRequest hsr) throws Exception { + throw new NotImplementedException(); + } + + @Override + public NodeLabelsInfo getLabelsOnNode(HttpServletRequest hsr, String nodeId) + throws IOException { + throw new NotImplementedException(); + } + + @Override + public AppPriority getAppPriority(HttpServletRequest hsr, String appId) + throws AuthorizationException { + throw new NotImplementedException(); + } + + @Override + public Response updateApplicationPriority(AppPriority targetPriority, + HttpServletRequest hsr, String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + throw new NotImplementedException(); + } + + @Override + public AppQueue getAppQueue(HttpServletRequest hsr, String appId) + throws AuthorizationException { + throw new NotImplementedException(); + } + + @Override + public Response updateAppQueue(AppQueue targetQueue, HttpServletRequest hsr, + String appId) throws AuthorizationException, YarnException, + InterruptedException, IOException { + throw new NotImplementedException(); + } + + @Override + public Response postDelegationToken(DelegationToken tokenData, + HttpServletRequest hsr) throws AuthorizationException, IOException, + InterruptedException, Exception { + throw new NotImplementedException(); + } + + @Override + public Response postDelegationTokenExpiration(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException, + Exception { + throw new NotImplementedException(); + } + + @Override + public Response cancelDelegationToken(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException, + Exception { + throw new NotImplementedException(); + } + + @Override + public Response createNewReservation(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + throw new NotImplementedException(); + } + + @Override + public Response submitReservation(ReservationSubmissionRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + throw new NotImplementedException(); + } + + @Override + public Response updateReservation(ReservationUpdateRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + throw new NotImplementedException(); + } + + @Override + public Response deleteReservation(ReservationDeleteRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + throw new NotImplementedException(); + } + + @Override + public Response listReservation(String queue, String reservationId, + long startTime, long endTime, boolean includeResourceAllocations, + HttpServletRequest hsr) throws Exception { + throw new NotImplementedException(); + } + + @Override + public AppTimeoutInfo getAppTimeout(HttpServletRequest hsr, String appId, + String type) throws AuthorizationException { + throw new NotImplementedException(); + } + + @Override + public AppTimeoutsInfo getAppTimeouts(HttpServletRequest hsr, String appId) + throws AuthorizationException { + throw new NotImplementedException(); + } + + @Override + public Response updateApplicationTimeout(AppTimeoutInfo appTimeout, + HttpServletRequest hsr, String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + throw new 
NotImplementedException(); + } + + @Override + public AppAttemptsInfo getAppAttempts(HttpServletRequest hsr, String appId) { + throw new NotImplementedException(); + } + + @Override + public AppAttemptInfo getAppAttempt(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + throw new NotImplementedException(); + } + + @Override + public ContainersInfo getContainers(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + throw new NotImplementedException(); + } + + @Override + public ContainerInfo getContainer(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId, + String containerId) { + throw new NotImplementedException(); + } + + @Override + public void setNextInterceptor(RESTRequestInterceptor next) { + throw new YarnRuntimeException("setNextInterceptor is being called on " + + "FederationInterceptorREST, which should be the last one " + + "in the chain. Check if the interceptor pipeline configuration " + + "is correct"); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/HTTPMethods.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/HTTPMethods.java new file mode 100644 index 00000000000..45056ca701b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/HTTPMethods.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +/** + * HTTP verbs. + **/ +public enum HTTPMethods { + + /* to retrieve resource representation/information */ + GET, + /* to update existing resource */ + PUT, + /* to delete resources */ + DELETE, + /* to create new subordinate resources */ + POST +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RESTRequestInterceptor.java new file mode 100644 index 00000000000..06f39b5e393 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RESTRequestInterceptor.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServiceProtocol; +import org.apache.hadoop.yarn.server.webapp.WebServices; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; + +/** + * Defines the contract to be implemented by the request intercepter classes, + * that can be used to intercept and inspect messages sent from the client to + * the resource manager server. + * + * This class includes 4 methods getAppAttempts, getAppAttempt, getContainers + * and getContainer that belong to {@link WebServices}. They are in this class + * to make sure that RouterWebServices implements the same REST methods of + * {@code RMWebServices}. + */ +public interface RESTRequestInterceptor + extends RMWebServiceProtocol, Configurable { + + /** + * This method is called for initializing the intercepter. This is guaranteed + * to be called only once in the lifetime of this instance. + * + * @param user the name of the client + */ + void init(String user); + + /** + * This method is called to release the resources held by the intercepter. + * This will be called when the application pipeline is being destroyed. The + * concrete implementations should dispose the resources and forward the + * request to the next intercepter, if any. + */ + void shutdown(); + + /** + * Sets the next intercepter in the pipeline. The concrete implementation of + * this interface should always pass the request to the nextInterceptor after + * inspecting the message. The last intercepter in the chain is responsible to + * send the messages to the resource manager service and so the last + * intercepter will not receive this method call. + * + * @param nextInterceptor the RESTRequestInterceptor to set in the pipeline + */ + void setNextInterceptor(RESTRequestInterceptor nextInterceptor); + + /** + * Returns the next intercepter in the chain. + * + * @return the next intercepter in the chain + */ + RESTRequestInterceptor getNextInterceptor(); + + /** + * + * @see WebServices#getAppAttempt(HttpServletRequest, HttpServletResponse, + * String, String) + * @param req the servlet request + * @param res the servlet response + * @param appId the application we want to get the appAttempt. It is a + * PathParam. + * @param appAttemptId the AppAttempt we want to get the info. It is a + * PathParam. 
+ * @return AppAttemptInfo of the specific AppAttempt + */ + AppAttemptInfo getAppAttempt(HttpServletRequest req, HttpServletResponse res, + String appId, String appAttemptId); + + /** + * + * @see WebServices#getContainers(HttpServletRequest, HttpServletResponse, + * String, String) + * @param req the servlet request + * @param res the servlet response + * @param appId the application we want to get the containers info. It is a + * PathParam. + * @param appAttemptId the AppAttempt we want to get the info. It is a + * PathParam. + * @return ContainersInfo of all the containers that belong to the specific + * AppAttempt + */ + ContainersInfo getContainers(HttpServletRequest req, HttpServletResponse res, + String appId, String appAttemptId); + + /** + * + * @see WebServices#getContainer(HttpServletRequest, HttpServletResponse, + * String, String, String) + * @param req the servlet request + * @param res the servlet response + * @param appId the application we want to get the containers info. It is a + * PathParam. + * @param appAttemptId the AppAttempt we want to get the info. It is a + * PathParam. + * @param containerId the container we want to get the info. It is a + * PathParam. + * @return ContainerInfo of the specific ContainerId + */ + ContainerInfo getContainer(HttpServletRequest req, HttpServletResponse res, + String appId, String appAttemptId, String containerId); +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebApp.java new file mode 100644 index 00000000000..5436badfb63 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebApp.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver; +import org.apache.hadoop.yarn.server.router.Router; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; +import org.apache.hadoop.yarn.webapp.WebApp; +import org.apache.hadoop.yarn.webapp.YarnWebParams; + +/** + * The Router webapp. 
+ */ +public class RouterWebApp extends WebApp implements YarnWebParams { + private Router router; + + public RouterWebApp(Router router) { + this.router = router; + } + + @Override + public void setup() { + bind(JAXBContextResolver.class); + bind(RouterWebServices.class); + bind(GenericExceptionHandler.class); + bind(RouterWebApp.class).toInstance(this); + + if (router != null) { + bind(Router.class).toInstance(router); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java new file mode 100644 index 00000000000..18618eeac1b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java @@ -0,0 +1,227 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebAppUtil; +import org.apache.hadoop.yarn.webapp.BadRequestException; +import org.apache.hadoop.yarn.webapp.ForbiddenException; +import org.apache.hadoop.yarn.webapp.NotFoundException; + +import com.sun.jersey.api.ConflictException; +import com.sun.jersey.api.client.Client; +import com.sun.jersey.api.client.ClientResponse; +import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.api.client.WebResource.Builder; +import com.sun.jersey.core.util.MultivaluedMapImpl; + +/** + * The Router webservice util class. + */ +public final class RouterWebServiceUtil { + + private static String user = "YarnRouter"; + + private static final Log LOG = + LogFactory.getLog(RouterWebServiceUtil.class.getName()); + + /** Disable constructor. */ + private RouterWebServiceUtil() { + } + + /** + * Creates and performs a REST call to a specific WebService. + * + * @param webApp the address of the remote webap + * @param hsr the servlet request + * @param returnType the return type of the REST call + * @param Type of return object. 
+ * @param method the HTTP method of the REST call + * @param targetPath additional path to add to the webapp address + * @param formParam the form parameters as input for a specific REST call + * @param additionalParam the query parameters as input for a specific REST + * call in case the call has no servlet request + * @return the retrieved entity from the REST call + */ + protected static T genericForward(String webApp, HttpServletRequest hsr, + final Class returnType, HTTPMethods method, String targetPath, + Object formParam, Map additionalParam) { + + UserGroupInformation callerUGI = null; + + if (hsr != null) { + callerUGI = RMWebAppUtil.getCallerUserGroupInformation(hsr, true); + } else { + // user not required + callerUGI = UserGroupInformation.createRemoteUser(user); + + } + + if (callerUGI == null) { + LOG.error("Unable to obtain user name, user not authenticated"); + return null; + } + + try { + return callerUGI.doAs(new PrivilegedExceptionAction() { + @SuppressWarnings("unchecked") + @Override + public T run() { + + Map paramMap = null; + + // We can have hsr or additionalParam. There are no case with both. + if (hsr != null) { + paramMap = hsr.getParameterMap(); + } else if (additionalParam != null) { + paramMap = additionalParam; + } + + ClientResponse response = RouterWebServiceUtil.invokeRMWebService( + webApp, targetPath, method, + (hsr == null) ? null : hsr.getPathInfo(), paramMap, formParam); + if (Response.class.equals(returnType)) { + return (T) RouterWebServiceUtil.clientResponseToResponse(response); + } + // YARN RM can answer with Status.OK or it throws an exception + if (response.getStatus() == 200) { + return response.getEntity(returnType); + } + RouterWebServiceUtil.retrieveException(response); + return null; + } + }); + } catch (InterruptedException e) { + return null; + } catch (IOException e) { + return null; + } + } + + /** + * Performs an invocation of a REST call on a remote RMWebService. 
+ * + * @param additionalParam + */ + private static ClientResponse invokeRMWebService(String webApp, String path, + HTTPMethods method, String additionalPath, + Map queryParams, Object formParam) { + Client client = Client.create(); + + WebResource webResource = client.resource(webApp).path(path); + + if (additionalPath != null && !additionalPath.isEmpty()) { + webResource = webResource.path(additionalPath); + } + + if (queryParams != null && !queryParams.isEmpty()) { + MultivaluedMap paramMap = new MultivaluedMapImpl(); + + for (Entry param : queryParams.entrySet()) { + String[] values = param.getValue(); + for (int i = 0; i < values.length; i++) { + paramMap.add(param.getKey(), values[i]); + } + } + webResource = webResource.queryParams(paramMap); + } + + // I can forward the call in JSON or XML since the Router will convert it + // again in Object before send it back to the client + Builder builder = null; + if (formParam != null) { + builder = webResource.entity(formParam, MediaType.APPLICATION_XML); + builder = builder.accept(MediaType.APPLICATION_XML); + } else { + builder = webResource.accept(MediaType.APPLICATION_XML); + } + + ClientResponse response = null; + + switch (method) { + case DELETE: + response = builder.delete(ClientResponse.class); + break; + case GET: + response = builder.get(ClientResponse.class); + break; + case POST: + response = builder.post(ClientResponse.class); + break; + case PUT: + response = builder.put(ClientResponse.class); + break; + default: + break; + } + + return response; + } + + public static Response clientResponseToResponse(ClientResponse r) { + if (r == null) { + return null; + } + // copy the status code + ResponseBuilder rb = Response.status(r.getStatus()); + // copy all the headers + for (Entry> entry : r.getHeaders().entrySet()) { + for (String value : entry.getValue()) { + rb.header(entry.getKey(), value); + } + } + // copy the entity + rb.entity(r.getEntityInputStream()); + // return the response + return rb.build(); + } + + public static void retrieveException(ClientResponse response) { + String serverErrorMsg = response.getEntity(String.class); + int status = response.getStatus(); + if (status == 400) { + throw new BadRequestException(serverErrorMsg); + } + if (status == 403) { + throw new ForbiddenException(serverErrorMsg); + } + if (status == 404) { + throw new NotFoundException(serverErrorMsg); + } + if (status == 409) { + throw new ConflictException(serverErrorMsg); + } + + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java new file mode 100644 index 00000000000..bbb83268274 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java @@ -0,0 +1,876 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.DELETE; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.FormParam; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.JettyUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServiceProtocol; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; +import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; +import org.apache.hadoop.yarn.server.router.Router; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; +import org.apache.hadoop.yarn.util.LRUCacheHashMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.inject.Inject; +import com.google.inject.Singleton; + +/** + * RouterWebServices is a service that runs on each router that can be used to + * intercept and inspect {@link RMWebServiceProtocol} messages from client to + * the cluster resource manager. It listens {@link RMWebServiceProtocol} REST + * messages from the client and creates a request intercepting pipeline instance + * for each client. The pipeline is a chain of {@link RESTRequestInterceptor} + * instances that can inspect and modify the request/response as needed. The + * main difference with AMRMProxyService is the protocol they implement. + **/ +@Singleton +@Path("/ws/v1/cluster") +public class RouterWebServices implements RMWebServiceProtocol { + + private static final Logger LOG = + LoggerFactory.getLogger(RouterWebServices.class); + private final Router router; + private final Configuration conf; + private @Context HttpServletResponse response; + + private Map userPipelineMap; + + // -------Default values of QueryParams for RMWebServiceProtocol-------- + + public static final String DEFAULT_QUEUE = "default"; + public static final String DEFAULT_RESERVATION_ID = ""; + public static final String DEFAULT_START_TIME = "0"; + public static final String DEFAULT_END_TIME = "-1"; + public static final String DEFAULT_INCLUDE_RESOURCE = "false"; + + @Inject + public RouterWebServices(final Router router, Configuration conf) { + this.router = router; + this.conf = conf; + int maxCacheSize = + conf.getInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE, + YarnConfiguration.DEFAULT_ROUTER_PIPELINE_CACHE_MAX_SIZE); + this.userPipelineMap = Collections.synchronizedMap( + new LRUCacheHashMap( + maxCacheSize, true)); + } + + /** + * Returns the comma separated intercepter class names from the configuration. 
+ * + * @param conf + * @return the intercepter class names as an instance of ArrayList + */ + private List getInterceptorClassNames(Configuration config) { + String configuredInterceptorClassNames = + config.get(YarnConfiguration.ROUTER_WEBAPP_INTERCEPTOR_CLASS_PIPELINE, + YarnConfiguration.DEFAULT_ROUTER_WEBAPP_INTERCEPTOR_CLASS); + + List interceptorClassNames = new ArrayList(); + Collection tempList = + StringUtils.getStringCollection(configuredInterceptorClassNames); + for (String item : tempList) { + interceptorClassNames.add(item.trim()); + } + + return interceptorClassNames; + } + + private void init() { + // clear content type + response.setContentType(null); + } + + @VisibleForTesting + protected RequestInterceptorChainWrapper getInterceptorChain() { + String user = ""; + try { + user = UserGroupInformation.getCurrentUser().getUserName(); + } catch (IOException e) { + LOG.error("IOException " + e.getMessage()); + } + if (!userPipelineMap.containsKey(user)) { + initializePipeline(user); + } + return userPipelineMap.get(user); + } + + /** + * Gets the Request intercepter chains for all the users. + * + * @return the request intercepter chains. + */ + @VisibleForTesting + protected Map getPipelines() { + return this.userPipelineMap; + } + + /** + * This method creates and returns reference of the first intercepter in the + * chain of request intercepter instances. + * + * @return the reference of the first intercepter in the chain + */ + @VisibleForTesting + protected RESTRequestInterceptor createRequestInterceptorChain() { + + List interceptorClassNames = getInterceptorClassNames(conf); + + RESTRequestInterceptor pipeline = null; + RESTRequestInterceptor current = null; + for (String interceptorClassName : interceptorClassNames) { + try { + Class interceptorClass = conf.getClassByName(interceptorClassName); + if (RESTRequestInterceptor.class.isAssignableFrom(interceptorClass)) { + RESTRequestInterceptor interceptorInstance = + (RESTRequestInterceptor) ReflectionUtils + .newInstance(interceptorClass, conf); + if (pipeline == null) { + pipeline = interceptorInstance; + current = interceptorInstance; + continue; + } else { + current.setNextInterceptor(interceptorInstance); + current = interceptorInstance; + } + } else { + throw new YarnRuntimeException( + "Class: " + interceptorClassName + " not instance of " + + RESTRequestInterceptor.class.getCanonicalName()); + } + } catch (ClassNotFoundException e) { + throw new YarnRuntimeException( + "Could not instantiate RESTRequestInterceptor: " + + interceptorClassName, + e); + } + } + + if (pipeline == null) { + throw new YarnRuntimeException( + "RequestInterceptor pipeline is not configured in the system"); + } + return pipeline; + } + + /** + * Initializes the request intercepter pipeline for the specified user. 
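A minimal sketch (editorial, not part of this patch) of how a deployment might point the interceptor chain built above at the federation interceptor. The property key is assumed to be what YarnConfiguration.ROUTER_WEBAPP_INTERCEPTOR_CLASS_PIPELINE resolves to; treat the literal string as an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RouterPipelineConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Comma-separated chain, evaluated in order; the federation interceptor
    // should be the last (and here the only) element of the pipeline.
    conf.set("yarn.router.webapp.interceptor-class.pipeline",   // assumed key
        "org.apache.hadoop.yarn.server.router.webapp.FederationInterceptorREST");
    System.out.println(conf.get("yarn.router.webapp.interceptor-class.pipeline"));
  }
}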
+ * + * @param user + */ + private void initializePipeline(String user) { + RequestInterceptorChainWrapper chainWrapper = null; + synchronized (this.userPipelineMap) { + if (this.userPipelineMap.containsKey(user)) { + LOG.info("Request to start an already existing user: {}" + + " was received, so ignoring.", user); + return; + } + + chainWrapper = new RequestInterceptorChainWrapper(); + this.userPipelineMap.put(user, chainWrapper); + } + + // We register the pipeline instance in the map first and then initialize it + // later because chain initialization can be expensive and we would like to + // release the lock as soon as possible to prevent other applications from + // blocking when one application's chain is initializing + LOG.info("Initializing request processing pipeline for the user: {}", user); + + try { + RESTRequestInterceptor interceptorChain = + this.createRequestInterceptorChain(); + interceptorChain.init(user); + chainWrapper.init(interceptorChain); + } catch (Exception e) { + synchronized (this.userPipelineMap) { + this.userPipelineMap.remove(user); + } + throw e; + } + } + + /** + * Private structure for encapsulating RequestInterceptor and user instances. + * + */ + @Private + public static class RequestInterceptorChainWrapper { + private RESTRequestInterceptor rootInterceptor; + + /** + * Initializes the wrapper with the specified parameters. + * + * @param interceptor the first interceptor in the pipeline + */ + public synchronized void init(RESTRequestInterceptor interceptor) { + this.rootInterceptor = interceptor; + } + + /** + * Gets the root request intercepter. + * + * @return the root request intercepter + */ + public synchronized RESTRequestInterceptor getRootInterceptor() { + return rootInterceptor; + } + + /** + * Shutdown the chain of interceptors when the object is destroyed. 
+ */ + @Override + protected void finalize() { + rootInterceptor.shutdown(); + } + } + + @GET + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public ClusterInfo get() { + return getClusterInfo(); + } + + @GET + @Path(RMWSConsts.INFO) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public ClusterInfo getClusterInfo() { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getClusterInfo(); + } + + @GET + @Path(RMWSConsts.METRICS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public ClusterMetricsInfo getClusterMetricsInfo() { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getClusterMetricsInfo(); + } + + @GET + @Path(RMWSConsts.SCHEDULER) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public SchedulerTypeInfo getSchedulerInfo() { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getSchedulerInfo(); + } + + @POST + @Path(RMWSConsts.SCHEDULER_LOGS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public String dumpSchedulerLogs(@FormParam(RMWSConsts.TIME) String time, + @Context HttpServletRequest hsr) throws IOException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().dumpSchedulerLogs(time, hsr); + } + + @GET + @Path(RMWSConsts.NODES) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public NodesInfo getNodes(@QueryParam(RMWSConsts.STATES) String states) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getNodes(states); + } + + @GET + @Path(RMWSConsts.NODES_NODEID) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public NodeInfo getNode(@PathParam(RMWSConsts.NODEID) String nodeId) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getNode(nodeId); + } + + @GET + @Path(RMWSConsts.APPS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public AppsInfo getApps(@Context HttpServletRequest hsr, + @QueryParam(RMWSConsts.STATE) String stateQuery, + @QueryParam(RMWSConsts.STATES) Set statesQuery, + @QueryParam(RMWSConsts.FINAL_STATUS) String finalStatusQuery, + @QueryParam(RMWSConsts.USER) String userQuery, + @QueryParam(RMWSConsts.QUEUE) String queueQuery, + @QueryParam(RMWSConsts.LIMIT) String count, + @QueryParam(RMWSConsts.STARTED_TIME_BEGIN) String startedBegin, + @QueryParam(RMWSConsts.STARTED_TIME_END) String startedEnd, + @QueryParam(RMWSConsts.FINISHED_TIME_BEGIN) String finishBegin, + @QueryParam(RMWSConsts.FINISHED_TIME_END) String finishEnd, + @QueryParam(RMWSConsts.APPLICATION_TYPES) Set applicationTypes, + @QueryParam(RMWSConsts.APPLICATION_TAGS) Set applicationTags, + 
@QueryParam(RMWSConsts.DESELECTS) Set unselectedFields) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getApps(hsr, stateQuery, statesQuery, + finalStatusQuery, userQuery, queueQuery, count, startedBegin, + startedEnd, finishBegin, finishEnd, applicationTypes, applicationTags, + unselectedFields); + } + + @GET + @Path(RMWSConsts.SCHEDULER_ACTIVITIES) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public ActivitiesInfo getActivities(@Context HttpServletRequest hsr, + @QueryParam(RMWSConsts.NODEID) String nodeId) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getActivities(hsr, nodeId); + } + + @GET + @Path(RMWSConsts.SCHEDULER_APP_ACTIVITIES) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr, + @QueryParam(RMWSConsts.APP_ID) String appId, + @QueryParam(RMWSConsts.MAX_TIME) String time) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getAppActivities(hsr, appId, time); + } + + @GET + @Path(RMWSConsts.APP_STATISTICS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public ApplicationStatisticsInfo getAppStatistics( + @Context HttpServletRequest hsr, + @QueryParam(RMWSConsts.STATES) Set stateQueries, + @QueryParam(RMWSConsts.APPLICATION_TYPES) Set typeQueries) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getAppStatistics(hsr, stateQueries, + typeQueries); + } + + @GET + @Path(RMWSConsts.APPS_APPID) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public AppInfo getApp(@Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId, + @QueryParam(RMWSConsts.DESELECTS) Set unselectedFields) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getApp(hsr, appId, unselectedFields); + } + + @GET + @Path(RMWSConsts.APPS_APPID_STATE) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public AppState getAppState(@Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getAppState(hsr, appId); + } + + @PUT + @Path(RMWSConsts.APPS_APPID_STATE) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response updateAppState(AppState targetState, + @Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().updateAppState(targetState, hsr, + appId); + } + + @GET + @Path(RMWSConsts.GET_NODE_TO_LABELS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + 
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public NodeToLabelsInfo getNodeToLabels(@Context HttpServletRequest hsr) + throws IOException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getNodeToLabels(hsr); + } + + @GET + @Path(RMWSConsts.LABEL_MAPPINGS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public LabelsToNodesInfo getLabelsToNodes( + @QueryParam(RMWSConsts.LABELS) Set labels) throws IOException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getLabelsToNodes(labels); + } + + @POST + @Path(RMWSConsts.REPLACE_NODE_TO_LABELS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response replaceLabelsOnNodes( + final NodeToLabelsEntryList newNodeToLabels, + @Context HttpServletRequest hsr) throws Exception { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().replaceLabelsOnNodes(newNodeToLabels, + hsr); + } + + @POST + @Path(RMWSConsts.NODES_NODEID_REPLACE_LABELS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response replaceLabelsOnNode( + @QueryParam(RMWSConsts.LABELS) Set newNodeLabelsName, + @Context HttpServletRequest hsr, + @PathParam(RMWSConsts.NODEID) String nodeId) throws Exception { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().replaceLabelsOnNode(newNodeLabelsName, + hsr, nodeId); + } + + @GET + @Path(RMWSConsts.GET_NODE_LABELS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public NodeLabelsInfo getClusterNodeLabels(@Context HttpServletRequest hsr) + throws IOException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getClusterNodeLabels(hsr); + } + + @POST + @Path(RMWSConsts.ADD_NODE_LABELS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response addToClusterNodeLabels(NodeLabelsInfo newNodeLabels, + @Context HttpServletRequest hsr) throws Exception { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().addToClusterNodeLabels(newNodeLabels, + hsr); + } + + @POST + @Path(RMWSConsts.REMOVE_NODE_LABELS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response removeFromCluserNodeLabels( + @QueryParam(RMWSConsts.LABELS) Set oldNodeLabels, + @Context HttpServletRequest hsr) throws Exception { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor() + .removeFromCluserNodeLabels(oldNodeLabels, hsr); + } + + @GET + @Path(RMWSConsts.NODES_NODEID_GETLABELS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public NodeLabelsInfo getLabelsOnNode(@Context HttpServletRequest hsr, + @PathParam(RMWSConsts.NODEID) String nodeId) throws IOException { + 
init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getLabelsOnNode(hsr, nodeId); + } + + @GET + @Path(RMWSConsts.APPS_APPID_PRIORITY) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public AppPriority getAppPriority(@Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getAppPriority(hsr, appId); + } + + @PUT + @Path(RMWSConsts.APPS_APPID_PRIORITY) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response updateApplicationPriority(AppPriority targetPriority, + @Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor() + .updateApplicationPriority(targetPriority, hsr, appId); + } + + @GET + @Path(RMWSConsts.APPS_APPID_QUEUE) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public AppQueue getAppQueue(@Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getAppQueue(hsr, appId); + } + + @PUT + @Path(RMWSConsts.APPS_APPID_QUEUE) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response updateAppQueue(AppQueue targetQueue, + @Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().updateAppQueue(targetQueue, hsr, + appId); + } + + @POST + @Path(RMWSConsts.APPS_NEW_APPLICATION) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response createNewApplication(@Context HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().createNewApplication(hsr); + } + + @POST + @Path(RMWSConsts.APPS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response submitApplication(ApplicationSubmissionContextInfo newApp, + @Context HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().submitApplication(newApp, hsr); + } + + @POST + @Path(RMWSConsts.DELEGATION_TOKEN) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response postDelegationToken(DelegationToken tokenData, + @Context HttpServletRequest hsr) throws 
AuthorizationException, + IOException, InterruptedException, Exception { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().postDelegationToken(tokenData, hsr); + } + + @POST + @Path(RMWSConsts.DELEGATION_TOKEN_EXPIRATION) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response postDelegationTokenExpiration(@Context HttpServletRequest hsr) + throws AuthorizationException, IOException, Exception { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().postDelegationTokenExpiration(hsr); + } + + @DELETE + @Path(RMWSConsts.DELEGATION_TOKEN) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response cancelDelegationToken(@Context HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException, + Exception { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().cancelDelegationToken(hsr); + } + + @POST + @Path(RMWSConsts.RESERVATION_NEW) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response createNewReservation(@Context HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().createNewReservation(hsr); + } + + @POST + @Path(RMWSConsts.RESERVATION_SUBMIT) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response submitReservation(ReservationSubmissionRequestInfo resContext, + @Context HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().submitReservation(resContext, hsr); + } + + @POST + @Path(RMWSConsts.RESERVATION_UPDATE) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response updateReservation(ReservationUpdateRequestInfo resContext, + @Context HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().updateReservation(resContext, hsr); + } + + @POST + @Path(RMWSConsts.RESERVATION_DELETE) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response deleteReservation(ReservationDeleteRequestInfo resContext, + @Context HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().deleteReservation(resContext, hsr); + } + + @GET + @Path(RMWSConsts.RESERVATION_LIST) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response listReservation( + @QueryParam(RMWSConsts.QUEUE) @DefaultValue(DEFAULT_QUEUE) String queue, + 
@QueryParam(RMWSConsts.RESERVATION_ID) @DefaultValue(DEFAULT_RESERVATION_ID) String reservationId, + @QueryParam(RMWSConsts.START_TIME) @DefaultValue(DEFAULT_START_TIME) long startTime, + @QueryParam(RMWSConsts.END_TIME) @DefaultValue(DEFAULT_END_TIME) long endTime, + @QueryParam(RMWSConsts.INCLUDE_RESOURCE) @DefaultValue(DEFAULT_INCLUDE_RESOURCE) boolean includeResourceAllocations, + @Context HttpServletRequest hsr) throws Exception { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().listReservation(queue, reservationId, + startTime, endTime, includeResourceAllocations, hsr); + } + + @GET + @Path(RMWSConsts.APPS_TIMEOUTS_TYPE) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public AppTimeoutInfo getAppTimeout(@Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId, + @PathParam(RMWSConsts.TYPE) String type) throws AuthorizationException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getAppTimeout(hsr, appId, type); + } + + @GET + @Path(RMWSConsts.APPS_TIMEOUTS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public AppTimeoutsInfo getAppTimeouts(@Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getAppTimeouts(hsr, appId); + } + + @PUT + @Path(RMWSConsts.APPS_TIMEOUT) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public Response updateApplicationTimeout(AppTimeoutInfo appTimeout, + @Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().updateApplicationTimeout(appTimeout, + hsr, appId); + } + + @GET + @Path(RMWSConsts.APPS_APPID_APPATTEMPTS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + @Override + public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest hsr, + @PathParam(RMWSConsts.APPID) String appId) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getAppAttempts(hsr, appId); + } + + @GET + @Path(RMWSConsts.APPS_APPID_APPATTEMPTS_APPATTEMPTID) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + public org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo getAppAttempt( + @Context HttpServletRequest req, @Context HttpServletResponse res, + @PathParam(RMWSConsts.APPID) String appId, + @PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getAppAttempt(req, res, appId, + appAttemptId); + } + + @GET + @Path(RMWSConsts.APPS_APPID_APPATTEMPTS_APPATTEMPTID_CONTAINERS) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + public ContainersInfo 
getContainers(@Context HttpServletRequest req, + @Context HttpServletResponse res, + @PathParam(RMWSConsts.APPID) String appId, + @PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getContainers(req, res, appId, + appAttemptId); + } + + @GET + @Path(RMWSConsts.GET_CONTAINER) + @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, + MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) + public ContainerInfo getContainer(@Context HttpServletRequest req, + @Context HttpServletResponse res, + @PathParam(RMWSConsts.APPID) String appId, + @PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId, + @PathParam(RMWSConsts.CONTAINERID) String containerId) { + init(); + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getContainer(req, res, appId, + appAttemptId, containerId); + } + + @VisibleForTesting + protected void setResponse(HttpServletResponse response) { + this.response = response; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/package-info.java new file mode 100644 index 00000000000..bd94ead9c06 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Router WebApp package. **/ +package org.apache.hadoop.yarn.server.router.webapp; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/BaseRouterClientRMTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/BaseRouterClientRMTest.java new file mode 100644 index 00000000000..7fc4719bfb5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/BaseRouterClientRMTest.java @@ -0,0 +1,586 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.Collections; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; +import 
org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ReservationDefinition; +import org.apache.hadoop.yarn.api.records.ReservationId; +import org.apache.hadoop.yarn.api.records.ReservationRequest; +import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter; +import org.apache.hadoop.yarn.api.records.ReservationRequests; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.Token; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.util.Clock; +import org.apache.hadoop.yarn.util.UTCClock; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +/** + * Base class for all the RouterClientRMService test cases. It provides utility + * methods that can be used by the concrete test case classes. + * + */ +public abstract class BaseRouterClientRMTest { + + /** + * The RouterClientRMService instance that will be used by all the test cases. + */ + private MockRouterClientRMService clientrmService; + /** + * Thread pool used for asynchronous operations. + */ + private static ExecutorService threadpool = Executors.newCachedThreadPool(); + private Configuration conf; + private AsyncDispatcher dispatcher; + + public final static int TEST_MAX_CACHE_SIZE = 10; + + protected MockRouterClientRMService getRouterClientRMService() { + Assert.assertNotNull(this.clientrmService); + return this.clientrmService; + } + + protected YarnConfiguration createConfiguration() { + YarnConfiguration config = new YarnConfiguration(); + String mockPassThroughInterceptorClass = + PassThroughClientRequestInterceptor.class.getName(); + + // Create a request intercepter pipeline for testing. The last one in the + // chain will call the mock resource manager. 
The others in the chain will + // simply forward it to the next one in the chain + config.set(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE, + mockPassThroughInterceptorClass + "," + mockPassThroughInterceptorClass + + "," + mockPassThroughInterceptorClass + "," + + MockClientRequestInterceptor.class.getName()); + + config.setInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE, + TEST_MAX_CACHE_SIZE); + return config; + } + + @Before + public void setUp() { + this.conf = createConfiguration(); + this.dispatcher = new AsyncDispatcher(); + this.dispatcher.init(conf); + this.dispatcher.start(); + this.clientrmService = createAndStartRouterClientRMService(); + } + + public void setUpConfig() { + this.conf = createConfiguration(); + } + + protected Configuration getConf() { + return this.conf; + } + + @After + public void tearDown() { + if (clientrmService != null) { + clientrmService.stop(); + clientrmService = null; + } + if (this.dispatcher != null) { + this.dispatcher.stop(); + } + } + + protected ExecutorService getThreadPool() { + return threadpool; + } + + protected MockRouterClientRMService createAndStartRouterClientRMService() { + MockRouterClientRMService svc = new MockRouterClientRMService(); + svc.init(conf); + svc.start(); + return svc; + } + + protected static class MockRouterClientRMService + extends RouterClientRMService { + public MockRouterClientRMService() { + super(); + } + } + + protected GetNewApplicationResponse getNewApplication(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetNewApplicationResponse run() throws Exception { + GetNewApplicationRequest req = + GetNewApplicationRequest.newInstance(); + GetNewApplicationResponse response = + getRouterClientRMService().getNewApplication(req); + return response; + } + }); + } + + protected SubmitApplicationResponse submitApplication( + final ApplicationId appId, String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public SubmitApplicationResponse run() throws Exception { + ApplicationSubmissionContext context = + ApplicationSubmissionContext.newInstance(appId, "", "", null, + null, false, false, -1, null, null); + SubmitApplicationRequest req = + SubmitApplicationRequest.newInstance(context); + SubmitApplicationResponse response = + getRouterClientRMService().submitApplication(req); + return response; + } + }); + } + + protected KillApplicationResponse forceKillApplication( + final ApplicationId appId, String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public KillApplicationResponse run() throws Exception { + KillApplicationRequest req = + KillApplicationRequest.newInstance(appId); + KillApplicationResponse response = + getRouterClientRMService().forceKillApplication(req); + return response; + } + }); + } + + protected GetClusterMetricsResponse getClusterMetrics(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetClusterMetricsResponse run() throws Exception { + GetClusterMetricsRequest req = + GetClusterMetricsRequest.newInstance(); + GetClusterMetricsResponse response = + 
getRouterClientRMService().getClusterMetrics(req); + return response; + } + }); + } + + protected GetClusterNodesResponse getClusterNodes(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetClusterNodesResponse run() throws Exception { + GetClusterNodesRequest req = GetClusterNodesRequest.newInstance(); + GetClusterNodesResponse response = + getRouterClientRMService().getClusterNodes(req); + return response; + } + }); + } + + protected GetQueueInfoResponse getQueueInfo(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetQueueInfoResponse run() throws Exception { + GetQueueInfoRequest req = + GetQueueInfoRequest.newInstance("default", false, false, false); + GetQueueInfoResponse response = + getRouterClientRMService().getQueueInfo(req); + return response; + } + }); + } + + protected GetQueueUserAclsInfoResponse getQueueUserAcls(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetQueueUserAclsInfoResponse run() throws Exception { + GetQueueUserAclsInfoRequest req = + GetQueueUserAclsInfoRequest.newInstance(); + GetQueueUserAclsInfoResponse response = + getRouterClientRMService().getQueueUserAcls(req); + return response; + } + }); + } + + protected MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues( + String user, final ApplicationId appId) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user).doAs( + new PrivilegedExceptionAction() { + @Override + public MoveApplicationAcrossQueuesResponse run() throws Exception { + + MoveApplicationAcrossQueuesRequest req = + MoveApplicationAcrossQueuesRequest.newInstance(appId, + "newQueue"); + MoveApplicationAcrossQueuesResponse response = + getRouterClientRMService().moveApplicationAcrossQueues(req); + return response; + } + }); + } + + public GetNewReservationResponse getNewReservation(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetNewReservationResponse run() throws Exception { + GetNewReservationResponse response = getRouterClientRMService() + .getNewReservation(GetNewReservationRequest.newInstance()); + return response; + } + }); + } + + protected ReservationSubmissionResponse submitReservation(String user, + final ReservationId reservationId) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ReservationSubmissionResponse run() throws Exception { + Clock clock = new UTCClock(); + long arrival = clock.getTime(); + long duration = 60000; + long deadline = (long) (arrival + 1.05 * duration); + + ReservationSubmissionRequest req = createSimpleReservationRequest(1, + arrival, deadline, duration, reservationId); + ReservationSubmissionResponse response = + getRouterClientRMService().submitReservation(req); + return response; + } + }); + } + + protected ReservationUpdateResponse updateReservation(String user, + final ReservationId reservationId) + throws YarnException, IOException, InterruptedException { + 
return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ReservationUpdateResponse run() throws Exception { + Clock clock = new UTCClock(); + long arrival = clock.getTime(); + long duration = 60000; + long deadline = (long) (arrival + 1.05 * duration); + ReservationDefinition rDef = + createSimpleReservationRequest(1, arrival, deadline, duration, + reservationId).getReservationDefinition(); + + ReservationUpdateRequest req = + ReservationUpdateRequest.newInstance(rDef, reservationId); + ReservationUpdateResponse response = + getRouterClientRMService().updateReservation(req); + return response; + } + }); + } + + protected ReservationDeleteResponse deleteReservation(String user, + final ReservationId reservationId) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ReservationDeleteResponse run() throws Exception { + ReservationDeleteRequest req = + ReservationDeleteRequest.newInstance(reservationId); + ReservationDeleteResponse response = + getRouterClientRMService().deleteReservation(req); + return response; + } + }); + } + + protected GetNodesToLabelsResponse getNodeToLabels(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetNodesToLabelsResponse run() throws Exception { + GetNodesToLabelsRequest req = GetNodesToLabelsRequest.newInstance(); + GetNodesToLabelsResponse response = + getRouterClientRMService().getNodeToLabels(req); + return response; + } + }); + } + + protected GetLabelsToNodesResponse getLabelsToNodes(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetLabelsToNodesResponse run() throws Exception { + GetLabelsToNodesRequest req = GetLabelsToNodesRequest.newInstance(); + GetLabelsToNodesResponse response = + getRouterClientRMService().getLabelsToNodes(req); + return response; + } + }); + } + + protected GetClusterNodeLabelsResponse getClusterNodeLabels(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetClusterNodeLabelsResponse run() throws Exception { + GetClusterNodeLabelsRequest req = + GetClusterNodeLabelsRequest.newInstance(); + GetClusterNodeLabelsResponse response = + getRouterClientRMService().getClusterNodeLabels(req); + return response; + } + }); + } + + protected GetApplicationReportResponse getApplicationReport(String user, + final ApplicationId appId) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetApplicationReportResponse run() throws Exception { + GetApplicationReportRequest req = + GetApplicationReportRequest.newInstance(appId); + GetApplicationReportResponse response = + getRouterClientRMService().getApplicationReport(req); + return response; + } + }); + } + + protected GetApplicationsResponse getApplications(String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetApplicationsResponse run() throws 
Exception { + GetApplicationsRequest req = GetApplicationsRequest.newInstance(); + GetApplicationsResponse response = + getRouterClientRMService().getApplications(req); + return response; + } + }); + } + + protected GetApplicationAttemptReportResponse getApplicationAttemptReport( + String user, final ApplicationAttemptId appAttemptId) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user).doAs( + new PrivilegedExceptionAction() { + @Override + public GetApplicationAttemptReportResponse run() throws Exception { + GetApplicationAttemptReportRequest req = + GetApplicationAttemptReportRequest.newInstance(appAttemptId); + GetApplicationAttemptReportResponse response = + getRouterClientRMService().getApplicationAttemptReport(req); + return response; + } + }); + } + + protected GetApplicationAttemptsResponse getApplicationAttempts(String user, + final ApplicationId applicationId) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetApplicationAttemptsResponse run() throws Exception { + GetApplicationAttemptsRequest req = + GetApplicationAttemptsRequest.newInstance(applicationId); + GetApplicationAttemptsResponse response = + getRouterClientRMService().getApplicationAttempts(req); + return response; + } + }); + } + + protected GetContainerReportResponse getContainerReport(String user, + final ContainerId containerId) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetContainerReportResponse run() throws Exception { + GetContainerReportRequest req = + GetContainerReportRequest.newInstance(containerId); + GetContainerReportResponse response = + getRouterClientRMService().getContainerReport(req); + return response; + } + }); + } + + protected GetContainersResponse getContainers(String user, + final ApplicationAttemptId appAttemptId) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetContainersResponse run() throws Exception { + GetContainersRequest req = + GetContainersRequest.newInstance(appAttemptId); + GetContainersResponse response = + getRouterClientRMService().getContainers(req); + return response; + } + }); + } + + protected GetDelegationTokenResponse getDelegationToken(final String user) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public GetDelegationTokenResponse run() throws Exception { + GetDelegationTokenRequest req = + GetDelegationTokenRequest.newInstance(user); + GetDelegationTokenResponse response = + getRouterClientRMService().getDelegationToken(req); + return response; + } + }); + } + + protected RenewDelegationTokenResponse renewDelegationToken(String user, + final Token token) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public RenewDelegationTokenResponse run() throws Exception { + RenewDelegationTokenRequest req = + RenewDelegationTokenRequest.newInstance(token); + RenewDelegationTokenResponse response = + getRouterClientRMService().renewDelegationToken(req); + return response; + } + }); + } + + 
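[Illustration, not part of this patch: every helper above follows the same UserGroupInformation.createRemoteUser(user).doAs(...) pattern, so a concrete test built on this base class only needs to call the helper and assert on the response. A minimal sketch under that assumption; the test class name and assertion are hypothetical.]

    import org.junit.Assert;
    import org.junit.Test;
    import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;

    public class TestRouterClientRMServiceSmoke extends BaseRouterClientRMTest {
      @Test
      public void testNewApplicationThroughPipeline() throws Exception {
        // Runs the call as the remote "test-user" and drives it through the whole
        // interceptor chain configured by createConfiguration(), ending at the
        // mock resource manager.
        GetNewApplicationResponse response = getNewApplication("test-user");
        Assert.assertNotNull(response);
      }
    }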
protected CancelDelegationTokenResponse cancelDelegationToken(String user, + final Token token) + throws YarnException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public CancelDelegationTokenResponse run() throws Exception { + CancelDelegationTokenRequest req = + CancelDelegationTokenRequest.newInstance(token); + CancelDelegationTokenResponse response = + getRouterClientRMService().cancelDelegationToken(req); + return response; + } + }); + } + + private ReservationSubmissionRequest createSimpleReservationRequest( + int numContainers, long arrival, long deadline, long duration, + ReservationId reservationId) { + // create a request with a single atomic ask + ReservationRequest r = ReservationRequest + .newInstance(Resource.newInstance(1024, 1), numContainers, 1, duration); + ReservationRequests reqs = ReservationRequests.newInstance( + Collections.singletonList(r), ReservationRequestInterpreter.R_ALL); + ReservationDefinition rDef = ReservationDefinition.newInstance(arrival, + deadline, reqs, "testRouterClientRMService#reservation"); + ReservationSubmissionRequest request = ReservationSubmissionRequest + .newInstance(rDef, "dedicated", reservationId); + return request; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/MockClientRequestInterceptor.java similarity index 58% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/MockClientRequestInterceptor.java index 9479d0bcdd7..b38703fb542 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrDecreaseContainersResourceEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/MockClientRequestInterceptor.java @@ -16,22 +16,21 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.nodemanager; +package org.apache.hadoop.yarn.server.router.clientrm; -import org.apache.hadoop.yarn.api.records.Container; -import java.util.List; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.MockResourceManagerFacade; -public class CMgrDecreaseContainersResourceEvent extends ContainerManagerEvent { +/** + * This class mocks the ClientRequestInterceptor. 
+ */ +public class MockClientRequestInterceptor + extends DefaultClientRequestInterceptor { - private final List containersToDecrease; - - public CMgrDecreaseContainersResourceEvent(List - containersToDecrease) { - super(ContainerManagerEventType.DECREASE_CONTAINERS_RESOURCE); - this.containersToDecrease = containersToDecrease; + public void init(String user) { + MockResourceManagerFacade mockRM = new MockResourceManagerFacade( + new YarnConfiguration(super.getConf()), 0); + super.setRMClient(mockRM); } - public List getContainersToDecrease() { - return this.containersToDecrease; - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java new file mode 100644 index 00000000000..c403bd5006c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java @@ -0,0 +1,267 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.io.IOException; + +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; +import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; +import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; +import 
org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** + * Mock intercepter that does not do anything other than forwarding it to the + * next intercepter in the chain. + */ +public class PassThroughClientRequestInterceptor + extends AbstractClientRequestInterceptor { + + @Override + public GetNewApplicationResponse getNewApplication( + GetNewApplicationRequest request) throws YarnException, IOException { + return getNextInterceptor().getNewApplication(request); + } + + @Override + public SubmitApplicationResponse submitApplication( + SubmitApplicationRequest request) throws YarnException, IOException { + return getNextInterceptor().submitApplication(request); + } + + @Override + public KillApplicationResponse forceKillApplication( + KillApplicationRequest request) throws YarnException, IOException { + return getNextInterceptor().forceKillApplication(request); + } + + @Override + public GetClusterMetricsResponse getClusterMetrics( + GetClusterMetricsRequest request) throws YarnException, IOException { + return getNextInterceptor().getClusterMetrics(request); + } + + @Override + public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request) + throws YarnException, IOException { + return getNextInterceptor().getClusterNodes(request); + } + + @Override + public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) + throws YarnException, IOException { + return getNextInterceptor().getQueueInfo(request); + } + + @Override + public GetQueueUserAclsInfoResponse getQueueUserAcls( + GetQueueUserAclsInfoRequest request) throws YarnException, IOException { + return getNextInterceptor().getQueueUserAcls(request); + } + + @Override + public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues( + MoveApplicationAcrossQueuesRequest request) + throws YarnException, IOException { + return getNextInterceptor().moveApplicationAcrossQueues(request); + } + + @Override + public GetNewReservationResponse getNewReservation( + GetNewReservationRequest request) throws YarnException, IOException { + return getNextInterceptor().getNewReservation(request); + } + + @Override + public ReservationSubmissionResponse submitReservation( + ReservationSubmissionRequest request) throws YarnException, IOException { + return getNextInterceptor().submitReservation(request); + } + + @Override + public 
ReservationListResponse listReservations( + ReservationListRequest request) throws YarnException, IOException { + return getNextInterceptor().listReservations(request); + } + + @Override + public ReservationUpdateResponse updateReservation( + ReservationUpdateRequest request) throws YarnException, IOException { + return getNextInterceptor().updateReservation(request); + } + + @Override + public ReservationDeleteResponse deleteReservation( + ReservationDeleteRequest request) throws YarnException, IOException { + return getNextInterceptor().deleteReservation(request); + } + + @Override + public GetNodesToLabelsResponse getNodeToLabels( + GetNodesToLabelsRequest request) throws YarnException, IOException { + return getNextInterceptor().getNodeToLabels(request); + } + + @Override + public GetLabelsToNodesResponse getLabelsToNodes( + GetLabelsToNodesRequest request) throws YarnException, IOException { + return getNextInterceptor().getLabelsToNodes(request); + } + + @Override + public GetClusterNodeLabelsResponse getClusterNodeLabels( + GetClusterNodeLabelsRequest request) throws YarnException, IOException { + return getNextInterceptor().getClusterNodeLabels(request); + } + + @Override + public GetApplicationReportResponse getApplicationReport( + GetApplicationReportRequest request) throws YarnException, IOException { + return getNextInterceptor().getApplicationReport(request); + } + + @Override + public GetApplicationsResponse getApplications(GetApplicationsRequest request) + throws YarnException, IOException { + return getNextInterceptor().getApplications(request); + } + + @Override + public GetApplicationAttemptReportResponse getApplicationAttemptReport( + GetApplicationAttemptReportRequest request) + throws YarnException, IOException { + return getNextInterceptor().getApplicationAttemptReport(request); + } + + @Override + public GetApplicationAttemptsResponse getApplicationAttempts( + GetApplicationAttemptsRequest request) throws YarnException, IOException { + return getNextInterceptor().getApplicationAttempts(request); + } + + @Override + public GetContainerReportResponse getContainerReport( + GetContainerReportRequest request) throws YarnException, IOException { + return getNextInterceptor().getContainerReport(request); + } + + @Override + public GetContainersResponse getContainers(GetContainersRequest request) + throws YarnException, IOException { + return getNextInterceptor().getContainers(request); + } + + @Override + public GetDelegationTokenResponse getDelegationToken( + GetDelegationTokenRequest request) throws YarnException, IOException { + return getNextInterceptor().getDelegationToken(request); + } + + @Override + public RenewDelegationTokenResponse renewDelegationToken( + RenewDelegationTokenRequest request) throws YarnException, IOException { + return getNextInterceptor().renewDelegationToken(request); + } + + @Override + public CancelDelegationTokenResponse cancelDelegationToken( + CancelDelegationTokenRequest request) throws YarnException, IOException { + return getNextInterceptor().cancelDelegationToken(request); + } + + @Override + public FailApplicationAttemptResponse failApplicationAttempt( + FailApplicationAttemptRequest request) throws YarnException, IOException { + return getNextInterceptor().failApplicationAttempt(request); + } + + @Override + public UpdateApplicationPriorityResponse updateApplicationPriority( + UpdateApplicationPriorityRequest request) + throws YarnException, IOException { + return getNextInterceptor().updateApplicationPriority(request); + } + + 
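[Illustration, not part of this patch: because every method of the pass-through interceptor simply delegates to the next interceptor in the chain, it also makes a convenient base for probes that observe a single call while leaving the rest of the chain untouched. The class below is a hypothetical sketch assuming only the PassThroughClientRequestInterceptor shown in this patch.]

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicInteger;
    import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
    import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
    import org.apache.hadoop.yarn.exceptions.YarnException;

    public class CountingClientRequestInterceptor
        extends PassThroughClientRequestInterceptor {

      private final AtomicInteger submitted = new AtomicInteger();

      @Override
      public SubmitApplicationResponse submitApplication(
          SubmitApplicationRequest request) throws YarnException, IOException {
        // Record the call, then forward it down the chain exactly as the
        // pass-through implementation does.
        submitted.incrementAndGet();
        return super.submitApplication(request);
      }

      public int getSubmittedCount() {
        return submitted.get();
      }
    }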
@Override + public SignalContainerResponse signalToContainer( + SignalContainerRequest request) throws YarnException, IOException { + return getNextInterceptor().signalToContainer(request); + } + + @Override + public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( + UpdateApplicationTimeoutsRequest request) + throws YarnException, IOException { + return getNextInterceptor().updateApplicationTimeouts(request); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java new file mode 100644 index 00000000000..87dfc95cd9e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java @@ -0,0 +1,403 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager; +import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreTestUtil; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Extends the {@code BaseRouterClientRMTest} and overrides methods in order to + * use the {@code RouterClientRMService} pipeline test cases for testing the + * {@code FederationInterceptor} class. The tests for + * {@code RouterClientRMService} has been written cleverly so that it can be + * reused to validate different request intercepter chains. + */ +public class TestFederationClientInterceptor extends BaseRouterClientRMTest { + private static final Logger LOG = + LoggerFactory.getLogger(TestFederationClientInterceptor.class); + + private TestableFederationClientInterceptor interceptor; + private MemoryFederationStateStore stateStore; + private FederationStateStoreTestUtil stateStoreUtil; + private List subClusters; + + private String user = "test-user"; + + private final static int NUM_SUBCLUSTER = 4; + + @Override + public void setUp() { + super.setUpConfig(); + interceptor = new TestableFederationClientInterceptor(); + + stateStore = new MemoryFederationStateStore(); + stateStore.init(this.getConf()); + FederationStateStoreFacade.getInstance().reinitialize(stateStore, + getConf()); + stateStoreUtil = new FederationStateStoreTestUtil(stateStore); + + interceptor.setConf(this.getConf()); + interceptor.init(user); + + subClusters = new ArrayList(); + + try { + for (int i = 0; i < NUM_SUBCLUSTER; i++) { + SubClusterId sc = SubClusterId.newInstance(Integer.toString(i)); + stateStoreUtil.registerSubCluster(sc); + subClusters.add(sc); + } + } catch (YarnException e) { + LOG.error(e.getMessage()); + Assert.fail(); + } + + } + + @Override + public void tearDown() { + interceptor.shutdown(); + super.tearDown(); + } + + @Override + protected YarnConfiguration createConfiguration() { + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true); + String mockPassThroughInterceptorClass = + PassThroughClientRequestInterceptor.class.getName(); + + // Create a request intercepter pipeline for testing. 
The last one in the + // chain is the federation intercepter that calls the mock resource manager. + // The others in the chain will simply forward it to the next one in the + // chain + conf.set(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE, + mockPassThroughInterceptorClass + "," + mockPassThroughInterceptorClass + + "," + TestableFederationClientInterceptor.class.getName()); + + conf.set(YarnConfiguration.FEDERATION_POLICY_MANAGER, + UniformBroadcastPolicyManager.class.getName()); + + // Disable StateStoreFacade cache + conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0); + + return conf; + } + + /** + * This test validates the correctness of GetNewApplication. The return + * ApplicationId has to belong to one of the SubCluster in the cluster. + */ + @Test + public void testGetNewApplication() + throws YarnException, IOException, InterruptedException { + System.out.println("Test FederationClientInterceptor: Get New Application"); + + GetNewApplicationRequest request = GetNewApplicationRequest.newInstance(); + GetNewApplicationResponse response = interceptor.getNewApplication(request); + + Assert.assertNotNull(response); + Assert.assertNotNull(response.getApplicationId()); + Assert.assertTrue( + response.getApplicationId().getClusterTimestamp() < NUM_SUBCLUSTER); + Assert.assertTrue(response.getApplicationId().getClusterTimestamp() >= 0); + } + + /** + * This test validates the correctness of SubmitApplication. The application + * has to be submitted to one of the SubCluster in the cluster. + */ + @Test + public void testSubmitApplication() + throws YarnException, IOException, InterruptedException { + System.out.println("Test FederationClientInterceptor: Submit Application"); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContext context = ApplicationSubmissionContext + .newInstance(appId, "", "", null, null, false, false, -1, null, null); + SubmitApplicationRequest request = + SubmitApplicationRequest.newInstance(context); + + SubmitApplicationResponse response = interceptor.submitApplication(request); + + Assert.assertNotNull(response); + SubClusterId scIdResult = stateStoreUtil.queryApplicationHomeSC(appId); + Assert.assertNotNull(scIdResult); + Assert.assertTrue(subClusters.contains(scIdResult)); + } + + /** + * This test validates the correctness of SubmitApplication in case of + * multiple submission. The first retry has to be submitted to the same + * SubCluster of the first attempt. 
+ */ + @Test + public void testSubmitApplicationMultipleSubmission() + throws YarnException, IOException, InterruptedException { + System.out.println( + "Test FederationClientInterceptor: Submit Application - Multiple"); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContext context = ApplicationSubmissionContext + .newInstance(appId, "", "", null, null, false, false, -1, null, null); + SubmitApplicationRequest request = + SubmitApplicationRequest.newInstance(context); + + // First attempt + SubmitApplicationResponse response = interceptor.submitApplication(request); + + Assert.assertNotNull(response); + SubClusterId scIdResult = stateStoreUtil.queryApplicationHomeSC(appId); + Assert.assertNotNull(scIdResult); + + // First retry + response = interceptor.submitApplication(request); + + Assert.assertNotNull(response); + SubClusterId scIdResult2 = stateStoreUtil.queryApplicationHomeSC(appId); + Assert.assertNotNull(scIdResult2); + Assert.assertEquals(scIdResult, scIdResult2); + } + + /** + * This test validates the correctness of SubmitApplication in case of empty + * request. + */ + @Test + public void testSubmitApplicationEmptyRequest() + throws YarnException, IOException, InterruptedException { + System.out.println( + "Test FederationClientInterceptor: Submit Application - Empty"); + try { + interceptor.submitApplication(null); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Missing submitApplication request or " + + "applicationSubmissionContex information.")); + } + try { + interceptor.submitApplication(SubmitApplicationRequest.newInstance(null)); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Missing submitApplication request or " + + "applicationSubmissionContex information.")); + } + try { + ApplicationSubmissionContext context = ApplicationSubmissionContext + .newInstance(null, "", "", null, null, false, false, -1, null, null); + SubmitApplicationRequest request = + SubmitApplicationRequest.newInstance(context); + interceptor.submitApplication(request); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Missing submitApplication request or " + + "applicationSubmissionContex information.")); + } + } + + /** + * This test validates the correctness of ForceKillApplication in case the + * application exists in the cluster. 
+ */ + @Test + public void testForceKillApplication() + throws YarnException, IOException, InterruptedException { + System.out + .println("Test FederationClientInterceptor: Force Kill Application"); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContext context = ApplicationSubmissionContext + .newInstance(appId, "", "", null, null, false, false, -1, null, null); + + SubmitApplicationRequest request = + SubmitApplicationRequest.newInstance(context); + // Submit the application we are going to kill later + SubmitApplicationResponse response = interceptor.submitApplication(request); + + Assert.assertNotNull(response); + Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId)); + + KillApplicationRequest requestKill = + KillApplicationRequest.newInstance(appId); + KillApplicationResponse responseKill = + interceptor.forceKillApplication(requestKill); + Assert.assertNotNull(responseKill); + } + + /** + * This test validates the correctness of ForceKillApplication in case of + * application does not exist in StateStore. + */ + @Test + public void testForceKillApplicationNotExists() + throws YarnException, IOException, InterruptedException { + System.out.println("Test FederationClientInterceptor: " + + "Force Kill Application - Not Exists"); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + KillApplicationRequest requestKill = + KillApplicationRequest.newInstance(appId); + try { + interceptor.forceKillApplication(requestKill); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue(e.getMessage().equals( + "Application " + appId + " does not exist in FederationStateStore")); + } + } + + /** + * This test validates the correctness of ForceKillApplication in case of + * empty request. + */ + @Test + public void testForceKillApplicationEmptyRequest() + throws YarnException, IOException, InterruptedException { + System.out.println( + "Test FederationClientInterceptor: Force Kill Application - Empty"); + try { + interceptor.forceKillApplication(null); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue(e.getMessage().startsWith( + "Missing forceKillApplication request or ApplicationId.")); + } + try { + interceptor + .forceKillApplication(KillApplicationRequest.newInstance(null)); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue(e.getMessage().startsWith( + "Missing forceKillApplication request or ApplicationId.")); + } + } + + /** + * This test validates the correctness of GetApplicationReport in case the + * application exists in the cluster. 
+ */ + @Test + public void testGetApplicationReport() + throws YarnException, IOException, InterruptedException { + System.out + .println("Test FederationClientInterceptor: Get Application Report"); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContext context = ApplicationSubmissionContext + .newInstance(appId, "", "", null, null, false, false, -1, null, null); + + SubmitApplicationRequest request = + SubmitApplicationRequest.newInstance(context); + // Submit the application we want the report later + SubmitApplicationResponse response = interceptor.submitApplication(request); + + Assert.assertNotNull(response); + Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId)); + + GetApplicationReportRequest requestGet = + GetApplicationReportRequest.newInstance(appId); + + GetApplicationReportResponse responseGet = + interceptor.getApplicationReport(requestGet); + + Assert.assertNotNull(responseGet); + } + + /** + * This test validates the correctness of GetApplicationReport in case the + * application does not exist in StateStore. + */ + @Test + public void testGetApplicationNotExists() + throws YarnException, IOException, InterruptedException { + System.out.println( + "Test ApplicationClientProtocol: Get Application Report - Not Exists"); + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + GetApplicationReportRequest requestGet = + GetApplicationReportRequest.newInstance(appId); + try { + interceptor.getApplicationReport(requestGet); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue(e.getMessage().equals( + "Application " + appId + " does not exist in FederationStateStore")); + } + } + + /** + * This test validates the correctness of GetApplicationReport in case of + * empty request. + */ + @Test + public void testGetApplicationEmptyRequest() + throws YarnException, IOException, InterruptedException { + System.out.println( + "Test FederationClientInterceptor: Get Application Report - Empty"); + try { + interceptor.getApplicationReport(null); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Missing getApplicationReport request or " + + "applicationId information.")); + } + try { + interceptor + .getApplicationReport(GetApplicationReportRequest.newInstance(null)); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().startsWith("Missing getApplicationReport request or " + + "applicationId information.")); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptorRetry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptorRetry.java new file mode 100644 index 00000000000..a655c163a67 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptorRetry.java @@ -0,0 +1,295 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager; +import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreTestUtil; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Extends the {@code BaseRouterClientRMTest} and overrides methods in order to + * use the {@code RouterClientRMService} pipeline test cases for testing the + * {@code FederationInterceptor} class. The tests for + * {@code RouterClientRMService} has been written cleverly so that it can be + * reused to validate different request intercepter chains. + * + * It tests the case with SubClusters down and the Router logic of retries. We + * have 1 good SubCluster and 2 bad ones for all the tests. 
+ */ +public class TestFederationClientInterceptorRetry + extends BaseRouterClientRMTest { + private static final Logger LOG = + LoggerFactory.getLogger(TestFederationClientInterceptorRetry.class); + + private TestableFederationClientInterceptor interceptor; + private MemoryFederationStateStore stateStore; + private FederationStateStoreTestUtil stateStoreUtil; + + private String user = "test-user"; + + // running and registered + private static SubClusterId good; + + // registered but not running + private static SubClusterId bad1; + private static SubClusterId bad2; + + private static List<SubClusterId> scs = new ArrayList<SubClusterId>(); + + @Override + public void setUp() { + super.setUpConfig(); + interceptor = new TestableFederationClientInterceptor(); + + stateStore = new MemoryFederationStateStore(); + stateStore.init(this.getConf()); + FederationStateStoreFacade.getInstance().reinitialize(stateStore, + getConf()); + stateStoreUtil = new FederationStateStoreTestUtil(stateStore); + + interceptor.setConf(this.getConf()); + interceptor.init(user); + + // Create SubClusters + good = SubClusterId.newInstance("0"); + bad1 = SubClusterId.newInstance("1"); + bad2 = SubClusterId.newInstance("2"); + scs.add(good); + scs.add(bad1); + scs.add(bad2); + + // The mock RM will not start in these SubClusters; this simulates a + // SubCluster being down. + + interceptor.registerBadSubCluster(bad1); + interceptor.registerBadSubCluster(bad2); + } + + @Override + public void tearDown() { + interceptor.shutdown(); + super.tearDown(); + } + + private void setupCluster(List<SubClusterId> scsToRegister) + throws YarnException { + + try { + // Clean up the StateStore before every test + stateStoreUtil.deregisterAllSubClusters(); + + for (SubClusterId sc : scsToRegister) { + stateStoreUtil.registerSubCluster(sc); + } + } catch (YarnException e) { + LOG.error(e.getMessage()); + Assert.fail(); + } + } + + @Override + protected YarnConfiguration createConfiguration() { + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true); + String mockPassThroughInterceptorClass = + PassThroughClientRequestInterceptor.class.getName(); + + // Create a request intercepter pipeline for testing. The last one in the + // chain is the federation intercepter that calls the mock resource manager. + // The others in the chain will simply forward it to the next one in the + // chain. + conf.set(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE, + mockPassThroughInterceptorClass + "," + mockPassThroughInterceptorClass + + "," + TestableFederationClientInterceptor.class.getName()); + + conf.set(YarnConfiguration.FEDERATION_POLICY_MANAGER, + UniformBroadcastPolicyManager.class.getName()); + + // Disable StateStoreFacade cache + conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0); + + return conf; + } + + /** + * This test validates the correctness of GetNewApplication in case the + * cluster is composed of only 1 bad SubCluster. 
+ */ + @Test + public void testGetNewApplicationOneBadSC() + throws YarnException, IOException, InterruptedException { + + System.out.println("Test getNewApplication with one bad SubCluster"); + setupCluster(Arrays.asList(bad2)); + + try { + interceptor.getNewApplication(GetNewApplicationRequest.newInstance()); + Assert.fail(); + } catch (Exception e) { + System.out.println(e.toString()); + Assert.assertTrue(e.getMessage() + .equals(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE)); + } + } + + /** + * This test validates the correctness of GetNewApplication in case the + * cluster is composed of only 2 bad SubClusters. + */ + @Test + public void testGetNewApplicationTwoBadSCs() + throws YarnException, IOException, InterruptedException { + System.out.println("Test getNewApplication with two bad SubClusters"); + setupCluster(Arrays.asList(bad1, bad2)); + + try { + interceptor.getNewApplication(GetNewApplicationRequest.newInstance()); + Assert.fail(); + } catch (Exception e) { + System.out.println(e.toString()); + Assert.assertTrue(e.getMessage() + .equals(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE)); + } + } + + /** + * This test validates the correctness of GetNewApplication in case the + * cluster is composed of only 1 bad SubCluster and 1 good one. + */ + @Test + public void testGetNewApplicationOneBadOneGood() + throws YarnException, IOException, InterruptedException { + System.out.println("Test getNewApplication with one bad, one good SC"); + setupCluster(Arrays.asList(good, bad2)); + GetNewApplicationResponse response = null; + try { + response = + interceptor.getNewApplication(GetNewApplicationRequest.newInstance()); + } catch (Exception e) { + Assert.fail(); + } + Assert.assertEquals(Integer.parseInt(good.getId()), + response.getApplicationId().getClusterTimestamp()); + } + + /** + * This test validates the correctness of SubmitApplication in case the + * cluster is composed of only 1 bad SubCluster. + */ + @Test + public void testSubmitApplicationOneBadSC() + throws YarnException, IOException, InterruptedException { + + System.out.println("Test submitApplication with one bad SubCluster"); + setupCluster(Arrays.asList(bad2)); + + final ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + + ApplicationSubmissionContext context = ApplicationSubmissionContext + .newInstance(appId, "", "", null, null, false, false, -1, null, null); + final SubmitApplicationRequest request = + SubmitApplicationRequest.newInstance(context); + try { + interceptor.submitApplication(request); + Assert.fail(); + } catch (Exception e) { + System.out.println(e.toString()); + Assert.assertTrue(e.getMessage() + .equals(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE)); + } + } + + /** + * This test validates the correctness of SubmitApplication in case the + * cluster is composed of only 2 bad SubClusters. 
+ */ + @Test + public void testSubmitApplicationTwoBadSCs() + throws YarnException, IOException, InterruptedException { + System.out.println("Test submitApplication with two bad SubClusters"); + setupCluster(Arrays.asList(bad1, bad2)); + + final ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + + ApplicationSubmissionContext context = ApplicationSubmissionContext + .newInstance(appId, "", "", null, null, false, false, -1, null, null); + final SubmitApplicationRequest request = + SubmitApplicationRequest.newInstance(context); + try { + interceptor.submitApplication(request); + Assert.fail(); + } catch (Exception e) { + System.out.println(e.toString()); + Assert.assertTrue(e.getMessage() + .equals(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE)); + } + } + + /** + * This test validates the correctness of SubmitApplication in case the + * cluster is composed of only 1 bad SubCluster and a good one. + */ + @Test + public void testSubmitApplicationOneBadOneGood() + throws YarnException, IOException, InterruptedException { + System.out.println("Test submitApplication with one bad, one good SC"); + setupCluster(Arrays.asList(good, bad2)); + + final ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + + ApplicationSubmissionContext context = ApplicationSubmissionContext + .newInstance(appId, "", "", null, null, false, false, -1, null, null); + final SubmitApplicationRequest request = + SubmitApplicationRequest.newInstance(context); + try { + interceptor.submitApplication(request); + } catch (Exception e) { + Assert.fail(); + } + Assert.assertEquals(good, + stateStore + .getApplicationHomeSubCluster( + GetApplicationHomeSubClusterRequest.newInstance(appId)) + .getApplicationHomeSubCluster().getHomeSubCluster()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java new file mode 100644 index 00000000000..a9c37293f69 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java @@ -0,0 +1,210 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.io.IOException; +import java.util.Map; + +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; +import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.router.clientrm.RouterClientRMService.RequestInterceptorChainWrapper; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test class to validate the ClientRM Service inside the Router. + */ +public class TestRouterClientRMService extends BaseRouterClientRMTest { + + private static final Logger LOG = + LoggerFactory.getLogger(TestRouterClientRMService.class); + + /** + * Tests if the pipeline is created properly. + */ + @Test + public void testRequestInterceptorChainCreation() throws Exception { + ClientRequestInterceptor root = + super.getRouterClientRMService().createRequestInterceptorChain(); + int index = 0; + while (root != null) { + // The current pipeline is: + // PassThroughClientRequestInterceptor - index = 0 + // PassThroughClientRequestInterceptor - index = 1 + // PassThroughClientRequestInterceptor - index = 2 + // MockClientRequestInterceptor - index = 3 + switch (index) { + case 0: // Fall to the next case + case 1: // Fall to the next case + case 2: + // If index is equal to 0,1 or 2 we fall in this check + Assert.assertEquals(PassThroughClientRequestInterceptor.class.getName(), + root.getClass().getName()); + break; + case 3: + Assert.assertEquals(MockClientRequestInterceptor.class.getName(), + root.getClass().getName()); + break; + default: + Assert.fail(); + } + root = root.getNextInterceptor(); + index++; + } + Assert.assertEquals("The number of interceptors in chain does not match", 4, + index); + } + + /** + * Test if the RouterClientRM forwards all the requests to the MockRM and get + * back the responses. 
+ */ + @Test + public void testRouterClientRMServiceE2E() throws Exception { + + String user = "test1"; + + LOG.info("testRouterClientRMServiceE2E - Get New Application"); + + GetNewApplicationResponse responseGetNewApp = getNewApplication(user); + Assert.assertNotNull(responseGetNewApp); + + LOG.info("testRouterClientRMServiceE2E - Submit Application"); + + SubmitApplicationResponse responseSubmitApp = + submitApplication(responseGetNewApp.getApplicationId(), user); + Assert.assertNotNull(responseSubmitApp); + + LOG.info("testRouterClientRMServiceE2E - Kill Application"); + + KillApplicationResponse responseKillApp = + forceKillApplication(responseGetNewApp.getApplicationId(), user); + Assert.assertNotNull(responseKillApp); + + LOG.info("testRouterClientRMServiceE2E - Get Cluster Metrics"); + + GetClusterMetricsResponse responseGetClusterMetrics = + getClusterMetrics(user); + Assert.assertNotNull(responseGetClusterMetrics); + + LOG.info("testRouterClientRMServiceE2E - Get Cluster Nodes"); + + GetClusterNodesResponse responseGetClusterNodes = getClusterNodes(user); + Assert.assertNotNull(responseGetClusterNodes); + + LOG.info("testRouterClientRMServiceE2E - Get Queue Info"); + + GetQueueInfoResponse responseGetQueueInfo = getQueueInfo(user); + Assert.assertNotNull(responseGetQueueInfo); + + LOG.info("testRouterClientRMServiceE2E - Get Queue User"); + + GetQueueUserAclsInfoResponse responseGetQueueUser = getQueueUserAcls(user); + Assert.assertNotNull(responseGetQueueUser); + + LOG.info("testRouterClientRMServiceE2E - Get Cluster Node"); + + GetClusterNodeLabelsResponse responseGetClusterNode = + getClusterNodeLabels(user); + Assert.assertNotNull(responseGetClusterNode); + + LOG.info("testRouterClientRMServiceE2E - Move Application Across Queues"); + + MoveApplicationAcrossQueuesResponse responseMoveApp = + moveApplicationAcrossQueues(user, responseGetNewApp.getApplicationId()); + Assert.assertNotNull(responseMoveApp); + + LOG.info("testRouterClientRMServiceE2E - Get New Reservation"); + + GetNewReservationResponse getNewReservationResponse = + getNewReservation(user); + + LOG.info("testRouterClientRMServiceE2E - Submit Reservation"); + + ReservationSubmissionResponse responseSubmitReser = + submitReservation(user, getNewReservationResponse.getReservationId()); + Assert.assertNotNull(responseSubmitReser); + + LOG.info("testRouterClientRMServiceE2E - Update Reservation"); + + ReservationUpdateResponse responseUpdateReser = + updateReservation(user, getNewReservationResponse.getReservationId()); + Assert.assertNotNull(responseUpdateReser); + + LOG.info("testRouterClientRMServiceE2E - Delete Reservation"); + + ReservationDeleteResponse responseDeleteReser = + deleteReservation(user, getNewReservationResponse.getReservationId()); + Assert.assertNotNull(responseDeleteReser); + } + + /** + * Test if the different chains for users are generated, and LRU cache is + * working as expected. 
+ */ + @Test + public void testUsersChainMapWithLRUCache() + throws YarnException, IOException, InterruptedException { + + Map pipelines; + RequestInterceptorChainWrapper chain; + + getNewApplication("test1"); + getNewApplication("test2"); + getNewApplication("test3"); + getNewApplication("test4"); + getNewApplication("test5"); + getNewApplication("test6"); + getNewApplication("test7"); + getNewApplication("test8"); + + pipelines = super.getRouterClientRMService().getPipelines(); + Assert.assertEquals(8, pipelines.size()); + + getNewApplication("test9"); + getNewApplication("test10"); + getNewApplication("test1"); + getNewApplication("test11"); + + // The cache max size is defined in + // BaseRouterClientRMTest.TEST_MAX_CACHE_SIZE + Assert.assertEquals(10, pipelines.size()); + + chain = pipelines.get("test1"); + Assert.assertNotNull("test1 should not be evicted", chain); + + chain = pipelines.get("test2"); + Assert.assertNull("test2 should have been evicted", chain); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java new file mode 100644 index 00000000000..e4a1a42bd64 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestableFederationClientInterceptor.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.clientrm; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.MockResourceManagerFacade; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +/** + * Extends the FederationClientInterceptor and overrides methods to provide a + * testable implementation of FederationClientInterceptor. 
+ */ +public class TestableFederationClientInterceptor + extends FederationClientInterceptor { + + private ConcurrentHashMap<SubClusterId, MockResourceManagerFacade> mockRMs = + new ConcurrentHashMap<>(); + + private List<SubClusterId> badSubCluster = new ArrayList<SubClusterId>(); + + @Override + protected ApplicationClientProtocol getClientRMProxyForSubCluster( + SubClusterId subClusterId) throws YarnException { + + MockResourceManagerFacade mockRM = null; + synchronized (this) { + if (mockRMs.containsKey(subClusterId)) { + mockRM = mockRMs.get(subClusterId); + } else { + mockRM = new MockResourceManagerFacade(super.getConf(), 0, + Integer.parseInt(subClusterId.getId()), + !badSubCluster.contains(subClusterId)); + mockRMs.put(subClusterId, mockRM); + + } + return mockRM; + } + } + + /** + * For testing purposes, some subclusters have to be down to simulate + * particular scenarios such as RM failover or network issues. For this reason + * we keep track of these bad subclusters. This method makes the subcluster + * unusable. + * + * @param badSC the subcluster to make unusable + */ + protected void registerBadSubCluster(SubClusterId badSC) { + badSubCluster.add(badSC); + if (mockRMs.containsKey(badSC)) { + mockRMs.get(badSC).setRunningMode(false); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/BaseRouterRMAdminTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/BaseRouterRMAdminTest.java new file mode 100644 index 00000000000..d3eba618028 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/BaseRouterRMAdminTest.java @@ -0,0 +1,346 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.rmadmin; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +/** + * Base class for all the RouterRMAdminService test cases. It provides utility + * methods that can be used by the concrete test case classes. + * + */ +public abstract class BaseRouterRMAdminTest { + + /** + * The RouterRMAdminService instance that will be used by all the test cases. + */ + private MockRouterRMAdminService rmAdminService; + /** + * Thread pool used for asynchronous operations. 
+ */ + private static ExecutorService threadpool = Executors.newCachedThreadPool(); + private Configuration conf; + private AsyncDispatcher dispatcher; + + public final static int TEST_MAX_CACHE_SIZE = 10; + + protected MockRouterRMAdminService getRouterRMAdminService() { + Assert.assertNotNull(this.rmAdminService); + return this.rmAdminService; + } + + @Before + public void setUp() { + this.conf = new YarnConfiguration(); + String mockPassThroughInterceptorClass = + PassThroughRMAdminRequestInterceptor.class.getName(); + + // Create a request intercepter pipeline for testing. The last one in the + // chain will call the mock resource manager. The others in the chain will + // simply forward it to the next one in the chain + this.conf.set(YarnConfiguration.ROUTER_RMADMIN_INTERCEPTOR_CLASS_PIPELINE, + mockPassThroughInterceptorClass + "," + mockPassThroughInterceptorClass + + "," + mockPassThroughInterceptorClass + "," + + MockRMAdminRequestInterceptor.class.getName()); + + this.conf.setInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE, + TEST_MAX_CACHE_SIZE); + + this.dispatcher = new AsyncDispatcher(); + this.dispatcher.init(conf); + this.dispatcher.start(); + this.rmAdminService = createAndStartRouterRMAdminService(); + } + + @After + public void tearDown() { + if (rmAdminService != null) { + rmAdminService.stop(); + rmAdminService = null; + } + if (this.dispatcher != null) { + this.dispatcher.stop(); + } + } + + protected ExecutorService getThreadPool() { + return threadpool; + } + + protected MockRouterRMAdminService createAndStartRouterRMAdminService() { + MockRouterRMAdminService svc = new MockRouterRMAdminService(); + svc.init(conf); + svc.start(); + return svc; + } + + protected static class MockRouterRMAdminService extends RouterRMAdminService { + public MockRouterRMAdminService() { + super(); + } + } + + protected RefreshQueuesResponse refreshQueues(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public RefreshQueuesResponse run() throws Exception { + RefreshQueuesRequest req = RefreshQueuesRequest.newInstance(); + RefreshQueuesResponse response = + getRouterRMAdminService().refreshQueues(req); + return response; + } + }); + } + + protected RefreshNodesResponse refreshNodes(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public RefreshNodesResponse run() throws Exception { + RefreshNodesRequest req = RefreshNodesRequest.newInstance(); + RefreshNodesResponse response = + getRouterRMAdminService().refreshNodes(req); + return response; + } + }); + } + + protected RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration( + String user) throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user).doAs( + new PrivilegedExceptionAction() { + @Override + public RefreshSuperUserGroupsConfigurationResponse run() + throws Exception { + RefreshSuperUserGroupsConfigurationRequest req = + RefreshSuperUserGroupsConfigurationRequest.newInstance(); + RefreshSuperUserGroupsConfigurationResponse response = + getRouterRMAdminService() + .refreshSuperUserGroupsConfiguration(req); + return response; + } + }); + } + + protected RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( + String user) throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user).doAs( + 
new PrivilegedExceptionAction() { + @Override + public RefreshUserToGroupsMappingsResponse run() throws Exception { + RefreshUserToGroupsMappingsRequest req = + RefreshUserToGroupsMappingsRequest.newInstance(); + RefreshUserToGroupsMappingsResponse response = + getRouterRMAdminService().refreshUserToGroupsMappings(req); + return response; + } + }); + } + + protected RefreshAdminAclsResponse refreshAdminAcls(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public RefreshAdminAclsResponse run() throws Exception { + RefreshAdminAclsRequest req = RefreshAdminAclsRequest.newInstance(); + RefreshAdminAclsResponse response = + getRouterRMAdminService().refreshAdminAcls(req); + return response; + } + }); + } + + protected RefreshServiceAclsResponse refreshServiceAcls(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public RefreshServiceAclsResponse run() throws Exception { + RefreshServiceAclsRequest req = + RefreshServiceAclsRequest.newInstance(); + RefreshServiceAclsResponse response = + getRouterRMAdminService().refreshServiceAcls(req); + return response; + } + }); + } + + protected UpdateNodeResourceResponse updateNodeResource(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public UpdateNodeResourceResponse run() throws Exception { + UpdateNodeResourceRequest req = + UpdateNodeResourceRequest.newInstance(null); + UpdateNodeResourceResponse response = + getRouterRMAdminService().updateNodeResource(req); + return response; + } + }); + } + + protected RefreshNodesResourcesResponse refreshNodesResources(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public RefreshNodesResourcesResponse run() throws Exception { + RefreshNodesResourcesRequest req = + RefreshNodesResourcesRequest.newInstance(); + RefreshNodesResourcesResponse response = + getRouterRMAdminService().refreshNodesResources(req); + return response; + } + }); + } + + protected AddToClusterNodeLabelsResponse addToClusterNodeLabels(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AddToClusterNodeLabelsResponse run() throws Exception { + AddToClusterNodeLabelsRequest req = + AddToClusterNodeLabelsRequest.newInstance(null); + AddToClusterNodeLabelsResponse response = + getRouterRMAdminService().addToClusterNodeLabels(req); + return response; + } + }); + } + + protected RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels( + String user) throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user).doAs( + new PrivilegedExceptionAction() { + @Override + public RemoveFromClusterNodeLabelsResponse run() throws Exception { + RemoveFromClusterNodeLabelsRequest req = + RemoveFromClusterNodeLabelsRequest.newInstance(null); + RemoveFromClusterNodeLabelsResponse response = + getRouterRMAdminService().removeFromClusterNodeLabels(req); + return response; + } + }); + } + + protected ReplaceLabelsOnNodeResponse replaceLabelsOnNode(String user) + throws IOException, InterruptedException { + return 
UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction<ReplaceLabelsOnNodeResponse>() { + @Override + public ReplaceLabelsOnNodeResponse run() throws Exception { + ReplaceLabelsOnNodeRequest req = ReplaceLabelsOnNodeRequest + .newInstance(new HashMap<NodeId, Set<String>>()); + ReplaceLabelsOnNodeResponse response = + getRouterRMAdminService().replaceLabelsOnNode(req); + return response; + } + }); + } + + protected CheckForDecommissioningNodesResponse checkForDecommissioningNodes( + String user) throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user).doAs( + new PrivilegedExceptionAction<CheckForDecommissioningNodesResponse>() { + @Override + public CheckForDecommissioningNodesResponse run() throws Exception { + CheckForDecommissioningNodesRequest req = + CheckForDecommissioningNodesRequest.newInstance(); + CheckForDecommissioningNodesResponse response = + getRouterRMAdminService().checkForDecommissioningNodes(req); + return response; + } + }); + } + + protected RefreshClusterMaxPriorityResponse refreshClusterMaxPriority( + String user) throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user).doAs( + new PrivilegedExceptionAction<RefreshClusterMaxPriorityResponse>() { + @Override + public RefreshClusterMaxPriorityResponse run() throws Exception { + RefreshClusterMaxPriorityRequest req = + RefreshClusterMaxPriorityRequest.newInstance(); + RefreshClusterMaxPriorityResponse response = + getRouterRMAdminService().refreshClusterMaxPriority(req); + return response; + } + }); + } + + protected String[] getGroupsForUser(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction<String[]>() { + @Override + public String[] run() throws Exception { + String[] response = + getRouterRMAdminService().getGroupsForUser(user); + return response; + } + }); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/MockRMAdminRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/MockRMAdminRequestInterceptor.java new file mode 100644 index 00000000000..ab7bdb41ed2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/MockRMAdminRequestInterceptor.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.rmadmin; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.MockResourceManagerFacade; + +/** + * This class mocks the RMAdminRequestInterceptor. 
+ */ +public class MockRMAdminRequestInterceptor + extends DefaultRMAdminRequestInterceptor { + + public void init(String user) { + MockResourceManagerFacade mockRM = new MockResourceManagerFacade( + new YarnConfiguration(super.getConf()), 0); + super.setRMAdmin(mockRM); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/PassThroughRMAdminRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/PassThroughRMAdminRequestInterceptor.java new file mode 100644 index 00000000000..38dcc3d96d3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/PassThroughRMAdminRequestInterceptor.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.rmadmin; + +import java.io.IOException; + +import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; +import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; + +/** + * Mock intercepter that does not do anything other than forwarding it to the + * next intercepter in the chain. + */ +public class PassThroughRMAdminRequestInterceptor + extends AbstractRMAdminRequestInterceptor { + + @Override + public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) + throws StandbyException, YarnException, IOException { + return getNextInterceptor().refreshQueues(request); + } + + @Override + public RefreshNodesResponse refreshNodes(RefreshNodesRequest request) + throws StandbyException, YarnException, IOException { + return getNextInterceptor().refreshNodes(request); + } + + @Override + public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration( + RefreshSuperUserGroupsConfigurationRequest request) + throws StandbyException, YarnException, IOException { + return getNextInterceptor().refreshSuperUserGroupsConfiguration(request); + } + + @Override + public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( + RefreshUserToGroupsMappingsRequest request) + throws StandbyException, YarnException, IOException { + return getNextInterceptor().refreshUserToGroupsMappings(request); + } + + @Override + public RefreshAdminAclsResponse refreshAdminAcls( + RefreshAdminAclsRequest request) throws YarnException, IOException { + return getNextInterceptor().refreshAdminAcls(request); + } + + @Override + public RefreshServiceAclsResponse refreshServiceAcls( + RefreshServiceAclsRequest request) throws YarnException, IOException { + return getNextInterceptor().refreshServiceAcls(request); + } + + @Override + public UpdateNodeResourceResponse updateNodeResource( + UpdateNodeResourceRequest request) throws YarnException, IOException { + return getNextInterceptor().updateNodeResource(request); + } + + @Override + public RefreshNodesResourcesResponse refreshNodesResources( + RefreshNodesResourcesRequest request) throws YarnException, IOException { + return getNextInterceptor().refreshNodesResources(request); + } + + @Override + public AddToClusterNodeLabelsResponse addToClusterNodeLabels( + AddToClusterNodeLabelsRequest request) throws YarnException, IOException { + return getNextInterceptor().addToClusterNodeLabels(request); + } + + @Override + public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels( + RemoveFromClusterNodeLabelsRequest request) + throws YarnException, IOException { + return getNextInterceptor().removeFromClusterNodeLabels(request); + } + + @Override + public ReplaceLabelsOnNodeResponse replaceLabelsOnNode( + ReplaceLabelsOnNodeRequest request) throws YarnException, IOException { + return getNextInterceptor().replaceLabelsOnNode(request); + } + + @Override + public CheckForDecommissioningNodesResponse checkForDecommissioningNodes( + CheckForDecommissioningNodesRequest 
checkForDecommissioningNodesRequest) + throws YarnException, IOException { + return getNextInterceptor() + .checkForDecommissioningNodes(checkForDecommissioningNodesRequest); + } + + @Override + public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority( + RefreshClusterMaxPriorityRequest request) + throws YarnException, IOException { + return getNextInterceptor().refreshClusterMaxPriority(request); + } + + @Override + public String[] getGroupsForUser(String user) throws IOException { + return getNextInterceptor().getGroupsForUser(user); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java new file mode 100644 index 00000000000..11786e6f980 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java @@ -0,0 +1,219 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +package org.apache.hadoop.yarn.server.router.rmadmin; + +import java.io.IOException; +import java.util.Map; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; +import org.apache.hadoop.yarn.server.router.rmadmin.RouterRMAdminService.RequestInterceptorChainWrapper; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test class to validate the RMAdmin Service inside the Router. + */ +public class TestRouterRMAdminService extends BaseRouterRMAdminTest { + + private static final Logger LOG = + LoggerFactory.getLogger(TestRouterRMAdminService.class); + + /** + * Tests if the pipeline is created properly. + */ + @Test + public void testRequestInterceptorChainCreation() throws Exception { + RMAdminRequestInterceptor root = + super.getRouterRMAdminService().createRequestInterceptorChain(); + int index = 0; + while (root != null) { + // The current pipeline is: + // PassThroughRMAdminRequestInterceptor - index = 0 + // PassThroughRMAdminRequestInterceptor - index = 1 + // PassThroughRMAdminRequestInterceptor - index = 2 + // MockClientRequestInterceptor - index = 3 + switch (index) { + case 0: // Fall to the next case + case 1: // Fall to the next case + case 2: + // If index is equal to 0,1 or 2 we fall in this check + Assert.assertEquals( + PassThroughRMAdminRequestInterceptor.class.getName(), + root.getClass().getName()); + break; + case 3: + Assert.assertEquals(MockRMAdminRequestInterceptor.class.getName(), + root.getClass().getName()); + break; + default: + Assert.fail(); + } + root = root.getNextInterceptor(); + index++; + } + Assert.assertEquals("The number of interceptors in chain does not match", 4, + index); + } + + /** + * Test if the RouterRMAdmin forwards all the requests to the MockRM and get + * back the responses. 
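+ * <p>
+ * Each call below goes through a helper in the base test class; a minimal
+ * sketch of that pattern (the helpers run the request as a remote user via
+ * {@code UserGroupInformation.doAs}):
+ * <pre>{@code
+ *   RefreshQueuesResponse resp = UserGroupInformation.createRemoteUser("test1")
+ *       .doAs((PrivilegedExceptionAction<RefreshQueuesResponse>) () ->
+ *           getRouterRMAdminService()
+ *               .refreshQueues(RefreshQueuesRequest.newInstance()));
+ * }</pre>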
+ */ + @Test + public void testRouterRMAdminServiceE2E() throws Exception { + + String user = "test1"; + + LOG.info("testRouterRMAdminServiceE2E - Refresh Queues"); + + RefreshQueuesResponse responseRefreshQueues = refreshQueues(user); + Assert.assertNotNull(responseRefreshQueues); + + LOG.info("testRouterRMAdminServiceE2E - Refresh Nodes"); + + RefreshNodesResponse responseRefreshNodes = refreshNodes(user); + Assert.assertNotNull(responseRefreshNodes); + + LOG.info("testRouterRMAdminServiceE2E - Refresh Super User"); + + RefreshSuperUserGroupsConfigurationResponse responseRefreshSuperUser = + refreshSuperUserGroupsConfiguration(user); + Assert.assertNotNull(responseRefreshSuperUser); + + LOG.info("testRouterRMAdminServiceE2E - Refresh User to Group"); + + RefreshUserToGroupsMappingsResponse responseRefreshUserToGroup = + refreshUserToGroupsMappings(user); + Assert.assertNotNull(responseRefreshUserToGroup); + + LOG.info("testRouterRMAdminServiceE2E - Refresh Admin Acls"); + + RefreshAdminAclsResponse responseRefreshAdminAcls = refreshAdminAcls(user); + Assert.assertNotNull(responseRefreshAdminAcls); + + LOG.info("testRouterRMAdminServiceE2E - Refresh Service Acls"); + + RefreshServiceAclsResponse responseRefreshServiceAcls = + refreshServiceAcls(user); + Assert.assertNotNull(responseRefreshServiceAcls); + + LOG.info("testRouterRMAdminServiceE2E - Update Node Resource"); + + UpdateNodeResourceResponse responseUpdateNodeResource = + updateNodeResource(user); + Assert.assertNotNull(responseUpdateNodeResource); + + LOG.info("testRouterRMAdminServiceE2E - Refresh Nodes Resource"); + + RefreshNodesResourcesResponse responseRefreshNodesResources = + refreshNodesResources(user); + Assert.assertNotNull(responseRefreshNodesResources); + + LOG.info("testRouterRMAdminServiceE2E - Add To Cluster NodeLabels"); + + AddToClusterNodeLabelsResponse responseAddToClusterNodeLabels = + addToClusterNodeLabels(user); + Assert.assertNotNull(responseAddToClusterNodeLabels); + + LOG.info("testRouterRMAdminServiceE2E - Remove To Cluster NodeLabels"); + + RemoveFromClusterNodeLabelsResponse responseRemoveFromClusterNodeLabels = + removeFromClusterNodeLabels(user); + Assert.assertNotNull(responseRemoveFromClusterNodeLabels); + + LOG.info("testRouterRMAdminServiceE2E - Replace Labels On Node"); + + ReplaceLabelsOnNodeResponse responseReplaceLabelsOnNode = + replaceLabelsOnNode(user); + Assert.assertNotNull(responseReplaceLabelsOnNode); + + LOG.info("testRouterRMAdminServiceE2E - Check For Decommissioning Nodes"); + + CheckForDecommissioningNodesResponse responseCheckForDecom = + checkForDecommissioningNodes(user); + Assert.assertNotNull(responseCheckForDecom); + + LOG.info("testRouterRMAdminServiceE2E - Refresh Cluster Max Priority"); + + RefreshClusterMaxPriorityResponse responseRefreshClusterMaxPriority = + refreshClusterMaxPriority(user); + Assert.assertNotNull(responseRefreshClusterMaxPriority); + + LOG.info("testRouterRMAdminServiceE2E - Get Groups For User"); + + String[] responseGetGroupsForUser = getGroupsForUser(user); + Assert.assertNotNull(responseGetGroupsForUser); + + } + + /** + * Test if the different chains for users are generated, and LRU cache is + * working as expected. 
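+ * <p>
+ * Sketch of the expected eviction, mirroring the assertions in the test body:
+ * with a cache limit of 10 pipelines, refreshing "test1" again before the
+ * 11th distinct user leaves "test2" as the least recently used entry, so
+ * "test2" is the one evicted:
+ * <pre>{@code
+ *   Map<String, RequestInterceptorChainWrapper> pipelines =
+ *       getRouterRMAdminService().getPipelines();
+ *   Assert.assertNotNull(pipelines.get("test1")); // recently touched, retained
+ *   Assert.assertNull(pipelines.get("test2"));    // least recently used, evicted
+ * }</pre>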
+ */ + @Test + public void testUsersChainMapWithLRUCache() + throws YarnException, IOException, InterruptedException { + + Map pipelines; + RequestInterceptorChainWrapper chain; + + refreshQueues("test1"); + refreshQueues("test2"); + refreshQueues("test3"); + refreshQueues("test4"); + refreshQueues("test5"); + refreshQueues("test6"); + refreshQueues("test7"); + refreshQueues("test8"); + + pipelines = super.getRouterRMAdminService().getPipelines(); + Assert.assertEquals(8, pipelines.size()); + + refreshQueues("test9"); + refreshQueues("test10"); + refreshQueues("test1"); + refreshQueues("test11"); + + // The cache max size is defined in + // BaseRouterClientRMTest.TEST_MAX_CACHE_SIZE + Assert.assertEquals(10, pipelines.size()); + + chain = pipelines.get("test1"); + Assert.assertNotNull("test1 should not be evicted", chain); + + chain = pipelines.get("test2"); + Assert.assertNull("test2 should have been evicted", chain); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java new file mode 100644 index 00000000000..7d420844a42 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/BaseRouterWebServicesTest.java @@ -0,0 +1,614 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.core.Response; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; +import org.apache.hadoop.yarn.server.router.Router; +import org.apache.hadoop.yarn.server.router.webapp.RouterWebServices.RequestInterceptorChainWrapper; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.mockito.Mockito; + +/** + * Base class for all the RouterRMAdminService test cases. It provides utility + * methods that can be used by the concrete test case classes. + * + */ +public abstract class BaseRouterWebServicesTest { + + private YarnConfiguration conf; + + private Router router; + public final static int TEST_MAX_CACHE_SIZE = 10; + + private RouterWebServices routerWebService; + + @Before + public void setUp() { + this.conf = createConfiguration(); + + router = spy(new Router()); + Mockito.doNothing().when(router).startWepApp(); + routerWebService = new RouterWebServices(router, conf); + routerWebService.setResponse(mock(HttpServletResponse.class)); + + router.init(conf); + router.start(); + } + + protected YarnConfiguration createConfiguration() { + YarnConfiguration config = new YarnConfiguration(); + String mockPassThroughInterceptorClass = + PassThroughRESTRequestInterceptor.class.getName(); + + // Create a request intercepter pipeline for testing. 
The last one in the + // chain will call the mock resource manager. The others in the chain will + // simply forward it to the next one in the chain + config.set(YarnConfiguration.ROUTER_WEBAPP_INTERCEPTOR_CLASS_PIPELINE, + mockPassThroughInterceptorClass + "," + mockPassThroughInterceptorClass + + "," + mockPassThroughInterceptorClass + "," + + MockRESTRequestInterceptor.class.getName()); + + config.setInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE, + TEST_MAX_CACHE_SIZE); + return config; + } + + @After + public void tearDown() { + if (router != null) { + router.stop(); + } + } + + public void setUpConfig() { + this.conf = createConfiguration(); + } + + protected Configuration getConf() { + return this.conf; + } + + protected RouterWebServices getRouterWebServices() { + Assert.assertNotNull(this.routerWebService); + return this.routerWebService; + } + + protected ClusterInfo get(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ClusterInfo run() throws Exception { + return routerWebService.get(); + } + }); + } + + protected ClusterInfo getClusterInfo(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ClusterInfo run() throws Exception { + return routerWebService.getClusterInfo(); + } + }); + } + + protected ClusterMetricsInfo getClusterMetricsInfo(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ClusterMetricsInfo run() throws Exception { + return routerWebService.getClusterMetricsInfo(); + } + }); + } + + protected SchedulerTypeInfo getSchedulerInfo(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public SchedulerTypeInfo run() throws Exception { + return routerWebService.getSchedulerInfo(); + } + }); + } + + protected String dumpSchedulerLogs(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public String run() throws Exception { + return routerWebService.dumpSchedulerLogs(null, null); + } + }); + } + + protected NodesInfo getNodes(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public NodesInfo run() throws Exception { + return routerWebService.getNodes(null); + } + }); + } + + protected NodeInfo getNode(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public NodeInfo run() throws Exception { + return routerWebService.getNode(null); + } + }); + } + + protected AppsInfo getApps(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppsInfo run() throws Exception { + return routerWebService.getApps(null, null, null, null, null, null, + null, null, null, null, null, null, null, null); + } + }); + } + + protected ActivitiesInfo getActivities(String user) + throws IOException, InterruptedException { + return 
UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ActivitiesInfo run() throws Exception { + return routerWebService.getActivities(null, null); + } + }); + } + + protected AppActivitiesInfo getAppActivities(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppActivitiesInfo run() throws Exception { + return routerWebService.getAppActivities(null, null, null); + } + }); + } + + protected ApplicationStatisticsInfo getAppStatistics(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ApplicationStatisticsInfo run() throws Exception { + return routerWebService.getAppStatistics(null, null, null); + } + }); + } + + protected AppInfo getApp(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppInfo run() throws Exception { + return routerWebService.getApp(null, null, null); + } + }); + } + + protected AppState getAppState(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppState run() throws Exception { + return routerWebService.getAppState(null, null); + } + }); + } + + protected Response updateAppState(String user) throws AuthorizationException, + YarnException, InterruptedException, IOException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.updateAppState(null, null, null); + } + }); + } + + protected NodeToLabelsInfo getNodeToLabels(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public NodeToLabelsInfo run() throws Exception { + return routerWebService.getNodeToLabels(null); + } + }); + } + + protected LabelsToNodesInfo getLabelsToNodes(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public LabelsToNodesInfo run() throws Exception { + return routerWebService.getLabelsToNodes(null); + } + }); + } + + protected Response replaceLabelsOnNodes(String user) throws Exception { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.replaceLabelsOnNodes(null, null); + } + }); + } + + protected Response replaceLabelsOnNode(String user) throws Exception { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.replaceLabelsOnNode(null, null, null); + } + }); + } + + protected NodeLabelsInfo getClusterNodeLabels(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public NodeLabelsInfo run() throws Exception { + return routerWebService.getClusterNodeLabels(null); + } + }); + } + + protected Response addToClusterNodeLabels(String user) throws Exception { + 
return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.addToClusterNodeLabels(null, null); + } + }); + } + + protected Response removeFromCluserNodeLabels(String user) throws Exception { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.removeFromCluserNodeLabels(null, null); + } + }); + } + + protected NodeLabelsInfo getLabelsOnNode(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public NodeLabelsInfo run() throws Exception { + return routerWebService.getLabelsOnNode(null, null); + } + }); + } + + protected AppPriority getAppPriority(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppPriority run() throws Exception { + return routerWebService.getAppPriority(null, null); + } + }); + } + + protected Response updateApplicationPriority(String user) + throws AuthorizationException, YarnException, InterruptedException, + IOException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.updateApplicationPriority(null, null, null); + } + }); + } + + protected AppQueue getAppQueue(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppQueue run() throws Exception { + return routerWebService.getAppQueue(null, null); + } + }); + } + + protected Response updateAppQueue(String user) throws AuthorizationException, + YarnException, InterruptedException, IOException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.updateAppQueue(null, null, null); + } + }); + } + + protected Response createNewApplication(String user) + throws AuthorizationException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.createNewApplication(null); + } + }); + } + + protected Response submitApplication(String user) + throws AuthorizationException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.submitApplication(null, null); + } + }); + } + + protected Response postDelegationToken(String user) + throws AuthorizationException, IOException, InterruptedException, + Exception { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.postDelegationToken(null, null); + } + }); + } + + protected Response postDelegationTokenExpiration(String user) + throws AuthorizationException, IOException, Exception { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception 
{ + return routerWebService.postDelegationTokenExpiration(null); + } + }); + } + + protected Response cancelDelegationToken(String user) + throws AuthorizationException, IOException, InterruptedException, + Exception { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.cancelDelegationToken(null); + } + }); + } + + protected Response createNewReservation(String user) + throws AuthorizationException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.createNewReservation(null); + } + }); + } + + protected Response submitReservation(String user) + throws AuthorizationException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.submitReservation(null, null); + } + }); + } + + protected Response updateReservation(String user) + throws AuthorizationException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.updateReservation(null, null); + } + }); + } + + protected Response deleteReservation(String user) + throws AuthorizationException, IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.deleteReservation(null, null); + } + }); + } + + protected Response listReservation(String user) throws Exception { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.listReservation(null, null, 0, 0, false, + null); + } + }); + } + + protected AppTimeoutInfo getAppTimeout(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppTimeoutInfo run() throws Exception { + return routerWebService.getAppTimeout(null, null, null); + } + }); + } + + protected AppTimeoutsInfo getAppTimeouts(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppTimeoutsInfo run() throws Exception { + return routerWebService.getAppTimeouts(null, null); + } + }); + } + + protected Response updateApplicationTimeout(String user) + throws AuthorizationException, YarnException, InterruptedException, + IOException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public Response run() throws Exception { + return routerWebService.updateApplicationTimeout(null, null, null); + } + }); + } + + protected AppAttemptsInfo getAppAttempts(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppAttemptsInfo run() throws Exception { + return routerWebService.getAppAttempts(null, null); + } + }); + } + + protected AppAttemptInfo getAppAttempt(String user) + 
throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public AppAttemptInfo run() throws Exception { + return routerWebService.getAppAttempt(null, null, null, null); + } + }); + } + + protected ContainersInfo getContainers(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ContainersInfo run() throws Exception { + return routerWebService.getContainers(null, null, null, null); + } + }); + } + + protected ContainerInfo getContainer(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public ContainerInfo run() throws Exception { + return routerWebService.getContainer(null, null, null, null, null); + } + }); + } + + protected RequestInterceptorChainWrapper getInterceptorChain(String user) + throws IOException, InterruptedException { + return UserGroupInformation.createRemoteUser(user) + .doAs(new PrivilegedExceptionAction() { + @Override + public RequestInterceptorChainWrapper run() throws Exception { + return routerWebService.getInterceptorChain(); + } + }); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/JavaProcess.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/JavaProcess.java new file mode 100644 index 00000000000..d32013f34b2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/JavaProcess.java @@ -0,0 +1,52 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.File; +import java.io.IOException; + +/** + * Helper class to start a new process. 
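+ * <p>
+ * Minimal usage sketch (the main class here is hypothetical): fork a JVM that
+ * runs the given class on the current test classpath, then tear it down:
+ * <pre>{@code
+ *   JavaProcess webApp = new JavaProcess(SomeWebAppMain.class);
+ *   // ... interact with the forked process ...
+ *   webApp.stop();
+ * }</pre>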
+ */ +public class JavaProcess { + + private Process process = null; + + public JavaProcess(Class klass) throws IOException, InterruptedException { + String javaHome = System.getProperty("java.home"); + String javaBin = + javaHome + File.separator + "bin" + File.separator + "java"; + String classpath = System.getProperty("java.class.path"); + classpath = classpath.concat("./src/test/resources"); + String className = klass.getCanonicalName(); + ProcessBuilder builder = + new ProcessBuilder(javaBin, "-cp", classpath, className); + builder.inheritIO(); + process = builder.start(); + } + + public void stop() throws InterruptedException { + if (process != null) { + process.destroy(); + process.waitFor(); + process.exitValue(); + } + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java new file mode 100644 index 00000000000..91e601eeb1a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.*; +import org.apache.hadoop.yarn.webapp.NotFoundException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; +import java.io.IOException; +import java.net.ConnectException; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * This class mocks the RESTRequestInterceptor. + */ +public class MockDefaultRequestInterceptorREST + extends DefaultRequestInterceptorREST { + + private static final Logger LOG = + LoggerFactory.getLogger(MockDefaultRequestInterceptorREST.class); + final private AtomicInteger applicationCounter = new AtomicInteger(0); + // True if the Mock RM is running, false otherwise. + // This property allows us to write tests for specific scenario as Yarn RM + // down e.g. network issue, failover. + private boolean isRunning = true; + private HashSet applicationMap = new HashSet<>(); + + private void validateRunning() throws ConnectException { + if (!isRunning) { + throw new ConnectException("RM is stopped"); + } + } + + @Override + public Response createNewApplication(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + validateRunning(); + + ApplicationId applicationId = + ApplicationId.newInstance(Integer.valueOf(getSubClusterId().getId()), + applicationCounter.incrementAndGet()); + NewApplication appId = + new NewApplication(applicationId.toString(), new ResourceInfo()); + return Response.status(Status.OK).entity(appId).build(); + } + + @Override + public Response submitApplication(ApplicationSubmissionContextInfo newApp, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + validateRunning(); + + ApplicationId appId = ApplicationId.fromString(newApp.getApplicationId()); + LOG.info("Application submitted: " + appId); + applicationMap.add(appId); + return Response.status(Status.ACCEPTED).header(HttpHeaders.LOCATION, "") + .entity(getSubClusterId()).build(); + } + + @Override + public AppInfo getApp(HttpServletRequest hsr, String appId, + Set unselectedFields) { + if (!isRunning) { + throw new RuntimeException("RM is stopped"); + } + + ApplicationId applicationId = ApplicationId.fromString(appId); + if (!applicationMap.contains(applicationId)) { + throw new NotFoundException("app with id: " + appId + " not found"); + } + + return new AppInfo(); + } + + @Override + public Response updateAppState(AppState targetState, HttpServletRequest hsr, + String appId) throws AuthorizationException, YarnException, + InterruptedException, IOException { + validateRunning(); + + ApplicationId applicationId = 
ApplicationId.fromString(appId); + if (!applicationMap.remove(applicationId)) { + throw new ApplicationNotFoundException( + "Trying to kill an absent application: " + appId); + } + + if (targetState == null) { + return Response.status(Status.BAD_REQUEST).build(); + } + + LOG.info("Force killing application: " + appId); + AppState ret = new AppState(); + ret.setState(targetState.toString()); + return Response.status(Status.OK).entity(ret).build(); + } + + public void setSubClusterId(int subClusterId) { + setSubClusterId(SubClusterId.newInstance(Integer.toString(subClusterId))); + } + + public boolean isRunning() { + return isRunning; + } + + public void setRunning(boolean runningMode) { + this.isRunning = runningMode; + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java new file mode 100644 index 00000000000..69afdeaf49c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java @@ -0,0 +1,340 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.IOException; +import java.util.Set; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; + +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; + +/** + * This class mocks the RESTRequestInterceptor. + */ +public class MockRESTRequestInterceptor extends AbstractRESTRequestInterceptor { + + @Override + public void setNextInterceptor(RESTRequestInterceptor next) { + throw new YarnRuntimeException( + "setNextInterceptor is being called on MockRESTRequestInterceptor," + + "which should be the last one in the chain. 
" + + "Check if the interceptor pipeline configuration is correct"); + } + + @Override + public ClusterInfo get() { + return new ClusterInfo(); + } + + @Override + public ClusterInfo getClusterInfo() { + return new ClusterInfo(); + } + + @Override + public ClusterMetricsInfo getClusterMetricsInfo() { + return new ClusterMetricsInfo(); + } + + @Override + public SchedulerTypeInfo getSchedulerInfo() { + return new SchedulerTypeInfo(); + } + + @Override + public String dumpSchedulerLogs(String time, HttpServletRequest hsr) + throws IOException { + return "Done"; + } + + @Override + public NodesInfo getNodes(String states) { + return new NodesInfo(); + } + + @Override + public NodeInfo getNode(String nodeId) { + return new NodeInfo(); + } + + @SuppressWarnings("checkstyle:parameternumber") + @Override + public AppsInfo getApps(HttpServletRequest hsr, String stateQuery, + Set statesQuery, String finalStatusQuery, String userQuery, + String queueQuery, String count, String startedBegin, String startedEnd, + String finishBegin, String finishEnd, Set applicationTypes, + Set applicationTags, Set unselectedFields) { + return new AppsInfo(); + } + + @Override + public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId) { + return new ActivitiesInfo(); + } + + @Override + public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, + String appId, String time) { + return new AppActivitiesInfo(); + } + + @Override + public ApplicationStatisticsInfo getAppStatistics(HttpServletRequest hsr, + Set stateQueries, Set typeQueries) { + return new ApplicationStatisticsInfo(); + } + + @Override + public AppInfo getApp(HttpServletRequest hsr, String appId, + Set unselectedFields) { + return new AppInfo(); + } + + @Override + public AppState getAppState(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return new AppState(); + } + + @Override + public Response updateAppState(AppState targetState, HttpServletRequest hsr, + String appId) throws AuthorizationException, YarnException, + InterruptedException, IOException { + return Response.status(Status.OK).build(); + } + + @Override + public NodeToLabelsInfo getNodeToLabels(HttpServletRequest hsr) + throws IOException { + return new NodeToLabelsInfo(); + } + + @Override + public LabelsToNodesInfo getLabelsToNodes(Set labels) + throws IOException { + return new LabelsToNodesInfo(); + } + + @Override + public Response replaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels, + HttpServletRequest hsr) throws Exception { + return Response.status(Status.OK).build(); + } + + @Override + public Response replaceLabelsOnNode(Set newNodeLabelsName, + HttpServletRequest hsr, String nodeId) throws Exception { + return Response.status(Status.OK).build(); + } + + @Override + public NodeLabelsInfo getClusterNodeLabels(HttpServletRequest hsr) + throws IOException { + return new NodeLabelsInfo(); + } + + @Override + public Response addToClusterNodeLabels(NodeLabelsInfo newNodeLabels, + HttpServletRequest hsr) throws Exception { + return Response.status(Status.OK).build(); + } + + @Override + public Response removeFromCluserNodeLabels(Set oldNodeLabels, + HttpServletRequest hsr) throws Exception { + return Response.status(Status.OK).build(); + } + + @Override + public NodeLabelsInfo getLabelsOnNode(HttpServletRequest hsr, String nodeId) + throws IOException { + return new NodeLabelsInfo(); + } + + @Override + public AppPriority getAppPriority(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return new 
AppPriority(); + } + + @Override + public Response updateApplicationPriority(AppPriority targetPriority, + HttpServletRequest hsr, String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + return Response.status(Status.OK).build(); + } + + @Override + public AppQueue getAppQueue(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return new AppQueue(); + } + + @Override + public Response updateAppQueue(AppQueue targetQueue, HttpServletRequest hsr, + String appId) throws AuthorizationException, YarnException, + InterruptedException, IOException { + return Response.status(Status.OK).build(); + } + + @Override + public Response createNewApplication(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return Response.status(Status.OK).build(); + } + + @Override + public Response submitApplication(ApplicationSubmissionContextInfo newApp, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return Response.status(Status.OK).build(); + } + + @Override + public Response postDelegationToken(DelegationToken tokenData, + HttpServletRequest hsr) throws AuthorizationException, IOException, + InterruptedException, Exception { + return Response.status(Status.OK).build(); + } + + @Override + public Response postDelegationTokenExpiration(HttpServletRequest hsr) + throws AuthorizationException, IOException, Exception { + return Response.status(Status.OK).build(); + } + + @Override + public Response cancelDelegationToken(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException, + Exception { + return Response.status(Status.OK).build(); + } + + @Override + public Response createNewReservation(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return Response.status(Status.OK).build(); + } + + @Override + public Response submitReservation(ReservationSubmissionRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return Response.status(Status.OK).build(); + } + + @Override + public Response updateReservation(ReservationUpdateRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return Response.status(Status.OK).build(); + } + + @Override + public Response deleteReservation(ReservationDeleteRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return Response.status(Status.OK).build(); + } + + @Override + public Response listReservation(String queue, String reservationId, + long startTime, long endTime, boolean includeResourceAllocations, + HttpServletRequest hsr) throws Exception { + return Response.status(Status.OK).build(); + } + + @Override + public AppTimeoutInfo getAppTimeout(HttpServletRequest hsr, String appId, + String type) throws AuthorizationException { + return new AppTimeoutInfo(); + } + + @Override + public AppTimeoutsInfo getAppTimeouts(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return new AppTimeoutsInfo(); + } + + @Override + public Response updateApplicationTimeout(AppTimeoutInfo appTimeout, + HttpServletRequest hsr, String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + return Response.status(Status.OK).build(); + } + + @Override + public AppAttemptsInfo getAppAttempts(HttpServletRequest hsr, 
String appId) { + return new AppAttemptsInfo(); + } + + @Override + public AppAttemptInfo getAppAttempt(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + return new AppAttemptInfo(); + } + + @Override + public ContainersInfo getContainers(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + return new ContainersInfo(); + } + + @Override + public ContainerInfo getContainer(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId, + String containerId) { + return new ContainerInfo(); + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java new file mode 100644 index 00000000000..ea985a2f232 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java @@ -0,0 +1,339 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.IOException; +import java.util.Set; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.core.Response; + +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; + +/** + * Mock intercepter that does not do anything other than forwarding it to the + * next intercepter in the chain. 
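+ * <p>
+ * Sketch of how a chain of pass-through interceptors is configured for these
+ * tests (mirrors createConfiguration() in BaseRouterWebServicesTest; any number
+ * of pass-through entries may precede the terminating mock):
+ * <pre>{@code
+ *   conf.set(YarnConfiguration.ROUTER_WEBAPP_INTERCEPTOR_CLASS_PIPELINE,
+ *       PassThroughRESTRequestInterceptor.class.getName() + ","
+ *           + MockRESTRequestInterceptor.class.getName());
+ * }</pre>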
+ */ +public class PassThroughRESTRequestInterceptor + extends AbstractRESTRequestInterceptor { + + @Override + public AppAttemptsInfo getAppAttempts(HttpServletRequest hsr, String appId) { + return getNextInterceptor().getAppAttempts(hsr, appId); + } + + @Override + public AppAttemptInfo getAppAttempt(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + return getNextInterceptor().getAppAttempt(req, res, appId, appAttemptId); + } + + @Override + public ContainersInfo getContainers(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId) { + return getNextInterceptor().getContainers(req, res, appId, appAttemptId); + } + + @Override + public ContainerInfo getContainer(HttpServletRequest req, + HttpServletResponse res, String appId, String appAttemptId, + String containerId) { + return getNextInterceptor().getContainer(req, res, appId, appAttemptId, + containerId); + } + + @Override + public ClusterInfo get() { + return getNextInterceptor().get(); + } + + @Override + public ClusterInfo getClusterInfo() { + return getNextInterceptor().getClusterInfo(); + } + + @Override + public ClusterMetricsInfo getClusterMetricsInfo() { + return getNextInterceptor().getClusterMetricsInfo(); + } + + @Override + public SchedulerTypeInfo getSchedulerInfo() { + return getNextInterceptor().getSchedulerInfo(); + } + + @Override + public String dumpSchedulerLogs(String time, HttpServletRequest hsr) + throws IOException { + return getNextInterceptor().dumpSchedulerLogs(time, hsr); + } + + @Override + public NodesInfo getNodes(String states) { + return getNextInterceptor().getNodes(states); + } + + @Override + public NodeInfo getNode(String nodeId) { + return getNextInterceptor().getNode(nodeId); + } + + @Override + public AppsInfo getApps(HttpServletRequest hsr, String stateQuery, + Set statesQuery, String finalStatusQuery, String userQuery, + String queueQuery, String count, String startedBegin, String startedEnd, + String finishBegin, String finishEnd, Set applicationTypes, + Set applicationTags, Set unselectedFields) { + return getNextInterceptor().getApps(hsr, stateQuery, statesQuery, + finalStatusQuery, userQuery, queueQuery, count, startedBegin, + startedEnd, finishBegin, finishEnd, applicationTypes, applicationTags, + unselectedFields); + } + + @Override + public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId) { + return getNextInterceptor().getActivities(hsr, nodeId); + } + + @Override + public AppActivitiesInfo getAppActivities(HttpServletRequest hsr, + String appId, String time) { + return getNextInterceptor().getAppActivities(hsr, appId, time); + } + + @Override + public ApplicationStatisticsInfo getAppStatistics(HttpServletRequest hsr, + Set stateQueries, Set typeQueries) { + return getNextInterceptor().getAppStatistics(hsr, stateQueries, + typeQueries); + } + + @Override + public AppInfo getApp(HttpServletRequest hsr, String appId, + Set unselectedFields) { + return getNextInterceptor().getApp(hsr, appId, unselectedFields); + } + + @Override + public AppState getAppState(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return getNextInterceptor().getAppState(hsr, appId); + } + + @Override + public Response updateAppState(AppState targetState, HttpServletRequest hsr, + String appId) throws AuthorizationException, YarnException, + InterruptedException, IOException { + return getNextInterceptor().updateAppState(targetState, hsr, appId); + } + + @Override + public NodeToLabelsInfo 
getNodeToLabels(HttpServletRequest hsr) + throws IOException { + return getNextInterceptor().getNodeToLabels(hsr); + } + + @Override + public LabelsToNodesInfo getLabelsToNodes(Set labels) + throws IOException { + return getNextInterceptor().getLabelsToNodes(labels); + } + + @Override + public Response replaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels, + HttpServletRequest hsr) throws Exception { + return getNextInterceptor().replaceLabelsOnNodes(newNodeToLabels, hsr); + } + + @Override + public Response replaceLabelsOnNode(Set newNodeLabelsName, + HttpServletRequest hsr, String nodeId) throws Exception { + return getNextInterceptor().replaceLabelsOnNode(newNodeLabelsName, hsr, + nodeId); + } + + @Override + public NodeLabelsInfo getClusterNodeLabels(HttpServletRequest hsr) + throws IOException { + return getNextInterceptor().getClusterNodeLabels(hsr); + } + + @Override + public Response addToClusterNodeLabels(NodeLabelsInfo newNodeLabels, + HttpServletRequest hsr) throws Exception { + return getNextInterceptor().addToClusterNodeLabels(newNodeLabels, hsr); + } + + @Override + public Response removeFromCluserNodeLabels(Set oldNodeLabels, + HttpServletRequest hsr) throws Exception { + return getNextInterceptor().removeFromCluserNodeLabels(oldNodeLabels, hsr); + } + + @Override + public NodeLabelsInfo getLabelsOnNode(HttpServletRequest hsr, String nodeId) + throws IOException { + return getNextInterceptor().getLabelsOnNode(hsr, nodeId); + } + + @Override + public AppPriority getAppPriority(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return getNextInterceptor().getAppPriority(hsr, appId); + } + + @Override + public Response updateApplicationPriority(AppPriority targetPriority, + HttpServletRequest hsr, String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + return getNextInterceptor().updateApplicationPriority(targetPriority, hsr, + appId); + } + + @Override + public AppQueue getAppQueue(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return getNextInterceptor().getAppQueue(hsr, appId); + } + + @Override + public Response updateAppQueue(AppQueue targetQueue, HttpServletRequest hsr, + String appId) throws AuthorizationException, YarnException, + InterruptedException, IOException { + return getNextInterceptor().updateAppQueue(targetQueue, hsr, appId); + } + + @Override + public Response createNewApplication(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return getNextInterceptor().createNewApplication(hsr); + } + + @Override + public Response submitApplication(ApplicationSubmissionContextInfo newApp, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return getNextInterceptor().submitApplication(newApp, hsr); + } + + @Override + public Response postDelegationToken(DelegationToken tokenData, + HttpServletRequest hsr) throws AuthorizationException, IOException, + InterruptedException, Exception { + return getNextInterceptor().postDelegationToken(tokenData, hsr); + } + + @Override + public Response postDelegationTokenExpiration(HttpServletRequest hsr) + throws AuthorizationException, IOException, Exception { + return getNextInterceptor().postDelegationTokenExpiration(hsr); + } + + @Override + public Response cancelDelegationToken(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException, + Exception { + return getNextInterceptor().cancelDelegationToken(hsr); + } + + 
@Override + public Response createNewReservation(HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return getNextInterceptor().createNewReservation(hsr); + } + + @Override + public Response submitReservation(ReservationSubmissionRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return getNextInterceptor().submitReservation(resContext, hsr); + } + + @Override + public Response updateReservation(ReservationUpdateRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return getNextInterceptor().updateReservation(resContext, hsr); + } + + @Override + public Response deleteReservation(ReservationDeleteRequestInfo resContext, + HttpServletRequest hsr) + throws AuthorizationException, IOException, InterruptedException { + return getNextInterceptor().deleteReservation(resContext, hsr); + } + + @Override + public Response listReservation(String queue, String reservationId, + long startTime, long endTime, boolean includeResourceAllocations, + HttpServletRequest hsr) throws Exception { + return getNextInterceptor().listReservation(queue, reservationId, startTime, + endTime, includeResourceAllocations, hsr); + } + + @Override + public AppTimeoutInfo getAppTimeout(HttpServletRequest hsr, String appId, + String type) throws AuthorizationException { + return getNextInterceptor().getAppTimeout(hsr, appId, type); + } + + @Override + public AppTimeoutsInfo getAppTimeouts(HttpServletRequest hsr, String appId) + throws AuthorizationException { + return getNextInterceptor().getAppTimeouts(hsr, appId); + } + + @Override + public Response updateApplicationTimeout(AppTimeoutInfo appTimeout, + HttpServletRequest hsr, String appId) throws AuthorizationException, + YarnException, InterruptedException, IOException { + return getNextInterceptor().updateApplicationTimeout(appTimeout, hsr, + appId); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java new file mode 100644 index 00000000000..d918149e6a1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java @@ -0,0 +1,379 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import javax.ws.rs.core.Response; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager; +import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreTestUtil; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewApplication; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Extends the {@code BaseRouterWebServicesTest} and overrides methods in order to + * use the {@code RouterWebServices} pipeline test cases for testing the + * {@code FederationInterceptorREST} class. The tests for + * {@code RouterWebServices} have been written cleverly so that they can be + * reused to validate different request interceptor chains. 
+ */ +public class TestFederationInterceptorREST extends BaseRouterWebServicesTest { + private static final Logger LOG = + LoggerFactory.getLogger(TestFederationInterceptorREST.class); + private final static int NUM_SUBCLUSTER = 4; + private static final int BAD_REQUEST = 400; + private static final int ACCEPTED = 202; + private static String user = "test-user"; + private TestableFederationInterceptorREST interceptor; + private MemoryFederationStateStore stateStore; + private FederationStateStoreTestUtil stateStoreUtil; + private List subClusters; + + @Override + public void setUp() { + super.setUpConfig(); + interceptor = new TestableFederationInterceptorREST(); + + stateStore = new MemoryFederationStateStore(); + stateStore.init(this.getConf()); + FederationStateStoreFacade.getInstance().reinitialize(stateStore, + this.getConf()); + stateStoreUtil = new FederationStateStoreTestUtil(stateStore); + + interceptor.setConf(this.getConf()); + interceptor.init(user); + + subClusters = new ArrayList<>(); + + try { + for (int i = 0; i < NUM_SUBCLUSTER; i++) { + SubClusterId sc = SubClusterId.newInstance(Integer.toString(i)); + stateStoreUtil.registerSubCluster(sc); + subClusters.add(sc); + } + } catch (YarnException e) { + LOG.error(e.getMessage()); + Assert.fail(); + } + + } + + @Override + public void tearDown() { + interceptor.shutdown(); + super.tearDown(); + } + + @Override + protected YarnConfiguration createConfiguration() { + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true); + conf.set(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS, + MockDefaultRequestInterceptorREST.class.getName()); + String mockPassThroughInterceptorClass = + PassThroughRESTRequestInterceptor.class.getName(); + + // Create a request intercepter pipeline for testing. The last one in the + // chain is the federation intercepter that calls the mock resource manager. + // The others in the chain will simply forward it to the next one in the + // chain + conf.set(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE, + mockPassThroughInterceptorClass + "," + + TestableFederationInterceptorREST.class.getName()); + + conf.set(YarnConfiguration.FEDERATION_POLICY_MANAGER, + UniformBroadcastPolicyManager.class.getName()); + + // Disable StateStoreFacade cache + conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0); + + return conf; + } + + /** + * This test validates the correctness of GetNewApplication. The return + * ApplicationId has to belong to one of the SubCluster in the cluster. + */ + @Test + public void testGetNewApplication() + throws YarnException, IOException, InterruptedException { + + Response response = interceptor.createNewApplication(null); + + Assert.assertNotNull(response); + NewApplication ci = (NewApplication) response.getEntity(); + Assert.assertNotNull(ci); + ApplicationId appId = ApplicationId.fromString(ci.getApplicationId()); + Assert.assertTrue(appId.getClusterTimestamp() < NUM_SUBCLUSTER); + Assert.assertTrue(appId.getClusterTimestamp() >= 0); + } + + /** + * This test validates the correctness of SubmitApplication. The application + * has to be submitted to one of the SubCluster in the cluster. 
+ */ + @Test + public void testSubmitApplication() + throws YarnException, IOException, InterruptedException { + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + context.setApplicationId(appId.toString()); + + Response response = interceptor.submitApplication(context, null); + Assert.assertEquals(ACCEPTED, response.getStatus()); + SubClusterId ci = (SubClusterId) response.getEntity(); + + Assert.assertNotNull(response); + SubClusterId scIdResult = stateStoreUtil.queryApplicationHomeSC(appId); + Assert.assertNotNull(scIdResult); + Assert.assertTrue(subClusters.contains(scIdResult)); + Assert.assertEquals(ci, scIdResult); + } + + /** + * This test validates the correctness of SubmitApplication in case of + * multiple submissions. The first retry has to be submitted to the same + * SubCluster as the first attempt. + */ + @Test + public void testSubmitApplicationMultipleSubmission() + throws YarnException, IOException, InterruptedException { + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + context.setApplicationId(appId.toString()); + + // First attempt + Response response = interceptor.submitApplication(context, null); + Assert.assertNotNull(response); + Assert.assertEquals(ACCEPTED, response.getStatus()); + + SubClusterId scIdResult = stateStoreUtil.queryApplicationHomeSC(appId); + Assert.assertNotNull(scIdResult); + + // First retry + response = interceptor.submitApplication(context, null); + + Assert.assertNotNull(response); + Assert.assertEquals(ACCEPTED, response.getStatus()); + SubClusterId scIdResult2 = stateStoreUtil.queryApplicationHomeSC(appId); + Assert.assertNotNull(scIdResult2); + Assert.assertEquals(scIdResult, scIdResult2); + } + + /** + * This test validates the correctness of SubmitApplication in case of an + * empty request. + */ + @Test + public void testSubmitApplicationEmptyRequest() + throws YarnException, IOException, InterruptedException { + + // ApplicationSubmissionContextInfo null + Response response = interceptor.submitApplication(null, null); + + Assert.assertEquals(BAD_REQUEST, response.getStatus()); + + // ApplicationSubmissionContextInfo empty + response = interceptor + .submitApplication(new ApplicationSubmissionContextInfo(), null); + + Assert.assertEquals(BAD_REQUEST, response.getStatus()); + + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + response = interceptor.submitApplication(context, null); + Assert.assertEquals(BAD_REQUEST, response.getStatus()); + } + + /** + * This test validates the correctness of SubmitApplication in case of an + * application in the wrong format. + */ + @Test + public void testSubmitApplicationWrongFormat() + throws YarnException, IOException, InterruptedException { + + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + context.setApplicationId("Application_wrong_id"); + Response response = interceptor.submitApplication(context, null); + Assert.assertEquals(BAD_REQUEST, response.getStatus()); + } + + /** + * This test validates the correctness of ForceKillApplication in case the + * application exists in the cluster. 
+ */ + @Test + public void testForceKillApplication() + throws YarnException, IOException, InterruptedException { + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + context.setApplicationId(appId.toString()); + + // Submit the application we are going to kill later + Response response = interceptor.submitApplication(context, null); + + Assert.assertNotNull(response); + Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId)); + + AppState appState = new AppState("KILLED"); + + Response responseKill = + interceptor.updateAppState(appState, null, appId.toString()); + Assert.assertNotNull(responseKill); + } + + /** + * This test validates the correctness of ForceKillApplication in case the + * application does not exist in the StateStore. + */ + @Test + public void testForceKillApplicationNotExists() + throws YarnException, IOException, InterruptedException { + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + AppState appState = new AppState("KILLED"); + try { + interceptor.updateAppState(appState, null, appId.toString()); + Assert.fail(); + } catch (YarnException e) { + Assert.assertTrue( + e.getMessage().equals("Application " + appId + " does not exist")); + } + } + + /** + * This test validates the correctness of ForceKillApplication in case of an + * application in the wrong format. + */ + @Test + public void testForceKillApplicationWrongFormat() + throws YarnException, IOException, InterruptedException { + + AppState appState = new AppState("KILLED"); + Response response = + interceptor.updateAppState(appState, null, "Application_wrong_id"); + Assert.assertEquals(BAD_REQUEST, response.getStatus()); + } + + /** + * This test validates the correctness of ForceKillApplication in case of an + * empty request. + */ + @Test + public void testForceKillApplicationEmptyRequest() + throws YarnException, IOException, InterruptedException { + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + context.setApplicationId(appId.toString()); + + // Submit the application we are going to kill later + interceptor.submitApplication(context, null); + + Response response = + interceptor.updateAppState(null, null, appId.toString()); + Assert.assertEquals(BAD_REQUEST, response.getStatus()); + + } + + /** + * This test validates the correctness of GetApplicationReport in case the + * application exists in the cluster. + */ + @Test + public void testGetApplicationReport() + throws YarnException, IOException, InterruptedException { + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + context.setApplicationId(appId.toString()); + + // Submit the application we want the report for later + Response response = interceptor.submitApplication(context, null); + + Assert.assertNotNull(response); + Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId)); + + AppInfo responseGet = interceptor.getApp(null, appId.toString(), null); + + Assert.assertNotNull(responseGet); + } + + /** + * This test validates the correctness of GetApplicationReport in case the + * application does not exist in the StateStore. 
+ */ + @Test + public void testGetApplicationNotExists() + throws YarnException, IOException, InterruptedException { + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + + AppInfo response = interceptor.getApp(null, appId.toString(), null); + + Assert.assertNull(response); + } + + /** + * This test validates the correctness of GetApplicationReport in case of + * application in wrong format. + */ + @Test + public void testGetApplicationWrongFormat() + throws YarnException, IOException, InterruptedException { + + AppInfo response = interceptor.getApp(null, "Application_wrong_id", null); + + Assert.assertNull(response); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorRESTRetry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorRESTRetry.java new file mode 100644 index 00000000000..48bc1a8043f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorRESTRetry.java @@ -0,0 +1,274 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import javax.ws.rs.core.Response; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils; +import org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager; +import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore; +import org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreTestUtil; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewApplication; +import org.apache.hadoop.yarn.server.router.clientrm.PassThroughClientRequestInterceptor; +import org.apache.hadoop.yarn.server.router.clientrm.TestableFederationClientInterceptor; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Extends the {@code BaseRouterWebServicesTest} and overrides methods in order + * to use the {@code RouterWebServices} pipeline test cases for testing the + * {@code FederationInterceptorREST} class. The tests for + * {@code RouterWebServices} has been written cleverly so that it can be reused + * to validate different request interceptor chains. + *

+ * It tests the case with SubClusters down and the Router logic of retries. We + * have 1 good SubCluster and 2 bad ones for all the tests. + */ +public class TestFederationInterceptorRESTRetry + extends BaseRouterWebServicesTest { + private static final Logger LOG = + LoggerFactory.getLogger(TestFederationInterceptorRESTRetry.class); + private static final int SERVICE_UNAVAILABLE = 503; + private static final int ACCEPTED = 202; + private static final int OK = 200; + // running and registered + private static SubClusterId good; + // registered but not running + private static SubClusterId bad1; + private static SubClusterId bad2; + private static List scs = new ArrayList(); + private TestableFederationInterceptorREST interceptor; + private MemoryFederationStateStore stateStore; + private FederationStateStoreTestUtil stateStoreUtil; + private String user = "test-user"; + + @Override + public void setUp() { + super.setUpConfig(); + interceptor = new TestableFederationInterceptorREST(); + + stateStore = new MemoryFederationStateStore(); + stateStore.init(this.getConf()); + FederationStateStoreFacade.getInstance().reinitialize(stateStore, + getConf()); + stateStoreUtil = new FederationStateStoreTestUtil(stateStore); + + interceptor.setConf(this.getConf()); + interceptor.init(user); + + // Create SubClusters + good = SubClusterId.newInstance("0"); + bad1 = SubClusterId.newInstance("1"); + bad2 = SubClusterId.newInstance("2"); + scs.add(good); + scs.add(bad1); + scs.add(bad2); + + // The mock RM will not start in these SubClusters, this is done to simulate + // a SubCluster down + + interceptor.registerBadSubCluster(bad1); + interceptor.registerBadSubCluster(bad2); + } + + @Override + public void tearDown() { + interceptor.shutdown(); + super.tearDown(); + } + + private void setupCluster(List scsToRegister) + throws YarnException { + + try { + // Clean up the StateStore before every test + stateStoreUtil.deregisterAllSubClusters(); + + for (SubClusterId sc : scsToRegister) { + stateStoreUtil.registerSubCluster(sc); + } + } catch (YarnException e) { + LOG.error(e.getMessage()); + Assert.fail(); + } + } + + @Override + protected YarnConfiguration createConfiguration() { + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true); + + conf.set(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS, + MockDefaultRequestInterceptorREST.class.getName()); + + String mockPassThroughInterceptorClass = + PassThroughClientRequestInterceptor.class.getName(); + + // Create a request intercepter pipeline for testing. The last one in the + // chain is the federation intercepter that calls the mock resource manager. + // The others in the chain will simply forward it to the next one in the + // chain + conf.set(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE, + mockPassThroughInterceptorClass + "," + + TestableFederationClientInterceptor.class.getName()); + + conf.set(YarnConfiguration.FEDERATION_POLICY_MANAGER, + UniformBroadcastPolicyManager.class.getName()); + + // Disable StateStoreFacade cache + conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0); + + return conf; + } + + /** + * This test validates the correctness of GetNewApplication in case the + * cluster is composed of only 1 bad SubCluster. 
+ */ + @Test + public void testGetNewApplicationOneBadSC() + throws YarnException, IOException, InterruptedException { + + setupCluster(Arrays.asList(bad2)); + + Response response = interceptor.createNewApplication(null); + Assert.assertEquals(SERVICE_UNAVAILABLE, response.getStatus()); + Assert.assertEquals(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE, + response.getEntity()); + } + + /** + * This test validates the correctness of GetNewApplication in case the + * cluster is composed of only 2 bad SubClusters. + */ + @Test + public void testGetNewApplicationTwoBadSCs() + throws YarnException, IOException, InterruptedException { + setupCluster(Arrays.asList(bad1, bad2)); + + Response response = interceptor.createNewApplication(null); + Assert.assertEquals(SERVICE_UNAVAILABLE, response.getStatus()); + Assert.assertEquals(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE, + response.getEntity()); + } + + /** + * This test validates the correctness of GetNewApplication in case the + * cluster is composed of only 1 bad SubCluster and 1 good one. + */ + @Test + public void testGetNewApplicationOneBadOneGood() + throws YarnException, IOException, InterruptedException { + System.out.println("Test getNewApplication with one bad, one good SC"); + setupCluster(Arrays.asList(good, bad2)); + Response response = interceptor.createNewApplication(null); + + Assert.assertEquals(OK, response.getStatus()); + + NewApplication newApp = (NewApplication) response.getEntity(); + ApplicationId appId = ApplicationId.fromString(newApp.getApplicationId()); + + Assert.assertEquals(Integer.parseInt(good.getId()), + appId.getClusterTimestamp()); + } + + /** + * This test validates the correctness of SubmitApplication in case the + * cluster is composed of only 1 bad SubCluster. + */ + @Test + public void testSubmitApplicationOneBadSC() + throws YarnException, IOException, InterruptedException { + + setupCluster(Arrays.asList(bad2)); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + context.setApplicationId(appId.toString()); + + Response response = interceptor.submitApplication(context, null); + Assert.assertEquals(SERVICE_UNAVAILABLE, response.getStatus()); + Assert.assertEquals(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE, + response.getEntity()); + } + + /** + * This test validates the correctness of SubmitApplication in case the + * cluster is composed of only 2 bad SubClusters. + */ + @Test + public void testSubmitApplicationTwoBadSCs() + throws YarnException, IOException, InterruptedException { + setupCluster(Arrays.asList(bad1, bad2)); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + context.setApplicationId(appId.toString()); + + Response response = interceptor.submitApplication(context, null); + Assert.assertEquals(SERVICE_UNAVAILABLE, response.getStatus()); + Assert.assertEquals(FederationPolicyUtils.NO_ACTIVE_SUBCLUSTER_AVAILABLE, + response.getEntity()); + } + + /** + * This test validates the correctness of SubmitApplication in case the + * cluster is composed of only 1 bad SubCluster and a good one. 
+ */ + @Test + public void testSubmitApplicationOneBadOneGood() + throws YarnException, IOException, InterruptedException { + System.out.println("Test submitApplication with one bad, one good SC"); + setupCluster(Arrays.asList(good, bad2)); + + ApplicationId appId = + ApplicationId.newInstance(System.currentTimeMillis(), 1); + ApplicationSubmissionContextInfo context = + new ApplicationSubmissionContextInfo(); + context.setApplicationId(appId.toString()); + Response response = interceptor.submitApplication(context, null); + + Assert.assertEquals(ACCEPTED, response.getStatus()); + + Assert.assertEquals(good, + stateStore + .getApplicationHomeSubCluster( + GetApplicationHomeSubClusterRequest.newInstance(appId)) + .getApplicationHomeSubCluster().getHomeSubCluster()); + } + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java new file mode 100644 index 00000000000..c96575c21a4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java @@ -0,0 +1,269 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.IOException; +import java.util.Map; + +import javax.ws.rs.core.Response; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; +import org.apache.hadoop.yarn.server.router.webapp.RouterWebServices.RequestInterceptorChainWrapper; +import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test class to validate the WebService interceptor model inside the Router. + */ +public class TestRouterWebServices extends BaseRouterWebServicesTest { + + private String user = "test1"; + + /** + * Test that all requests get forwarded to the last interceptor in the chain + * and that the responses are returned back. 
+ */ + @Test + public void testRouterWebServicesE2E() throws Exception { + + ClusterInfo clusterInfo = get(user); + Assert.assertNotNull(clusterInfo); + + ClusterInfo clusterInfo2 = getClusterInfo(user); + Assert.assertNotNull(clusterInfo2); + + ClusterMetricsInfo clusterMetricsInfo = getClusterMetricsInfo(user); + Assert.assertNotNull(clusterMetricsInfo); + + SchedulerTypeInfo schedulerTypeInfo = getSchedulerInfo(user); + Assert.assertNotNull(schedulerTypeInfo); + + String dumpResult = dumpSchedulerLogs(user); + Assert.assertNotNull(dumpResult); + + NodesInfo nodesInfo = getNodes(user); + Assert.assertNotNull(nodesInfo); + + NodeInfo nodeInfo = getNode(user); + Assert.assertNotNull(nodeInfo); + + AppsInfo appsInfo = getApps(user); + Assert.assertNotNull(appsInfo); + + ActivitiesInfo activitiesInfo = getActivities(user); + Assert.assertNotNull(activitiesInfo); + + AppActivitiesInfo appActiviesInfo = getAppActivities(user); + Assert.assertNotNull(appActiviesInfo); + + ApplicationStatisticsInfo applicationStatisticsInfo = + getAppStatistics(user); + Assert.assertNotNull(applicationStatisticsInfo); + + AppInfo appInfo = getApp(user); + Assert.assertNotNull(appInfo); + + AppState appState = getAppState(user); + Assert.assertNotNull(appState); + + Response response = updateAppState(user); + Assert.assertNotNull(response); + + NodeToLabelsInfo nodeToLabelsInfo = getNodeToLabels(user); + Assert.assertNotNull(nodeToLabelsInfo); + + LabelsToNodesInfo labelsToNodesInfo = getLabelsToNodes(user); + Assert.assertNotNull(labelsToNodesInfo); + + Response response2 = replaceLabelsOnNodes(user); + Assert.assertNotNull(response2); + + Response response3 = replaceLabelsOnNode(user); + Assert.assertNotNull(response3); + + NodeLabelsInfo nodeLabelsInfo = getClusterNodeLabels(user); + Assert.assertNotNull(nodeLabelsInfo); + + Response response4 = addToClusterNodeLabels(user); + Assert.assertNotNull(response4); + + Response response5 = removeFromCluserNodeLabels(user); + Assert.assertNotNull(response5); + + NodeLabelsInfo nodeLabelsInfo2 = getLabelsOnNode(user); + Assert.assertNotNull(nodeLabelsInfo2); + + AppPriority appPriority = getAppPriority(user); + Assert.assertNotNull(appPriority); + + Response response6 = updateApplicationPriority(user); + Assert.assertNotNull(response6); + + AppQueue appQueue = getAppQueue(user); + Assert.assertNotNull(appQueue); + + Response response7 = updateAppQueue(user); + Assert.assertNotNull(response7); + + Response response8 = createNewApplication(user); + Assert.assertNotNull(response8); + + Response response9 = submitApplication(user); + Assert.assertNotNull(response9); + + Response response10 = postDelegationToken(user); + Assert.assertNotNull(response10); + + Response response11 = postDelegationTokenExpiration(user); + Assert.assertNotNull(response11); + + Response response12 = cancelDelegationToken(user); + Assert.assertNotNull(response12); + + Response response13 = createNewReservation(user); + Assert.assertNotNull(response13); + + Response response14 = submitReservation(user); + Assert.assertNotNull(response14); + + Response response15 = updateReservation(user); + Assert.assertNotNull(response15); + + Response response16 = deleteReservation(user); + Assert.assertNotNull(response16); + + Response response17 = listReservation(user); + Assert.assertNotNull(response17); + + AppTimeoutInfo appTimeoutInfo = getAppTimeout(user); + Assert.assertNotNull(appTimeoutInfo); + + AppTimeoutsInfo appTimeoutsInfo = getAppTimeouts(user); + Assert.assertNotNull(appTimeoutsInfo); + + 
Response response18 = updateApplicationTimeout(user); + Assert.assertNotNull(response18); + + AppAttemptsInfo appAttemptsInfo = getAppAttempts(user); + Assert.assertNotNull(appAttemptsInfo); + + AppAttemptInfo appAttemptInfo = getAppAttempt(user); + Assert.assertNotNull(appAttemptInfo); + + ContainersInfo containersInfo = getContainers(user); + Assert.assertNotNull(containersInfo); + + ContainerInfo containerInfo = getContainer(user); + Assert.assertNotNull(containerInfo); + } + + /** + * Tests if the pipeline is created properly. + */ + @Test + public void testRequestInterceptorChainCreation() throws Exception { + RESTRequestInterceptor root = + super.getRouterWebServices().createRequestInterceptorChain(); + int index = 0; + while (root != null) { + // The current pipeline is: + // PassThroughRESTRequestInterceptor - index = 0 + // PassThroughRESTRequestInterceptor - index = 1 + // PassThroughRESTRequestInterceptor - index = 2 + // MockRESTRequestInterceptor - index = 3 + switch (index) { + case 0: // Fall to the next case + case 1: // Fall to the next case + case 2: + // If index is equal to 0,1 or 2 we fall in this check + Assert.assertEquals(PassThroughRESTRequestInterceptor.class.getName(), + root.getClass().getName()); + break; + case 3: + Assert.assertEquals(MockRESTRequestInterceptor.class.getName(), + root.getClass().getName()); + break; + default: + Assert.fail(); + } + root = root.getNextInterceptor(); + index++; + } + Assert.assertEquals("The number of interceptors in chain does not match", 4, + index); + } + + /** + * Test if the different chains for users are generated, and LRU cache is + * working as expected. + */ + @Test + public void testUsersChainMapWithLRUCache() + throws YarnException, IOException, InterruptedException { + getInterceptorChain("test1"); + getInterceptorChain("test2"); + getInterceptorChain("test3"); + getInterceptorChain("test4"); + getInterceptorChain("test5"); + getInterceptorChain("test6"); + getInterceptorChain("test7"); + getInterceptorChain("test8"); + + Map pipelines = + getRouterWebServices().getPipelines(); + Assert.assertEquals(8, pipelines.size()); + + getInterceptorChain("test9"); + getInterceptorChain("test10"); + getInterceptorChain("test1"); + getInterceptorChain("test11"); + + // The cache max size is defined in TEST_MAX_CACHE_SIZE + Assert.assertEquals(10, pipelines.size()); + + RequestInterceptorChainWrapper chain = pipelines.get("test1"); + Assert.assertNotNull("test1 should not be evicted", chain); + + chain = pipelines.get("test2"); + Assert.assertNull("test2 should have been evicted", chain); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServicesREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServicesREST.java new file mode 100644 index 00000000000..d7b1a0f94bc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServicesREST.java @@ -0,0 +1,1298 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; + +import javax.ws.rs.core.MediaType; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.NodeLabel; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.nodemanager.NodeManager; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServiceProtocol; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewApplication; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NewReservation; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateRequestInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; +import org.apache.hadoop.yarn.server.router.Router; +import 
org.apache.hadoop.yarn.server.webapp.WebServices; +import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; +import org.codehaus.jettison.json.JSONException; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.sun.jersey.api.client.Client; +import com.sun.jersey.api.client.ClientHandlerException; +import com.sun.jersey.api.client.ClientResponse; +import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.api.client.WebResource.Builder; + +import net.jcip.annotations.NotThreadSafe; + +/** + * This test validates E2E the correctness of the RouterWebServices. It starts + * Router, RM and NM in 3 different processes to avoid servlet conflicts. Each + * test creates a REST call to the Router and validates that the operation + * completes successfully. + */ +@NotThreadSafe +public class TestRouterWebServicesREST { + + private static String userName = "test"; + + private static JavaProcess rm; + private static JavaProcess nm; + private static JavaProcess router; + + private static Configuration conf; + + private static final int STATUS_OK = 200; + private static final int STATUS_ACCEPTED = 202; + private static final int STATUS_BADREQUEST = 400; + private static final int STATUS_ERROR = 500; + + /** + * Wait until the webservice is up and running. + */ + private static void waitWebAppRunning(String address, String path) { + while (true) { + Client clientToRouter = Client.create(); + WebResource toRouter = clientToRouter.resource(address).path(path); + try { + ClientResponse response = toRouter.accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + if (response.getStatus() == STATUS_OK) { + // process is up and running + return; + } + } catch (ClientHandlerException e) { + // process is not up and running + continue; + } + } + } + + @BeforeClass + public static void setUp() throws Exception { + conf = new YarnConfiguration(); + rm = new JavaProcess(ResourceManager.class); + router = new JavaProcess(Router.class); + nm = new JavaProcess(NodeManager.class); + + // The tests cannot start if all the services are not up and running. + waitWebAppRunning(WebAppUtils.getRMWebAppURLWithScheme(conf), + RMWSConsts.RM_WEB_SERVICE_PATH); + + waitWebAppRunning(WebAppUtils.getRouterWebAppURLWithScheme(conf), + RMWSConsts.RM_WEB_SERVICE_PATH); + + waitWebAppRunning("http://" + WebAppUtils.getNMWebAppURLWithoutScheme(conf), + "/ws/v1/node"); + } + + @AfterClass + public static void stop() throws Exception { + nm.stop(); + router.stop(); + rm.stop(); + } + + /** + * Performs 2 GET calls, one to the RM and one to the Router. In the positive + * case, it returns the 2 answers in a list. 
+ */ + private static List performGetCalls(String path, Class returnType, + String queryName, String queryValue) + throws IOException, InterruptedException { + Client clientToRouter = Client.create(); + WebResource toRouter = clientToRouter + .resource(WebAppUtils.getRouterWebAppURLWithScheme(conf)).path(path); + + Client clientToRM = Client.create(); + WebResource toRM = clientToRM + .resource(WebAppUtils.getRMWebAppURLWithScheme(conf)).path(path); + + Builder toRouterBuilder; + Builder toRMBuilder; + + if (queryValue != null && queryName != null) { + toRouterBuilder = toRouter.queryParam(queryName, queryValue) + .accept(MediaType.APPLICATION_XML); + toRMBuilder = toRM.queryParam(queryName, queryValue) + .accept(MediaType.APPLICATION_XML); + } else { + toRouterBuilder = toRouter.accept(MediaType.APPLICATION_XML); + toRMBuilder = toRM.accept(MediaType.APPLICATION_XML); + } + + return UserGroupInformation.createRemoteUser(userName) + .doAs(new PrivilegedExceptionAction>() { + @Override + public List run() throws Exception { + ClientResponse response = toRouterBuilder.get(ClientResponse.class); + ClientResponse response2 = toRMBuilder.get(ClientResponse.class); + if (response.getStatus() == STATUS_OK + && response2.getStatus() == STATUS_OK) { + List responses = new ArrayList(); + responses.add(response.getEntity(returnType)); + responses.add(response2.getEntity(returnType)); + return responses; + } else { + Assert.fail(); + } + return null; + } + }); + } + + /** + * Performs a POST/PUT/DELETE call to Router and returns the ClientResponse. + */ + private static ClientResponse performCall(String webAddress, String queryKey, + String queryValue, Object context, HTTPMethods method) + throws IOException, InterruptedException { + + return UserGroupInformation.createRemoteUser(userName) + .doAs(new PrivilegedExceptionAction() { + @Override + public ClientResponse run() throws Exception { + Client clientToRouter = Client.create(); + WebResource toRouter = clientToRouter + .resource(WebAppUtils.getRouterWebAppURLWithScheme(conf)) + .path(webAddress); + + WebResource toRouterWR; + if (queryKey != null && queryValue != null) { + toRouterWR = toRouter.queryParam(queryKey, queryValue); + } else { + toRouterWR = toRouter; + } + + Builder builder = null; + if (context != null) { + builder = toRouterWR.entity(context, MediaType.APPLICATION_JSON); + builder = builder.accept(MediaType.APPLICATION_JSON); + } else { + builder = toRouter.accept(MediaType.APPLICATION_JSON); + } + + ClientResponse response = null; + + switch (method) { + case DELETE: + response = builder.delete(ClientResponse.class); + break; + case POST: + response = builder.post(ClientResponse.class); + break; + case PUT: + response = builder.put(ClientResponse.class); + break; + default: + break; + } + + return response; + } + }); + } + + /** + * This test validates the correctness of {@link RMWebServiceProtocol#get()} + * inside Router. + */ + @Test(timeout = 1000) + public void testInfoXML() throws JSONException, Exception { + + List responses = performGetCalls( + RMWSConsts.RM_WEB_SERVICE_PATH, ClusterInfo.class, null, null); + + ClusterInfo routerResponse = responses.get(0); + ClusterInfo rmResponse = responses.get(1); + + Assert.assertNotNull(routerResponse); + Assert.assertNotNull(rmResponse); + + Assert.assertEquals(rmResponse.getRMVersion(), + routerResponse.getRMVersion()); + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#getClusterInfo()} inside Router. 
+ */ + @Test(timeout = 1000) + public void testClusterInfoXML() throws JSONException, Exception { + + List responses = + performGetCalls(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.INFO, + ClusterInfo.class, null, null); + + ClusterInfo routerResponse = responses.get(0); + ClusterInfo rmResponse = responses.get(1); + + Assert.assertNotNull(routerResponse); + Assert.assertNotNull(rmResponse); + + Assert.assertEquals(rmResponse.getRMVersion(), + routerResponse.getRMVersion()); + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#getClusterMetricsInfo()} inside Router. + */ + @Test(timeout = 1000) + public void testMetricsInfoXML() throws JSONException, Exception { + + List responses = + performGetCalls(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.METRICS, + ClusterMetricsInfo.class, null, null); + + ClusterMetricsInfo routerResponse = responses.get(0); + ClusterMetricsInfo rmResponse = responses.get(1); + + Assert.assertNotNull(routerResponse); + Assert.assertNotNull(rmResponse); + + Assert.assertEquals(rmResponse.getActiveNodes(), + routerResponse.getActiveNodes()); + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#getSchedulerInfo()} inside Router. + */ + @Test(timeout = 1000) + public void testSchedulerInfoXML() throws JSONException, Exception { + + List responses = + performGetCalls(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.SCHEDULER, + SchedulerTypeInfo.class, null, null); + + SchedulerTypeInfo routerResponse = responses.get(0); + SchedulerTypeInfo rmResponse = responses.get(1); + + Assert.assertNotNull(routerResponse); + Assert.assertNotNull(rmResponse); + + Assert.assertEquals(rmResponse.getSchedulerInfo().getSchedulerType(), + routerResponse.getSchedulerInfo().getSchedulerType()); + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#getNodes()} inside Router. + */ + @Test(timeout = 1000) + public void testNodesXML() throws JSONException, Exception { + + List responses = + performGetCalls(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.NODES, + NodesInfo.class, RMWSConsts.STATES, "LOST"); + + NodesInfo routerResponse = responses.get(0); + NodesInfo rmResponse = responses.get(1); + + Assert.assertNotNull(routerResponse); + Assert.assertNotNull(rmResponse); + + Assert.assertEquals(rmResponse.getNodes().size(), + routerResponse.getNodes().size()); + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#getNode()} inside Router. + */ + @Test(timeout = 1000) + public void testNodeXML() throws JSONException, Exception { + + List responses = performGetCalls( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.NODES + "/" + getNodeId(), + NodeInfo.class, null, null); + + NodeInfo routerResponse = responses.get(0); + NodeInfo rmResponse = responses.get(1); + + Assert.assertNotNull(routerResponse); + Assert.assertNotNull(rmResponse); + + Assert.assertEquals(rmResponse.getVersion(), routerResponse.getVersion()); + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#getActivities()} inside Router. 
+ */ + @Test(timeout = 1000) + public void testActiviesXML() throws JSONException, Exception { + + List responses = performGetCalls( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.SCHEDULER_ACTIVITIES, + ActivitiesInfo.class, null, null); + + ActivitiesInfo routerResponse = responses.get(0); + ActivitiesInfo rmResponse = responses.get(1); + + Assert.assertNotNull(routerResponse); + Assert.assertNotNull(rmResponse); + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#getAppActivities()} inside Router. + */ + @Test(timeout = 1000) + public void testAppActivitiesXML() throws JSONException, Exception { + + String appId = submitApplication(); + + List responses = performGetCalls( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.SCHEDULER_APP_ACTIVITIES, + AppActivitiesInfo.class, RMWSConsts.APP_ID, appId); + + AppActivitiesInfo routerResponse = responses.get(0); + AppActivitiesInfo rmResponse = responses.get(1); + + Assert.assertNotNull(routerResponse); + Assert.assertNotNull(rmResponse); + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#getAppStatistics()} inside Router. + */ + @Test(timeout = 1000) + public void testAppStatisticsXML() throws JSONException, Exception { + + submitApplication(); + + List responses = performGetCalls( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APP_STATISTICS, + ApplicationStatisticsInfo.class, RMWSConsts.STATES, "RUNNING"); + + ApplicationStatisticsInfo routerResponse = responses.get(0); + ApplicationStatisticsInfo rmResponse = responses.get(1); + + Assert.assertNotNull(routerResponse); + Assert.assertNotNull(rmResponse); + + Assert.assertEquals(rmResponse.getStatItems().size(), + routerResponse.getStatItems().size()); + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#dumpSchedulerLogs()} inside Router. + */ + @Test(timeout = 1000) + public void testDumpSchedulerLogsXML() throws JSONException, Exception { + + // Test with a wrong HTTP method + ClientResponse badResponse = + performCall(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.SCHEDULER_LOGS, + null, null, null, HTTPMethods.PUT); + + Assert.assertEquals(STATUS_ERROR, badResponse.getStatus()); + + // Test with the correct HTTP method + + ClientResponse response = + performCall(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.SCHEDULER_LOGS, + RMWSConsts.TIME, "1", null, HTTPMethods.POST); + + if (response.getStatus() == STATUS_BADREQUEST) { + String ci = response.getEntity(String.class); + Assert.assertNotNull(ci); + } else { + Assert.fail(); + } + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#createNewApplication()} inside Router. + */ + @Test(timeout = 1000) + public void testNewApplicationXML() throws JSONException, Exception { + + // Test with a wrong HTTP method + ClientResponse badResponse = performCall( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS_NEW_APPLICATION, null, + null, null, HTTPMethods.PUT); + + Assert.assertEquals(STATUS_ERROR, badResponse.getStatus()); + + // Test with the correct HTTP method + + ClientResponse response = performCall( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS_NEW_APPLICATION, null, + null, null, HTTPMethods.POST); + + if (response.getStatus() == STATUS_OK) { + NewApplication ci = response.getEntity(NewApplication.class); + Assert.assertNotNull(ci); + } else { + Assert.fail(); + } + + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#submitApplication()} inside Router. 
+   */
+  @Test(timeout = 1000)
+  public void testSubmitApplicationXML() throws JSONException, Exception {
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse =
+        performCall(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS, null,
+            null, null, HTTPMethods.PUT);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    ApplicationSubmissionContextInfo context =
+        new ApplicationSubmissionContextInfo();
+    context.setApplicationId(getNewApplicationId().getApplicationId());
+
+    ClientResponse response =
+        performCall(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS, null,
+            null, context, HTTPMethods.POST);
+
+    if (response.getStatus() == STATUS_ACCEPTED) {
+      String ci = response.getEntity(String.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getApps()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testAppsXML() throws JSONException, Exception {
+
+    submitApplication();
+
+    List responses =
+        performGetCalls(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS,
+            AppsInfo.class, null, null);
+
+    AppsInfo routerResponse = responses.get(0);
+    AppsInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getApps().size(),
+        routerResponse.getApps().size());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getApp()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testAppXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    List responses = performGetCalls(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId,
+        AppInfo.class, null, null);
+
+    AppInfo routerResponse = responses.get(0);
+    AppInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getAMHostHttpAddress(),
+        routerResponse.getAMHostHttpAddress());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getAppAttempts()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testAppAttemptXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    List responses = performGetCalls(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId + "/"
+            + RMWSConsts.ATTEMPTS,
+        AppAttemptsInfo.class, null, null);
+
+    AppAttemptsInfo routerResponse = responses.get(0);
+    AppAttemptsInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getAttempts().size(),
+        routerResponse.getAttempts().size());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getAppState()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testAppStateXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    List responses =
+        performGetCalls(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/"
+            + appId + "/" + RMWSConsts.STATE, AppState.class, null, null);
+
+    AppState routerResponse = responses.get(0);
+    AppState rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getState(), routerResponse.getState());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#updateAppState()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testUpdateAppStateXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse = performCall(RMWSConsts.RM_WEB_SERVICE_PATH
+        + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.STATE, null, null,
+        null, HTTPMethods.POST);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    AppState appState = new AppState("KILLED");
+
+    ClientResponse response = performCall(RMWSConsts.RM_WEB_SERVICE_PATH
+        + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.STATE, null, null,
+        appState, HTTPMethods.PUT);
+
+    if (response.getStatus() == STATUS_ACCEPTED) {
+      AppState ci = response.getEntity(AppState.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getAppPriority()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testAppPriorityXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    List responses =
+        performGetCalls(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/"
+            + appId + "/" + RMWSConsts.PRIORITY, AppPriority.class, null, null);
+
+    AppPriority routerResponse = responses.get(0);
+    AppPriority rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getPriority(),
+        routerResponse.getPriority());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#updateApplicationPriority()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testUpdateAppPriorityXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse = performCall(RMWSConsts.RM_WEB_SERVICE_PATH
+        + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.PRIORITY, null, null,
+        null, HTTPMethods.POST);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    AppPriority appPriority = new AppPriority(1);
+
+    ClientResponse response =
+        performCall(
+            RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId + "/"
+                + RMWSConsts.PRIORITY,
+            null, null, appPriority, HTTPMethods.PUT);
+
+    if (response.getStatus() == STATUS_OK) {
+      AppPriority ci = response.getEntity(AppPriority.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getAppQueue()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testAppQueueXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    List responses =
+        performGetCalls(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/"
+            + appId + "/" + RMWSConsts.QUEUE, AppQueue.class, null, null);
+
+    AppQueue routerResponse = responses.get(0);
+    AppQueue rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getQueue(), routerResponse.getQueue());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#updateAppQueue()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testUpdateAppQueueXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse = performCall(RMWSConsts.RM_WEB_SERVICE_PATH
+        + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.QUEUE, null, null,
+        null, HTTPMethods.POST);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    AppQueue appQueue = new AppQueue("default");
+
+    ClientResponse response = performCall(RMWSConsts.RM_WEB_SERVICE_PATH
+        + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.QUEUE, null, null,
+        appQueue, HTTPMethods.PUT);
+
+    if (response.getStatus() == STATUS_OK) {
+      AppQueue ci = response.getEntity(AppQueue.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getAppTimeouts()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testAppTimeoutsXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    List responses = performGetCalls(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId + "/"
+            + RMWSConsts.TIMEOUTS,
+        AppTimeoutsInfo.class, null, null);
+
+    AppTimeoutsInfo routerResponse = responses.get(0);
+    AppTimeoutsInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getAppTimeouts().size(),
+        routerResponse.getAppTimeouts().size());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getAppTimeout()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testAppTimeoutXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    List responses = performGetCalls(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId + "/"
+            + RMWSConsts.TIMEOUTS + "/" + "LIFETIME",
+        AppTimeoutInfo.class, null, null);
+
+    AppTimeoutInfo routerResponse = responses.get(0);
+    AppTimeoutInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getExpireTime(),
+        routerResponse.getExpireTime());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#updateApplicationTimeout()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testUpdateAppTimeoutsXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse = performCall(RMWSConsts.RM_WEB_SERVICE_PATH
+        + RMWSConsts.APPS + "/" + appId + "/" + RMWSConsts.TIMEOUT, null, null,
+        null, HTTPMethods.POST);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    // Create a bad request
+    AppTimeoutInfo appTimeoutInfo = new AppTimeoutInfo();
+
+    ClientResponse response =
+        performCall(
+            RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId + "/"
+                + RMWSConsts.TIMEOUT,
+            null, null, appTimeoutInfo, HTTPMethods.PUT);
+
+    if (response.getStatus() == STATUS_BADREQUEST) {
+      String ci = response.getEntity(String.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#createNewReservation()} inside Router.
+ */ + @Test(timeout = 1000) + public void testNewReservationXML() throws JSONException, Exception { + + // Test with a wrong HTTP method + ClientResponse badResponse = + performCall(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_NEW, + null, null, null, HTTPMethods.PUT); + + Assert.assertEquals(STATUS_ERROR, badResponse.getStatus()); + + // Test with the correct HTTP method + + ClientResponse response = + performCall(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_NEW, + null, null, null, HTTPMethods.POST); + + if (response.getStatus() == STATUS_OK) { + NewReservation ci = response.getEntity(NewReservation.class); + Assert.assertNotNull(ci); + } else { + Assert.fail(); + } + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#submitReservation()} inside Router. + */ + @Test(timeout = 1000) + public void testSubmitReservationXML() throws JSONException, Exception { + + // Test with a wrong HTTP method + ClientResponse badResponse = performCall( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_SUBMIT, null, + null, null, HTTPMethods.PUT); + + Assert.assertEquals(STATUS_ERROR, badResponse.getStatus()); + + // Test with the correct HTTP method + + ReservationSubmissionRequestInfo context = + new ReservationSubmissionRequestInfo(); + context.setReservationId(getNewReservationId().getReservationId()); + // ReservationDefinition is null + + ClientResponse response = performCall( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_SUBMIT, null, + null, context, HTTPMethods.POST); + + if (response.getStatus() == STATUS_BADREQUEST) { + String ci = response.getEntity(String.class); + Assert.assertNotNull(ci); + } else { + Assert.fail(); + } + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#updateReservation()} inside Router. + */ + @Test(timeout = 1000) + public void testUpdateReservationXML() throws JSONException, Exception { + + // Test with a wrong HTTP method + ClientResponse badResponse = performCall( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_UPDATE, null, + null, null, HTTPMethods.PUT); + + Assert.assertEquals(STATUS_ERROR, badResponse.getStatus()); + + // Test with the correct HTTP method + + String reservationId = getNewReservationId().getReservationId(); + ReservationUpdateRequestInfo context = new ReservationUpdateRequestInfo(); + context.setReservationId(reservationId); + + ClientResponse response = performCall( + RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_UPDATE, null, + null, context, HTTPMethods.POST); + + if (response.getStatus() == STATUS_BADREQUEST) { + String ci = response.getEntity(String.class); + Assert.assertNotNull(ci); + } else { + Assert.fail(); + } + } + + /** + * This test validates the correctness of + * {@link RMWebServiceProtocol#deleteReservation()} inside Router. 
+   */
+  @Test(timeout = 1000)
+  public void testDeleteReservationXML() throws JSONException, Exception {
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse = performCall(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_DELETE, null,
+        null, null, HTTPMethods.PUT);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    String reservationId = getNewReservationId().getReservationId();
+    ReservationDeleteRequestInfo context = new ReservationDeleteRequestInfo();
+    context.setReservationId(reservationId);
+
+    ClientResponse response = performCall(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_DELETE, null,
+        null, context, HTTPMethods.POST);
+
+    if (response.getStatus() == STATUS_BADREQUEST) {
+      String ci = response.getEntity(String.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getNodeToLabels()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testGetNodeToLabelsXML() throws JSONException, Exception {
+
+    List responses = performGetCalls(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.GET_NODE_TO_LABELS,
+        NodeToLabelsInfo.class, null, null);
+
+    NodeToLabelsInfo routerResponse = responses.get(0);
+    NodeToLabelsInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getNodeToLabels().size(),
+        routerResponse.getNodeToLabels().size());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getClusterNodeLabels()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testGetClusterNodeLabelsXML() throws JSONException, Exception {
+
+    List responses = performGetCalls(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.GET_NODE_LABELS,
+        NodeLabelsInfo.class, null, null);
+
+    NodeLabelsInfo routerResponse = responses.get(0);
+    NodeLabelsInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getNodeLabels().size(),
+        routerResponse.getNodeLabels().size());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getLabelsOnNode()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testGetLabelsOnNodeXML() throws JSONException, Exception {
+
+    List responses =
+        performGetCalls(
+            RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.NODES + "/"
+                + getNodeId() + "/" + RMWSConsts.GET_LABELS,
+            NodeLabelsInfo.class, null, null);
+
+    NodeLabelsInfo routerResponse = responses.get(0);
+    NodeLabelsInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getNodeLabels().size(),
+        routerResponse.getNodeLabels().size());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#getLabelsToNodes()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testGetLabelsMappingXML() throws JSONException, Exception {
+
+    List responses = performGetCalls(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.LABEL_MAPPINGS,
+        LabelsToNodesInfo.class, null, null);
+
+    LabelsToNodesInfo routerResponse = responses.get(0);
+    LabelsToNodesInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getLabelsToNodes().size(),
+        routerResponse.getLabelsToNodes().size());
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#addToClusterNodeLabels()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testAddToClusterNodeLabelsXML() throws JSONException, Exception {
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse =
+        performCall(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.ADD_NODE_LABELS,
+            null, null, null, HTTPMethods.PUT);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    List nodeLabels = new ArrayList();
+    nodeLabels.add(NodeLabel.newInstance("default"));
+    NodeLabelsInfo context = new NodeLabelsInfo(nodeLabels);
+
+    ClientResponse response =
+        performCall(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.ADD_NODE_LABELS,
+            null, null, context, HTTPMethods.POST);
+
+    if (response.getStatus() == STATUS_OK) {
+      String ci = response.getEntity(String.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#removeFromCluserNodeLabels()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testRemoveFromCluserNodeLabelsXML()
+      throws JSONException, Exception {
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse = performCall(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.REMOVE_NODE_LABELS, null,
+        null, null, HTTPMethods.PUT);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    addNodeLabel();
+
+    ClientResponse response = performCall(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.REMOVE_NODE_LABELS,
+        RMWSConsts.LABELS, "default", null, HTTPMethods.POST);
+
+    if (response.getStatus() == STATUS_OK) {
+      String ci = response.getEntity(String.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#replaceLabelsOnNodes()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testReplaceLabelsOnNodesXML() throws JSONException, Exception {
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse = performCall(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.REPLACE_NODE_TO_LABELS,
+        null, null, null, HTTPMethods.PUT);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    addNodeLabel();
+
+    NodeToLabelsEntryList context = new NodeToLabelsEntryList();
+
+    ClientResponse response = performCall(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.REPLACE_NODE_TO_LABELS,
+        null, null, context, HTTPMethods.POST);
+
+    if (response.getStatus() == STATUS_OK) {
+      String ci = response.getEntity(String.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+  }
+
+  /**
+   * This test validates the correctness of
+   * {@link RMWebServiceProtocol#replaceLabelsOnNode()} inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testReplaceLabelsOnNodeXML() throws JSONException, Exception {
+
+    // Test with a wrong HTTP method
+    ClientResponse badResponse =
+        performCall(
+            RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.NODES + "/"
+                + getNodeId() + "/replace-labels",
+            null, null, null, HTTPMethods.PUT);
+
+    Assert.assertEquals(STATUS_ERROR, badResponse.getStatus());
+
+    // Test with the correct HTTP method
+
+    addNodeLabel();
+
+    ClientResponse response = performCall(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.NODES + "/" + getNodeId()
+            + "/replace-labels",
+        RMWSConsts.LABELS, "default", null, HTTPMethods.POST);
+
+    if (response.getStatus() == STATUS_OK) {
+      String ci = response.getEntity(String.class);
+      Assert.assertNotNull(ci);
+    } else {
+      Assert.fail();
+    }
+  }
+
+  /**
+   * This test validates the correctness of {@link WebServices#getAppAttempt}
+   * inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testGetAppAttemptXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    List responses = performGetCalls(
+        RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId + "/"
+            + RMWSConsts.APPATTEMPTS + "/" + getAppAttempt(appId),
+        AppAttemptInfo.class, null, null);
+
+    AppAttemptInfo routerResponse = responses.get(0);
+    AppAttemptInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getAppAttemptId(),
+        routerResponse.getAppAttemptId());
+  }
+
+  /**
+   * This test validates the correctness of {@link WebServices#getContainers}
+   * inside Router.
+   */
+  @Test(timeout = 1000)
+  public void testGetContainersXML() throws JSONException, Exception {
+
+    String appId = submitApplication();
+
+    List responses =
+        performGetCalls(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/"
+            + appId + "/" + RMWSConsts.APPATTEMPTS + "/" + getAppAttempt(appId)
+            + "/" + RMWSConsts.CONTAINERS, ContainersInfo.class, null, null);
+
+    ContainersInfo routerResponse = responses.get(0);
+    ContainersInfo rmResponse = responses.get(1);
+
+    Assert.assertNotNull(routerResponse);
+    Assert.assertNotNull(rmResponse);
+
+    Assert.assertEquals(rmResponse.getContainers().size(),
+        routerResponse.getContainers().size());
+  }
+
+  private String getNodeId() {
+    Client clientToRM = Client.create();
+    WebResource toRM =
+        clientToRM.resource(WebAppUtils.getRMWebAppURLWithScheme(conf))
+            .path(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.NODES);
+    ClientResponse response =
+        toRM.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+    NodesInfo ci = response.getEntity(NodesInfo.class);
+    return ci.getNodes().get(0).getNodeId();
+  }
+
+  private NewApplication getNewApplicationId() {
+    Client clientToRM = Client.create();
+    WebResource toRM =
+        clientToRM.resource(WebAppUtils.getRMWebAppURLWithScheme(conf)).path(
+            RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS_NEW_APPLICATION);
+    ClientResponse response =
+        toRM.accept(MediaType.APPLICATION_XML).post(ClientResponse.class);
+    return response.getEntity(NewApplication.class);
+  }
+
+  private String submitApplication() {
+    ApplicationSubmissionContextInfo context =
+        new ApplicationSubmissionContextInfo();
+    String appId = getNewApplicationId().getApplicationId();
+    context.setApplicationId(appId);
+
+    Client clientToRouter = Client.create();
+    WebResource toRM =
+        clientToRouter.resource(WebAppUtils.getRMWebAppURLWithScheme(conf))
+            .path(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS);
+    toRM.entity(context,
MediaType.APPLICATION_XML) + .accept(MediaType.APPLICATION_XML).post(ClientResponse.class); + return appId; + } + + private NewReservation getNewReservationId() { + Client clientToRM = Client.create(); + WebResource toRM = + clientToRM.resource(WebAppUtils.getRMWebAppURLWithScheme(conf)) + .path(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.RESERVATION_NEW); + ClientResponse response = + toRM.accept(MediaType.APPLICATION_XML).post(ClientResponse.class); + return response.getEntity(NewReservation.class); + } + + private String addNodeLabel() { + Client clientToRM = Client.create(); + WebResource toRM = + clientToRM.resource(WebAppUtils.getRMWebAppURLWithScheme(conf)) + .path(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.ADD_NODE_LABELS); + List nodeLabels = new ArrayList(); + nodeLabels.add(NodeLabel.newInstance("default")); + NodeLabelsInfo context = new NodeLabelsInfo(nodeLabels); + ClientResponse response = toRM.entity(context, MediaType.APPLICATION_XML) + .accept(MediaType.APPLICATION_XML).post(ClientResponse.class); + return response.getEntity(String.class); + } + + private String getAppAttempt(String appId) { + Client clientToRM = Client.create(); + WebResource toRM = + clientToRM.resource(WebAppUtils.getRMWebAppURLWithScheme(conf)) + .path(RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.APPS + "/" + appId + + "/" + RMWSConsts.ATTEMPTS); + ClientResponse response = + toRM.accept(MediaType.APPLICATION_XML).get(ClientResponse.class); + AppAttemptsInfo ci = response.getEntity(AppAttemptsInfo.class); + return ci.getAttempts().get(0).getAppAttemptId(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestableFederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestableFederationInterceptorREST.java new file mode 100644 index 00000000000..ce5bb2376d6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestableFederationInterceptorREST.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.router.webapp; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; + +/** + * Extends the FederationInterceptorREST and overrides methods to provide a + * testable implementation of FederationInterceptorREST. 
+ */
+public class TestableFederationInterceptorREST
+    extends FederationInterceptorREST {
+
+  private List badSubCluster = new ArrayList();
+
+  /**
+   * For testing purposes, some subclusters have to be brought down to
+   * simulate particular scenarios, such as an RM failover or network issues.
+   * For this reason we keep track of these bad subclusters. This method makes
+   * the subcluster unusable.
+   *
+   * @param badSC the subcluster to make unusable
+   */
+  protected void registerBadSubCluster(SubClusterId badSC) {
+
+    // Add the bad SubCluster to the interceptor cache, so that we can stop it
+    getOrCreateInterceptorForSubCluster(badSC, "test");
+
+    badSubCluster.add(badSC);
+    MockDefaultRequestInterceptorREST interceptor =
+        (MockDefaultRequestInterceptorREST) super.getInterceptorForSubCluster(
+            badSC);
+    interceptor.setRunning(false);
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/resources/capacity-scheduler.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/resources/capacity-scheduler.xml
new file mode 100644
index 00000000000..90c5eeb097e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/resources/capacity-scheduler.xml
@@ -0,0 +1,111 @@
+
+
+
+    yarn.scheduler.capacity.maximum-applications
+    10000
+
+      Maximum number of applications that can be pending and running.
+
+
+
+    yarn.scheduler.capacity.maximum-am-resource-percent
+    0.1
+
+      Maximum percent of resources in the cluster which can be used to run
+      application masters i.e. controls number of concurrent running
+      applications.
+
+
+
+    yarn.scheduler.capacity.resource-calculator
+    org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator
+
+      The ResourceCalculator implementation to be used to compare
+      Resources in the scheduler.
+      The default i.e. DefaultResourceCalculator only uses Memory while
+      DominantResourceCalculator uses dominant-resource to compare
+      multi-dimensional resources such as Memory, CPU etc.
+
+
+
+    yarn.scheduler.capacity.root.queues
+    default
+
+      The queues at this level (root is the root queue).
+
+
+
+    yarn.scheduler.capacity.root.default.capacity
+    100
+    Default queue target capacity.
+
+
+    yarn.scheduler.capacity.root.default.user-limit-factor
+    1
+
+      Default queue user limit a percentage from 0.0 to 1.0.
+
+
+
+    yarn.scheduler.capacity.root.default.maximum-capacity
+    100
+
+      The maximum capacity of the default queue.
+
+
+
+    yarn.scheduler.capacity.root.default.state
+    RUNNING
+
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+
+
+
+    yarn.scheduler.capacity.root.default.acl_submit_applications
+    *
+
+      The ACL of who can submit jobs to the default queue.
+
+
+
+    yarn.scheduler.capacity.root.default.acl_administer_queue
+    *
+
+      The ACL of who can administer jobs on the default queue.
+
+
+
+    yarn.scheduler.capacity.node-locality-delay
+    -1
+
+      Number of missed scheduling opportunities after which the
+      CapacityScheduler attempts to schedule rack-local containers.
+      Typically this should be set to number of racks in the cluster, this
+      feature is disabled by default, set to -1.
+ + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/resources/log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/resources/log4j.properties new file mode 100644 index 00000000000..81a3f6ad5d2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/resources/log4j.properties @@ -0,0 +1,19 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# log4j configuration used during build and unit tests + +log4j.rootLogger=info,stdout +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/resources/yarn-site.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/resources/yarn-site.xml new file mode 100644 index 00000000000..f3e0de3604b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/resources/yarn-site.xml @@ -0,0 +1,30 @@ + + + + + + + yarn.resourcemanager.reservation-system.enable + true + + + yarn.node-labels.enabled + true + + + yarn.resourcemanager.webapp.address + 0.0.0.0:8080 + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java index 60fc3e57eb7..bcdc46b8b3d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerService.java @@ -26,8 +26,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; @@ -43,6 +41,8 @@ import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.CleanerMetrics; import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The cleaner service that maintains the shared cache area, and cleans up stale @@ -57,7 +57,8 @@ public class CleanerService extends CompositeService { 
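The yarn-site.xml test resource above is picked up from the test classpath; a hedged sketch of how those settings become visible to the tests (the property names come from the resource above, everything else is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TestYarnSiteSketch {
  public static void main(String[] args) {
    // YarnConfiguration layers yarn-site.xml found on the classpath (such as
    // the test resource added by this patch) on top of the YARN defaults.
    Configuration conf = new YarnConfiguration();
    System.out.println(conf.getBoolean("yarn.node-labels.enabled", false));
    System.out.println(conf.get("yarn.resourcemanager.webapp.address"));
  }
}
```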
*/ public static final String GLOBAL_CLEANER_PID = ".cleaner_pid"; - private static final Log LOG = LogFactory.getLog(CleanerService.class); + private static final Logger LOG = + LoggerFactory.getLogger(CleanerService.class); private Configuration conf; private CleanerMetrics metrics; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java index a7fdcbd1354..3e0a62b63a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/CleanerTask.java @@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.sharedcachemanager; import java.io.IOException; import java.util.concurrent.locks.Lock; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.conf.Configuration; @@ -34,6 +32,8 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.sharedcache.SharedCacheUtil; import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.CleanerMetrics; import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The task that runs and cleans up the shared cache area for stale entries and @@ -44,7 +44,8 @@ import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore; @Evolving class CleanerTask implements Runnable { private static final String RENAMED_SUFFIX = "-renamed"; - private static final Log LOG = LogFactory.getLog(CleanerTask.class); + private static final Logger LOG = + LoggerFactory.getLogger(CleanerTask.class); private final String location; private final long sleepTime; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/ClientProtocolService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/ClientProtocolService.java index 1dcca6c96e9..4275674aa47 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/ClientProtocolService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/ClientProtocolService.java @@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.sharedcachemanager; import java.io.IOException; import java.net.InetSocketAddress; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.conf.Configuration; @@ -45,6 +43,8 @@ import 
org.apache.hadoop.yarn.server.sharedcache.SharedCacheUtil; import org.apache.hadoop.yarn.server.sharedcachemanager.metrics.ClientSCMMetrics; import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore; import org.apache.hadoop.yarn.server.sharedcachemanager.store.SharedCacheResourceReference; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This service handles all rpc calls from the client to the shared cache @@ -55,7 +55,8 @@ import org.apache.hadoop.yarn.server.sharedcachemanager.store.SharedCacheResourc public class ClientProtocolService extends AbstractService implements ClientSCMProtocol { - private static final Log LOG = LogFactory.getLog(ClientProtocolService.class); + private static final Logger LOG = + LoggerFactory.getLogger(ClientProtocolService.class); private final RecordFactory recordFactory = RecordFactoryProvider .getRecordFactory(null); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SCMAdminProtocolService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SCMAdminProtocolService.java index 6f2baf649c0..e6a885bff5e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SCMAdminProtocolService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SCMAdminProtocolService.java @@ -21,15 +21,12 @@ package org.apache.hadoop.yarn.server.sharedcachemanager; import java.io.IOException; import java.net.InetSocketAddress; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.security.YarnAuthorizationProvider; import org.apache.hadoop.yarn.server.api.SCMAdminProtocol; @@ -41,6 +38,8 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This service handles all SCMAdminProtocol rpc calls from administrators @@ -51,7 +50,8 @@ import org.apache.hadoop.yarn.ipc.YarnRPC; public class SCMAdminProtocolService extends AbstractService implements SCMAdminProtocol { - private static final Log LOG = LogFactory.getLog(SCMAdminProtocolService.class); + private static final Logger LOG = + LoggerFactory.getLogger(SCMAdminProtocolService.class); private final RecordFactory recordFactory = RecordFactoryProvider .getRecordFactory(null); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SharedCacheManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SharedCacheManager.java index 331e29ee32b..ca683f231bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SharedCacheManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/SharedCacheManager.java @@ -18,8 +18,6 @@ package org.apache.hadoop.yarn.server.sharedcachemanager; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -36,6 +34,8 @@ import org.apache.hadoop.yarn.server.sharedcachemanager.store.SCMStore; import org.apache.hadoop.yarn.server.sharedcachemanager.webapp.SCMWebServer; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This service maintains the shared cache meta data. It handles claiming and @@ -51,7 +51,8 @@ public class SharedCacheManager extends CompositeService { */ public static final int SHUTDOWN_HOOK_PRIORITY = 30; - private static final Log LOG = LogFactory.getLog(SharedCacheManager.class); + private static final Logger LOG = + LoggerFactory.getLogger(SharedCacheManager.class); private SCMStore store; @@ -156,7 +157,7 @@ public class SharedCacheManager extends CompositeService { sharedCacheManager.init(conf); sharedCacheManager.start(); } catch (Throwable t) { - LOG.fatal("Error starting SharedCacheManager", t); + LOG.error("Error starting SharedCacheManager", t); System.exit(-1); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/CleanerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/CleanerMetrics.java index b86a469f823..55cb074f7a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/CleanerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/CleanerMetrics.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.yarn.server.sharedcachemanager.metrics; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.metrics2.MetricsSource; @@ -31,6 +29,8 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MetricsSourceBuilder; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class is for maintaining the various Cleaner activity statistics and @@ -40,7 +40,8 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeLong; @Evolving @Metrics(name = "CleanerActivity", about 
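The commons-logging to SLF4J switch repeated across these classes enables parameterized messages, and since SLF4J has no FATAL level the LOG.fatal call in SharedCacheManager is mapped to LOG.error; a minimal sketch of the resulting idiom (class name and messages are illustrative only):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jIdiomSketch {

  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jIdiomSketch.class);

  public static void main(String[] args) {
    String resource = "/sharedcache/entry";

    // Parameterized logging defers message construction until the level is
    // enabled, so explicit isDebugEnabled() guards become unnecessary.
    LOG.debug("Cleaning resource at {}", resource);

    // SLF4J has no fatal level; fatal conditions are logged at error level.
    LOG.error("Error starting SharedCacheManager",
        new RuntimeException("startup failed"));
  }
}
```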
= "Cleaner service metrics", context = "yarn") public class CleanerMetrics { - public static final Log LOG = LogFactory.getLog(CleanerMetrics.class); + public static final Logger LOG = + LoggerFactory.getLogger(CleanerMetrics.class); private final MetricsRegistry registry = new MetricsRegistry("cleaner"); private final static CleanerMetrics INSTANCE = create(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/ClientSCMMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/ClientSCMMetrics.java index fe960c6e6f7..6b45745f3ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/ClientSCMMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/ClientSCMMetrics.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.yarn.server.sharedcachemanager.metrics; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.metrics2.MetricsSystem; @@ -27,6 +25,8 @@ import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class is for maintaining client requests metrics @@ -37,7 +37,8 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; @Metrics(about="Client SCM metrics", context="yarn") public class ClientSCMMetrics { - private static final Log LOG = LogFactory.getLog(ClientSCMMetrics.class); + private static final Logger LOG = + LoggerFactory.getLogger(ClientSCMMetrics.class); final MetricsRegistry registry; private final static ClientSCMMetrics INSTANCE = create(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/SharedCacheUploaderMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/SharedCacheUploaderMetrics.java index 7fff13a6aec..3cf6632eab2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/SharedCacheUploaderMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/SharedCacheUploaderMetrics.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.yarn.server.sharedcachemanager.metrics; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.metrics2.MetricsSystem; @@ -27,6 +25,8 @@ import 
org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class is for maintaining shared cache uploader requests metrics @@ -37,8 +37,8 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; @Metrics(about="shared cache upload metrics", context="yarn") public class SharedCacheUploaderMetrics { - static final Log LOG = - LogFactory.getLog(SharedCacheUploaderMetrics.class); + static final Logger LOG = + LoggerFactory.getLogger(SharedCacheUploaderMetrics.class); final MetricsRegistry registry; private final static SharedCacheUploaderMetrics INSTANCE = create(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java index 7b769a72e04..d917d9b1f1a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/store/InMemorySCMStore.java @@ -33,8 +33,6 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; @@ -52,6 +50,8 @@ import org.apache.hadoop.yarn.server.sharedcachemanager.AppChecker; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A thread safe version of an in-memory SCM store. 
The thread safety is @@ -74,7 +74,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; @Private @Evolving public class InMemorySCMStore extends SCMStore { - private static final Log LOG = LogFactory.getLog(InMemorySCMStore.class); + private static final Logger LOG = + LoggerFactory.getLogger(InMemorySCMStore.class); private final Map cachedResources = new ConcurrentHashMap(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMOverviewPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMOverviewPage.java index 27944d39192..cec085b3191 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMOverviewPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMOverviewPage.java @@ -43,7 +43,7 @@ import com.google.inject.Inject; @Unstable public class SCMOverviewPage extends TwoColumnLayout { - @Override protected void preHead(Page.HTML<_> html) { + @Override protected void preHead(Page.HTML<__> html) { set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); } @@ -60,9 +60,9 @@ public class SCMOverviewPage extends TwoColumnLayout { static private class SCMOverviewNavBlock extends HtmlBlock { @Override protected void render(Block html) { - html.div("#nav").h3("Tools").ul().li().a("/conf", "Configuration")._() - .li().a("/stacks", "Thread dump")._().li().a("/logs", "Logs")._() - .li().a("/metrics", "Metrics")._()._()._(); + html.div("#nav").h3("Tools").ul().li().a("/conf", "Configuration").__() + .li().a("/stacks", "Thread dump").__().li().a("/logs", "Logs").__() + .li().a("/metrics", "Metrics").__().__().__(); } } @@ -81,15 +81,15 @@ public class SCMOverviewPage extends TwoColumnLayout { CleanerMetrics.getInstance(), ClientSCMMetrics.getInstance(), SharedCacheUploaderMetrics.getInstance()); info("Shared Cache Manager overview"). - _("Started on:", Times.format(scm.getStartTime())). - _("Cache hits: ", metricsInfo.getCacheHits()). - _("Cache misses: ", metricsInfo.getCacheMisses()). - _("Cache releases: ", metricsInfo.getCacheReleases()). - _("Accepted uploads: ", metricsInfo.getAcceptedUploads()). - _("Rejected uploads: ", metricsInfo.getRejectUploads()). - _("Deleted files by the cleaner: ", metricsInfo.getTotalDeletedFiles()). - _("Processed files by the cleaner: ", metricsInfo.getTotalProcessedFiles()); - html._(InfoBlock.class); + __("Started on:", Times.format(scm.getStartTime())). + __("Cache hits: ", metricsInfo.getCacheHits()). + __("Cache misses: ", metricsInfo.getCacheMisses()). + __("Cache releases: ", metricsInfo.getCacheReleases()). + __("Accepted uploads: ", metricsInfo.getAcceptedUploads()). + __("Rejected uploads: ", metricsInfo.getRejectUploads()). + __("Deleted files by the cleaner: ", metricsInfo.getTotalDeletedFiles()). 
+ __("Processed files by the cleaner: ", metricsInfo.getTotalProcessedFiles()); + html.__(InfoBlock.class); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMWebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMWebServer.java index b81ed29f1c7..7984090fdce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMWebServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/webapp/SCMWebServer.java @@ -18,8 +18,6 @@ package org.apache.hadoop.yarn.server.sharedcachemanager.webapp; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -28,6 +26,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.sharedcachemanager.SharedCacheManager; import org.apache.hadoop.yarn.webapp.WebApp; import org.apache.hadoop.yarn.webapp.WebApps; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A very simple web interface for the metrics reported by @@ -37,7 +37,8 @@ import org.apache.hadoop.yarn.webapp.WebApps; @Private @Unstable public class SCMWebServer extends AbstractService { - private static final Log LOG = LogFactory.getLog(SCMWebServer.class); + private static final Logger LOG = + LoggerFactory.getLogger(SCMWebServer.class); private final SharedCacheManager scm; private WebApp webApp; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java index 329d57ebfca..de282fd0631 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java @@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server; import java.io.File; import java.io.IOException; import java.net.InetAddress; +import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Collection; import java.util.Map; @@ -36,6 +37,7 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.service.AbstractService; @@ -446,7 +448,16 @@ public class MiniYARNCluster extends CompositeService { public static String getHostname() { try { - return InetAddress.getLocalHost().getHostName(); + String hostname = InetAddress.getLocalHost().getHostName(); + // Create InetSocketAddress to see whether it is resolved or not. 
+ // If not, just return "localhost". + InetSocketAddress addr = + NetUtils.createSocketAddrForHost(hostname, 1); + if (addr.isUnresolved()) { + return "localhost"; + } else { + return hostname; + } } catch (UnknownHostException ex) { throw new RuntimeException(ex); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java index 1675a48dac5..80baf897376 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityGroupFSTimelineStore.java @@ -356,7 +356,13 @@ public class EntityGroupFSTimelineStore extends CompositeService @VisibleForTesting int scanActiveLogs() throws IOException { long startTime = Time.monotonicNow(); - RemoteIterator iter = list(activeRootPath); + int logsToScanCount = scanActiveLogs(activeRootPath); + metrics.addActiveLogDirScanTime(Time.monotonicNow() - startTime); + return logsToScanCount; + } + + int scanActiveLogs(Path dir) throws IOException { + RemoteIterator iter = list(dir); int logsToScanCount = 0; while (iter.hasNext()) { FileStatus stat = iter.next(); @@ -368,10 +374,9 @@ public class EntityGroupFSTimelineStore extends CompositeService AppLogs logs = getAndSetActiveLog(appId, stat.getPath()); executor.execute(new ActiveLogParser(logs)); } else { - LOG.debug("Unable to parse entry {}", name); + logsToScanCount += scanActiveLogs(stat.getPath()); } } - metrics.addActiveLogDirScanTime(Time.monotonicNow() - startTime); return logsToScanCount; } @@ -418,6 +423,18 @@ public class EntityGroupFSTimelineStore extends CompositeService appDirPath = getActiveAppPath(applicationId); if (fs.exists(appDirPath)) { appState = AppState.ACTIVE; + } else { + // check for user directory inside active path + RemoteIterator iter = list(activeRootPath); + while (iter.hasNext()) { + Path child = new Path(iter.next().getPath().getName(), + applicationId.toString()); + appDirPath = new Path(activeRootPath, child); + if (fs.exists(appDirPath)) { + appState = AppState.ACTIVE; + break; + } + } } } if (appState != AppState.UNKNOWN) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java index 7379dd6844a..f7a3d01b731 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java @@ -19,8 +19,6 @@ package org.apache.hadoop.yarn.server.timeline; import com.fasterxml.jackson.databind.ObjectMapper; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -34,6 +32,8 @@ import org.fusesource.leveldbjni.JniDBFactory; import org.iq80.leveldb.DB; import org.iq80.leveldb.DBIterator; import org.iq80.leveldb.Options; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; @@ -58,8 +58,8 @@ import java.util.Map; @Private @Unstable public class LevelDBCacheTimelineStore extends KeyValueBasedTimelineStore { - private static final Log LOG - = LogFactory.getLog(LevelDBCacheTimelineStore.class); + private static final Logger LOG + = LoggerFactory.getLogger(LevelDBCacheTimelineStore.class); private static final String CACHED_LDB_FILE_PREFIX = "-timeline-cache.ldb"; private String dbId; private DB entityDb; @@ -102,7 +102,7 @@ public class LevelDBCacheTimelineStore extends KeyValueBasedTimelineStore { localFS.setPermission(dbPath, LeveldbUtils.LEVELDB_DIR_UMASK); } } finally { - IOUtils.cleanup(LOG, localFS); + IOUtils.cleanupWithLogger(LOG, localFS); } LOG.info("Using leveldb path " + dbPath); entityDb = factory.open(new File(dbPath.toString()), options); @@ -113,7 +113,7 @@ public class LevelDBCacheTimelineStore extends KeyValueBasedTimelineStore { @Override protected synchronized void serviceStop() throws Exception { - IOUtils.cleanup(LOG, entityDb); + IOUtils.cleanupWithLogger(LOG, entityDb); Path dbPath = new Path( configuration.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), dbId + CACHED_LDB_FILE_PREFIX); @@ -125,7 +125,7 @@ public class LevelDBCacheTimelineStore extends KeyValueBasedTimelineStore { "timeline store " + dbPath); } } finally { - IOUtils.cleanup(LOG, localFS); + IOUtils.cleanupWithLogger(LOG, localFS); } super.serviceStop(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java index 8540d45e543..04587229b0f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestEntityGroupFSTimelineStore.java @@ -37,6 +37,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore.AppState; +import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.After; import org.junit.AfterClass; @@ -58,7 +60,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import static org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore.AppState; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; @@ -91,6 +92,7 @@ public class TestEntityGroupFSTimelineStore extends 
TimelineStoreTestUtils { private static ApplicationId mainTestAppId; private static Path mainTestAppDirPath; private static Path testDoneDirPath; + private static Path testActiveDirPath; private static String mainEntityLogFileName; private EntityGroupFSTimelineStore store; @@ -125,23 +127,28 @@ public class TestEntityGroupFSTimelineStore extends TimelineStoreTestUtils { + i); sampleAppIds.add(appId); } + testActiveDirPath = getTestRootPath("active"); // Among all sample applicationIds, choose the first one for most of the // tests. mainTestAppId = sampleAppIds.get(0); - mainTestAppDirPath = getTestRootPath(mainTestAppId.toString()); + mainTestAppDirPath = new Path(testActiveDirPath, mainTestAppId.toString()); mainEntityLogFileName = EntityGroupFSTimelineStore.ENTITY_LOG_PREFIX + EntityGroupPlugInForTest.getStandardTimelineGroupId(mainTestAppId); testDoneDirPath = getTestRootPath("done"); config.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR, testDoneDirPath.toString()); + config.set( + YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR, + testActiveDirPath.toString()); } @Before public void setup() throws Exception { for (ApplicationId appId : sampleAppIds) { - Path attemotDirPath = new Path(getTestRootPath(appId.toString()), - getAttemptDirName(appId)); + Path attemotDirPath = + new Path(new Path(testActiveDirPath, appId.toString()), + getAttemptDirName(appId)); createTestFiles(appId, attemotDirPath); } @@ -178,7 +185,7 @@ public class TestEntityGroupFSTimelineStore extends TimelineStoreTestUtils { public void tearDown() throws Exception { store.stop(); for (ApplicationId appId : sampleAppIds) { - fs.delete(getTestRootPath(appId.toString()), true); + fs.delete(new Path(testActiveDirPath,appId.toString()), true); } if (testJar != null) { testJar.delete(); @@ -414,8 +421,88 @@ public class TestEntityGroupFSTimelineStore extends TimelineStoreTestUtils { } + @Test + public void testGetEntityPluginRead() throws Exception { + EntityGroupFSTimelineStore store = null; + ApplicationId appId = + ApplicationId.fromString("application_1501509265053_0001"); + String user = UserGroupInformation.getCurrentUser().getShortUserName(); + Path userBase = new Path(testActiveDirPath, user); + Path userAppRoot = new Path(userBase, appId.toString()); + Path attemotDirPath = new Path(userAppRoot, getAttemptDirName(appId)); + + try { + store = createAndStartTimelineStore(AppState.ACTIVE); + String logFileName = EntityGroupFSTimelineStore.ENTITY_LOG_PREFIX + + EntityGroupPlugInForTest.getStandardTimelineGroupId(appId); + createTestFiles(appId, attemotDirPath, logFileName); + TimelineEntity entity = store.getEntity(entityNew.getEntityId(), + entityNew.getEntityType(), EnumSet.allOf(Field.class)); + assertNotNull(entity); + assertEquals(entityNew.getEntityId(), entity.getEntityId()); + assertEquals(entityNew.getEntityType(), entity.getEntityType()); + } finally { + if (store != null) { + store.stop(); + } + fs.delete(userBase, true); + } + } + + @Test + public void testScanActiveLogsAndMoveToDonePluginRead() throws Exception { + EntityGroupFSTimelineStore store = null; + ApplicationId appId = + ApplicationId.fromString("application_1501509265053_0002"); + String user = UserGroupInformation.getCurrentUser().getShortUserName(); + Path userBase = new Path(testActiveDirPath, user); + Path userAppRoot = new Path(userBase, appId.toString()); + Path attemotDirPath = new Path(userAppRoot, getAttemptDirName(appId)); + + try { + store = createAndStartTimelineStore(AppState.COMPLETED); + 
String logFileName = EntityGroupFSTimelineStore.ENTITY_LOG_PREFIX + + EntityGroupPlugInForTest.getStandardTimelineGroupId(appId); + createTestFiles(appId, attemotDirPath, logFileName); + store.scanActiveLogs(); + + TimelineEntity entity = store.getEntity(entityNew.getEntityId(), + entityNew.getEntityType(), EnumSet.allOf(Field.class)); + assertNotNull(entity); + assertEquals(entityNew.getEntityId(), entity.getEntityId()); + assertEquals(entityNew.getEntityType(), entity.getEntityType()); + } finally { + if (store != null) { + store.stop(); + } + fs.delete(userBase, true); + } + } + + private EntityGroupFSTimelineStore createAndStartTimelineStore( + AppState appstate) { + // stop before creating new store to get the lock + store.stop(); + + EntityGroupFSTimelineStore newStore = new EntityGroupFSTimelineStore() { + @Override + protected AppState getAppState(ApplicationId appId) throws IOException { + return appstate; + } + }; + newStore.init(config); + newStore.setFs(fs); + newStore.start(); + return newStore; + } + private void createTestFiles(ApplicationId appId, Path attemptDirPath) throws IOException { + createTestFiles(appId, attemptDirPath, mainEntityLogFileName); + } + + private void createTestFiles(ApplicationId appId, Path attemptDirPath, + String logPath) throws IOException { TimelineEntities entities = PluginStoreTestUtils.generateTestEntities(); PluginStoreTestUtils.writeEntities(entities, new Path(attemptDirPath, TEST_SUMMARY_LOG_FILE_NAME), fs); @@ -429,7 +516,7 @@ public class TestEntityGroupFSTimelineStore extends TimelineStoreTestUtils { TimelineEntities entityList = new TimelineEntities(); entityList.addEntity(entityNew); PluginStoreTestUtils.writeEntities(entityList, - new Path(attemptDirPath, mainEntityLogFileName), fs); + new Path(attemptDirPath, logPath), fs); FSDataOutputStream out = fs.create( new Path(attemptDirPath, TEST_DOMAIN_LOG_FILE_NAME)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java index 63a75d30a98..3f8978cb40c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java @@ -1325,7 +1325,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { int cfgCnt = 0; for (TimelineEntity entity : entities) { cfgCnt += entity.getConfigs().size(); - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } assertEquals(0, cfgCnt); @@ -1343,7 +1343,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { cfgCnt = 0; for (TimelineEntity entity : entities) { cfgCnt += entity.getConfigs().size(); - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } assertEquals(3, cfgCnt); @@ -1360,7 +1360,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { cfgCnt = 0; for (TimelineEntity entity : entities) { cfgCnt += 
entity.getConfigs().size(); - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); for (String configKey : entity.getConfigs().keySet()) { assertTrue(configKey.startsWith("cfg_") || configKey.startsWith("configuration_")); @@ -1393,7 +1393,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { assertNotNull(entities); assertEquals(1, entities.size()); for (TimelineEntity entity : entities) { - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } } finally { client.destroy(); @@ -1457,7 +1457,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { int infoCnt = 0; for (TimelineEntity entity : entities) { infoCnt += entity.getInfo().size(); - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } // Includes UID in info field even if fields not specified as INFO. assertEquals(1, infoCnt); @@ -1476,7 +1476,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { infoCnt = 0; for (TimelineEntity entity : entities) { infoCnt += entity.getInfo().size(); - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } // Includes UID in info field. assertEquals(4, infoCnt); @@ -1506,7 +1506,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { assertNotNull(entities); assertEquals(1, entities.size()); for (TimelineEntity entity : entities) { - assertTrue(entity.getId().equals("entity1")); + assertEquals("entity1", entity.getId()); } } finally { client.destroy(); @@ -1556,7 +1556,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { int metricCnt = 0; for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } assertEquals(0, metricCnt); @@ -1574,7 +1574,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { metricCnt = 0; for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } assertEquals(3, metricCnt); @@ -1593,7 +1593,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { metricCnt = 0; for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); for (TimelineMetric metric : entity.getMetrics()) { assertTrue(metric.getId().startsWith("MAP1")); assertEquals(TimelineMetric.Type.SINGLE_VALUE, metric.getType()); @@ -1614,7 +1614,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { metricCnt = 0; for (TimelineEntity entity : entities) { metricCnt += entity.getMetrics().size(); - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); for (TimelineMetric metric : entity.getMetrics()) { assertTrue(metric.getId().startsWith("MAP1")); if (metric.getId().equals("MAP1_SLOT_MILLIS")) { @@ -1654,7 +1654,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { assertNotNull(entities); assertEquals(1, entities.size()); for (TimelineEntity entity : entities) { - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } } finally { client.destroy(); @@ -1695,7 +1695,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { assertNotNull(entities); assertEquals(1, entities.size()); for (TimelineEntity entity : entities) { - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", 
entity.getId()); } // eventfilters=(!(event1,event3) OR event5,event6) OR @@ -1753,7 +1753,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { assertNotNull(entities); assertEquals(1, entities.size()); for (TimelineEntity entity : entities) { - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } // isrelatedto=(!(type3:entity31,type2:entity21:entity22)OR type5: @@ -1785,7 +1785,7 @@ public class TestTimelineReaderWebServicesHBaseStorage { assertNotNull(entities); assertEquals(1, entities.size()); for (TimelineEntity entity : entities) { - assertTrue(entity.getId().equals("entity2")); + assertEquals("entity2", entity.getId()); } // relatesto=(!(type3:entity31,type2:entity21:entity22)OR type5:entity51, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java index b3e5197ddff..3948d236741 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java @@ -69,6 +69,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.application.Applica import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable; import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName; import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter; +import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils; import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter; import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator; import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter; @@ -493,7 +494,8 @@ public class TestHBaseTimelineStorageApps { event.addInfo(expKey, expVal); final TimelineEntity entity = new ApplicationEntity(); - entity.setId(ApplicationId.newInstance(0, 1).toString()); + entity.setId(HBaseTimelineStorageUtils.convertApplicationIdToString( + ApplicationId.newInstance(0, 1))); entity.addEvent(event); TimelineEntities entities = new TimelineEntities(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java index 4b4c3e17330..e18d0d065b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java @@ -62,6 +62,7 @@ import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefi import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field; import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName; import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter; +import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils; import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter; import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator; import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter; @@ -202,8 +203,9 @@ public class TestHBaseTimelineStorageEntities { String flow = "some_flow_name"; String flowVersion = "AB7822C10F1111"; long runid = 1002345678919L; - String appName = ApplicationId.newInstance(System.currentTimeMillis() + - 9000000L, 1).toString(); + String appName = HBaseTimelineStorageUtils.convertApplicationIdToString( + ApplicationId.newInstance(System.currentTimeMillis() + 9000000L, 1) + ); hbi.write(cluster, user, flow, flowVersion, runid, appName, te); hbi.stop(); @@ -399,8 +401,8 @@ public class TestHBaseTimelineStorageEntities { String flow = "other_flow_name"; String flowVersion = "1111F01C2287BA"; long runid = 1009876543218L; - String appName = ApplicationId.newInstance(System.currentTimeMillis() + - 9000000L, 1).toString(); + String appName = HBaseTimelineStorageUtils.convertApplicationIdToString( + ApplicationId.newInstance(System.currentTimeMillis() + 9000000L, 1)); byte[] startRow = new EntityRowKeyPrefix(cluster, user, flow, runid, appName) .getRowKeyPrefix(); @@ -487,7 +489,9 @@ public class TestHBaseTimelineStorageEntities { event.addInfo(expKey, expVal); final TimelineEntity entity = new ApplicationEntity(); - entity.setId(ApplicationId.newInstance(0, 1).toString()); + entity.setId( + HBaseTimelineStorageUtils.convertApplicationIdToString( + ApplicationId.newInstance(0, 1))); entity.addEvent(event); TimelineEntities entities = new TimelineEntities(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java index cccae267d88..a934a3dbf19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java @@ -22,8 +22,6 @@ import java.io.IOException; import java.util.HashSet; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; import org.apache.hadoop.hbase.filter.FamilyFilter; @@ -36,13 +34,16 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix; import org.apache.hadoop.hbase.filter.QualifierFilter; import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Set of utility methods used by timeline filter classes. */ public final class TimelineFilterUtils { - private static final Log LOG = LogFactory.getLog(TimelineFilterUtils.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineFilterUtils.class); private TimelineFilterUtils() { } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java index a384a84d285..dc50f42396f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java @@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.timelineservice.storage; import java.io.IOException; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.client.Connection; @@ -34,6 +32,8 @@ import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilter import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext; import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReader; import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReaderFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * HBase based implementation for {@link TimelineReader}. 
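The hunks in this part of the patch all apply the same commons-logging to SLF4J migration. A minimal sketch of the resulting pattern, using a placeholder class name rather than one of the patched classes, looks like this:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PlaceholderService {
  // Old form removed by the patch:
  //   private static final Log LOG = LogFactory.getLog(PlaceholderService.class);
  private static final Logger LOG =
      LoggerFactory.getLogger(PlaceholderService.class);

  public void serviceStart() {
    // SLF4J has no fatal level, so former LOG.fatal(...) calls become LOG.error(...),
    // and IOUtils.cleanup(LOG, ...) call sites move to IOUtils.cleanupWithLogger(LOG, ...).
    LOG.info("Started {}", PlaceholderService.class.getSimpleName());
  }
}
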
@@ -41,8 +41,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEnti public class HBaseTimelineReaderImpl extends AbstractService implements TimelineReader { - private static final Log LOG = LogFactory - .getLog(HBaseTimelineReaderImpl.class); + private static final Logger LOG = LoggerFactory + .getLogger(HBaseTimelineReaderImpl.class); private Configuration hbaseConf = null; private Connection conn; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java index b94b85fa314..afa58cb88a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java @@ -21,8 +21,6 @@ import java.io.IOException; import java.util.Map; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -65,6 +63,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn; import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix; import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey; import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This implements a hbase based backend for storing the timeline entity @@ -76,8 +76,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable; public class HBaseTimelineWriterImpl extends AbstractService implements TimelineWriter { - private static final Log LOG = LogFactory - .getLog(HBaseTimelineWriterImpl.class); + private static final Logger LOG = LoggerFactory + .getLogger(HBaseTimelineWriterImpl.class); private Connection conn; private TypedBufferedMutator entityTable; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java index b3b749edf98..dbed05d5e19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java @@ -29,8 +29,6 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.cli.PosixParser; import org.apache.commons.lang.StringUtils; -import 
org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -46,6 +44,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTa import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This creates the schema for a hbase based backend for storing application @@ -58,7 +58,8 @@ public final class TimelineSchemaCreator { } final static String NAME = TimelineSchemaCreator.class.getSimpleName(); - private static final Log LOG = LogFactory.getLog(TimelineSchemaCreator.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineSchemaCreator.class); private static final String SKIP_EXISTING_TABLE_OPTION_SHORT = "s"; private static final String APP_METRICS_TTL_OPTION_SHORT = "ma"; private static final String APP_TABLE_NAME_SHORT = "a"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java index cb4fc92d755..d3bdd3949c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java @@ -19,8 +19,6 @@ package org.apache.hadoop.yarn.server.timelineservice.storage.application; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -30,6 +28,8 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable; import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The application table as column families info, config and metrics. Info @@ -99,7 +99,8 @@ public class ApplicationTable extends BaseTable { /** default max number of versions. 
*/ private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000; - private static final Log LOG = LogFactory.getLog(ApplicationTable.class); + private static final Logger LOG = + LoggerFactory.getLogger(ApplicationTable.class); public ApplicationTable() { super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java index 301cf997026..40d95a4ced7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java @@ -18,8 +18,6 @@ package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -29,6 +27,8 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable; import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -68,7 +68,8 @@ public class AppToFlowTable extends BaseTable { /** default value for app_flow table name. 
*/ private static final String DEFAULT_TABLE_NAME = "timelineservice.app_flow"; - private static final Log LOG = LogFactory.getLog(AppToFlowTable.class); + private static final Logger LOG = + LoggerFactory.getLogger(AppToFlowTable.class); public AppToFlowTable() { super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java index c165801306c..51604f012cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java @@ -82,7 +82,8 @@ public final class AppIdKeyConverter implements KeyConverter { Bytes.toLong(appIdBytes, 0, Bytes.SIZEOF_LONG)); int seqId = HBaseTimelineStorageUtils.invertInt( Bytes.toInt(appIdBytes, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT)); - return ApplicationId.newInstance(clusterTs, seqId).toString(); + return HBaseTimelineStorageUtils.convertApplicationIdToString( + ApplicationId.newInstance(clusterTs, seqId)); } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java index be55db50a2b..a9c2148ebc4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java @@ -24,13 +24,14 @@ import java.util.Map.Entry; import java.util.NavigableMap; import java.util.TreeMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension; import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * This class is meant to be used only by explicit Columns, and not directly to * write by clients. @@ -38,7 +39,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute; * @param refers to the table. 
*/ public class ColumnHelper { - private static final Log LOG = LogFactory.getLog(ColumnHelper.class); + private static final Logger LOG = + LoggerFactory.getLogger(ColumnHelper.class); private final ColumnFamily columnFamily; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java index e93b47053a1..b8c70291793 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java @@ -17,8 +17,6 @@ package org.apache.hadoop.yarn.server.timelineservice.storage.common; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -26,12 +24,16 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension; import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation; import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute; import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; +import java.text.NumberFormat; import java.util.List; import java.util.Map; @@ -41,8 +43,8 @@ import java.util.Map; public final class HBaseTimelineStorageUtils { /** milliseconds in one day. */ public static final long MILLIS_ONE_DAY = 86400000L; - private static final Log LOG = - LogFactory.getLog(HBaseTimelineStorageUtils.class); + private static final Logger LOG = + LoggerFactory.getLogger(HBaseTimelineStorageUtils.class); private HBaseTimelineStorageUtils() { } @@ -240,4 +242,35 @@ public final class HBaseTimelineStorageUtils { long dayTimestamp = ts - (ts % MILLIS_ONE_DAY); return dayTimestamp; } + + private static final ThreadLocal APP_ID_FORMAT = + new ThreadLocal() { + @Override + public NumberFormat initialValue() { + NumberFormat fmt = NumberFormat.getInstance(); + fmt.setGroupingUsed(false); + fmt.setMinimumIntegerDigits(4); + return fmt; + } + }; + + /** + * A utility method that converts ApplicationId to string without using + * FastNumberFormat in order to avoid the incompatibility issue caused + * by mixing hadoop-common 2.5.1 and hadoop-yarn-api 3.0 in this module. + * This is a work-around implementation as discussed in YARN-6905. 
+ * + * @param appId application id + * @return the string representation of the given application id + * + */ + public static String convertApplicationIdToString(ApplicationId appId) { + StringBuilder sb = new StringBuilder(64); + sb.append(ApplicationId.appIdStrPrefix); + sb.append("_"); + sb.append(appId.getClusterTimestamp()); + sb.append('_'); + sb.append(APP_ID_FORMAT.get().format(appId.getId())); + return sb.toString(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java index ddf0406c80b..df5ce69a271 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java @@ -19,8 +19,6 @@ package org.apache.hadoop.yarn.server.timelineservice.storage.entity; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -30,6 +28,8 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable; import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The entity table as column families info, config and metrics. Info stores @@ -99,7 +99,8 @@ public class EntityTable extends BaseTable { /** default max number of versions. 
*/ private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000; - private static final Log LOG = LogFactory.getLog(EntityTable.class); + private static final Logger LOG = + LoggerFactory.getLogger(EntityTable.class); public EntityTable() { super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java index 8a0430c16e3..e646eb26a56 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java @@ -19,8 +19,6 @@ package org.apache.hadoop.yarn.server.timelineservice.storage.flow; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -29,6 +27,8 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The flow activity table has column family info @@ -63,7 +63,8 @@ public class FlowActivityTable extends BaseTable { public static final String DEFAULT_TABLE_NAME = "timelineservice.flowactivity"; - private static final Log LOG = LogFactory.getLog(FlowActivityTable.class); + private static final Logger LOG = + LoggerFactory.getLogger(FlowActivityTable.class); /** default max number of versions. 
*/ public static final int DEFAULT_METRICS_MAX_VERSIONS = Integer.MAX_VALUE; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java index 2be6ef80a34..221420eb503 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java @@ -24,8 +24,6 @@ import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -50,13 +48,16 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils; import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Coprocessor for flow run table. */ public class FlowRunCoprocessor extends BaseRegionObserver { - private static final Log LOG = LogFactory.getLog(FlowRunCoprocessor.class); + private static final Logger LOG = + LoggerFactory.getLogger(FlowRunCoprocessor.class); private boolean isFlowRunRegion = false; private Region region; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java index 547bef075a5..9c6549ffc12 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java @@ -19,8 +19,6 @@ package org.apache.hadoop.yarn.server.timelineservice.storage.flow; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -29,6 +27,8 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The flow run table has column family info @@ -94,7 +94,8 @@ public class FlowRunTable extends BaseTable { /** default value for flowrun table name. 
*/ public static final String DEFAULT_TABLE_NAME = "timelineservice.flowrun"; - private static final Log LOG = LogFactory.getLog(FlowRunTable.class); + private static final Logger LOG = + LoggerFactory.getLogger(FlowRunTable.class); /** default max number of versions. */ public static final int DEFAULT_METRICS_MAX_VERSIONS = Integer.MAX_VALUE; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java index 0e3c8ee1cc1..dbd04843eb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java @@ -27,8 +27,6 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -52,6 +50,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGen import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Invoked via the coprocessor when a Get or a Scan is issued for flow run @@ -62,7 +62,8 @@ import com.google.common.annotations.VisibleForTesting; */ class FlowScanner implements RegionScanner, Closeable { - private static final Log LOG = LogFactory.getLog(FlowScanner.class); + private static final Logger LOG = + LoggerFactory.getLogger(FlowScanner.class); /** * use a special application id to represent the flow id this is needed since diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java index 7b294a8af47..424d14118b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java @@ -27,8 +27,6 @@ import java.util.NavigableSet; import java.util.Set; import java.util.TreeSet; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; @@ -54,6 +52,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator; import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter; import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The base class for reading and deserializing timeline entities from the @@ -61,7 +61,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn * entities that are being requested. */ public abstract class TimelineEntityReader { - private static final Log LOG = LogFactory.getLog(TimelineEntityReader.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineEntityReader.class); private final boolean singleEntityRead; private TimelineReaderContext context; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java new file mode 100644 index 00000000000..73bc29efc48 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.timelineservice.storage.common; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test for HBaseTimelineStorageUtils.convertApplicationIdToString(), + * a custom conversion from ApplicationId to String that avoids the + * incompatibility issue caused by mixing hadoop-common 2.5.1 and + * hadoop-yarn-api 3.0. See YARN-6905. 
+ */ +public class TestCustomApplicationIdConversion { + @Test + public void testConvertAplicationIdToString() { + ApplicationId applicationId = ApplicationId.newInstance(0, 1); + String applicationIdStr = + HBaseTimelineStorageUtils.convertApplicationIdToString(applicationId); + Assert.assertEquals(applicationId, + ApplicationId.fromString(applicationIdStr)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java index 0b05309b432..56f7b2b8c97 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java @@ -19,8 +19,6 @@ package org.apache.hadoop.yarn.server.timelineservice.collector; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -32,6 +30,8 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.HashSet; import java.util.Map; @@ -48,7 +48,8 @@ import java.util.concurrent.TimeUnit; @Private @Unstable public class AppLevelTimelineCollector extends TimelineCollector { - private static final Log LOG = LogFactory.getLog(TimelineCollector.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineCollector.class); private final static int AGGREGATION_EXECUTOR_NUM_THREADS = 1; private final static int AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS = 15; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java index 0323d7b9d9b..171978287c2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java @@ -26,8 +26,6 @@ import java.net.InetSocketAddress; import java.net.URI; import java.util.HashMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -47,6 +45,8 @@ 
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Class on the NodeManager side that manages adding and removing collectors and @@ -55,8 +55,8 @@ import com.google.common.annotations.VisibleForTesting; @Private @Unstable public class NodeTimelineCollectorManager extends TimelineCollectorManager { - private static final Log LOG = - LogFactory.getLog(NodeTimelineCollectorManager.class); + private static final Logger LOG = + LoggerFactory.getLogger(NodeTimelineCollectorManager.class); // REST server for this collector manager. private HttpServer2 timelineRestServer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java index 266bd04d2eb..e4e6421d108 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java @@ -23,8 +23,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -43,6 +41,8 @@ import org.apache.hadoop.yarn.server.api.ContainerTerminationContext; import org.apache.hadoop.yarn.server.api.ContainerType; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The top-level server for the per-node timeline collector manager. 
Currently @@ -52,8 +52,8 @@ import com.google.common.annotations.VisibleForTesting; @Private @Unstable public class PerNodeTimelineCollectorsAuxService extends AuxiliaryService { - private static final Log LOG = - LogFactory.getLog(PerNodeTimelineCollectorsAuxService.class); + private static final Logger LOG = + LoggerFactory.getLogger(PerNodeTimelineCollectorsAuxService.class); private static final int SHUTDOWN_HOOK_PRIORITY = 30; private final NodeTimelineCollectorManager collectorManager; @@ -209,7 +209,7 @@ public class PerNodeTimelineCollectorsAuxService extends AuxiliaryService { auxService.init(conf); auxService.start(); } catch (Throwable t) { - LOG.fatal("Error starting PerNodeTimelineCollectorServer", t); + LOG.error("Error starting PerNodeTimelineCollectorServer", t); ExitUtil.terminate(-1, "Error starting PerNodeTimelineCollectorServer"); } return auxService; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java index 5416b26eea1..37387f1f767 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java @@ -26,8 +26,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -39,6 +37,8 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse; import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Service that handles writes to the timeline service and writes them to the @@ -51,7 +51,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter; @Unstable public abstract class TimelineCollector extends CompositeService { - private static final Log LOG = LogFactory.getLog(TimelineCollector.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineCollector.class); public static final String SEPARATOR = "_"; private TimelineWriter writer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java index 07cbb2b24ec..94b95ad3c09 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java @@ -26,8 +26,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -39,6 +37,8 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Class that manages adding and removing collectors and their lifecycle. It @@ -48,8 +48,8 @@ import com.google.common.annotations.VisibleForTesting; @InterfaceAudience.Private @InterfaceStability.Unstable public class TimelineCollectorManager extends AbstractService { - private static final Log LOG = - LogFactory.getLog(TimelineCollectorManager.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineCollectorManager.class); private TimelineWriter writer; private ScheduledExecutorService writerFlusher; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java index fe04b7afc5b..efb5d6bf04c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java @@ -36,8 +36,6 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -58,6 +56,8 @@ import org.apache.hadoop.yarn.webapp.ForbiddenException; import org.apache.hadoop.yarn.webapp.NotFoundException; import com.google.inject.Singleton; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The main per-node REST end point for timeline service writes. 
It is @@ -69,8 +69,8 @@ import com.google.inject.Singleton; @Singleton @Path("/ws/v2/timeline") public class TimelineCollectorWebService { - private static final Log LOG = - LogFactory.getLog(TimelineCollectorWebService.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineCollectorWebService.class); private @Context ServletContext context; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java index 2faf4b61eec..d7eff328686 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java @@ -25,8 +25,6 @@ import java.net.URI; import java.util.HashMap; import java.util.Map; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -48,12 +46,15 @@ import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** Main class for Timeline Reader. 
*/ @Private @Unstable public class TimelineReaderServer extends CompositeService { - private static final Log LOG = LogFactory.getLog(TimelineReaderServer.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineReaderServer.class); private static final int SHUTDOWN_HOOK_PRIORITY = 30; static final String TIMELINE_READER_MANAGER_ATTR = "timeline.reader.manager"; @@ -203,7 +204,7 @@ public class TimelineReaderServer extends CompositeService { timelineReaderServer.init(conf); timelineReaderServer.start(); } catch (Throwable t) { - LOG.fatal("Error starting TimelineReaderWebServer", t); + LOG.error("Error starting TimelineReaderWebServer", t); ExitUtil.terminate(-1, "Error starting TimelineReaderWebServer"); } return timelineReaderServer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java index 139a1be855e..b3e3cdc5fee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java @@ -40,8 +40,6 @@ import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.http.JettyUtils; @@ -57,6 +55,8 @@ import org.apache.hadoop.yarn.webapp.NotFoundException; import com.google.common.annotations.VisibleForTesting; import com.google.inject.Singleton; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** REST end point for Timeline Reader. 
*/ @Private @@ -64,8 +64,8 @@ import com.google.inject.Singleton; @Singleton @Path("/ws/v2/timeline") public class TimelineReaderWebServices { - private static final Log LOG = - LogFactory.getLog(TimelineReaderWebServices.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineReaderWebServices.class); @Context private ServletContext ctxt; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java index 967702b1775..b4e792b0ea5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java @@ -39,8 +39,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.commons.csv.CSVFormat; import org.apache.commons.csv.CSVParser; import org.apache.commons.csv.CSVRecord; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; @@ -54,6 +52,8 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStor import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; import com.google.common.annotations.VisibleForTesting; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * File System based implementation for TimelineReader. 
This implementation may @@ -64,8 +64,8 @@ import com.google.common.annotations.VisibleForTesting; public class FileSystemTimelineReaderImpl extends AbstractService implements TimelineReader { - private static final Log LOG = - LogFactory.getLog(FileSystemTimelineReaderImpl.class); + private static final Logger LOG = + LoggerFactory.getLogger(FileSystemTimelineReaderImpl.class); private String rootPath; private static final String ENTITIES_DIR = "entities"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java index 9b83659ca00..7f7d6405ae9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineStorageUtils.java @@ -23,8 +23,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; @@ -48,8 +46,6 @@ public final class TimelineStorageUtils { private TimelineStorageUtils() { } - private static final Log LOG = LogFactory.getLog(TimelineStorageUtils.class); - /** * Matches key-values filter. Used for relatesTo/isRelatedTo filters. 
* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java index 7d61f74a207..4886c55aeb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.webproxy; import org.apache.hadoop.yarn.webapp.MimeType; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,7 +41,7 @@ public class ProxyUtils { "This filter only works for HTTP/HTTPS"; public static final String LOCATION = "Location"; - public static class _ implements Hamlet._ { + public static class __ implements Hamlet.__ { //Empty } @@ -50,7 +50,7 @@ public class ProxyUtils { super(out, 0, false); } - public HTML html() { + public HTML html() { return new HTML<>("html", null, EnumSet.of(EOpt.ENDTAG)); } } @@ -86,13 +86,13 @@ public class ProxyUtils { PrintWriter writer = response.getWriter(); Page p = new Page(writer); p.html() - .head().title("Moved")._() + .head().title("Moved").__() .body() .h1("Moved") .div() - ._("Content has moved ") - .a(location, "here")._() - ._()._(); + .__("Content has moved ") + .a(location, "here").__() + .__().__(); writer.close(); } @@ -110,7 +110,7 @@ public class ProxyUtils { Page p = new Page(resp.getWriter()); p.html(). h1(message). - _(); + __(); } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java index b32ee301554..e1588c115a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java @@ -58,7 +58,7 @@ import org.apache.hadoop.yarn.util.Apps; import org.apache.hadoop.yarn.util.StringHelper; import org.apache.hadoop.yarn.util.TrackingUriPlugin; import org.apache.hadoop.yarn.webapp.MimeType; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.http.Header; import org.apache.http.HttpResponse; @@ -108,7 +108,7 @@ public class WebAppProxyServlet extends HttpServlet { /** * Empty Hamlet class. */ - private static class _ implements Hamlet._ { + private static class __ implements Hamlet.__ { //Empty } @@ -117,7 +117,7 @@ public class WebAppProxyServlet extends HttpServlet { super(out, 0, false); } - public HTML html() { + public HTML html() { return new HTML<>("html", null, EnumSet.of(EOpt.ENDTAG)); } } @@ -172,10 +172,10 @@ public class WebAppProxyServlet extends HttpServlet { p.html(). h1("WARNING: The following page may not be safe!"). h3(). 
- _("click ").a(link, "here"). - _(" to continue to an Application Master web interface owned by ", user). - _(). - _(); + __("click ").a(link, "here"). + __(" to continue to an Application Master web interface owned by ", user). + __(). + __(); } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml index 517326b59d6..8335fc87843 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml @@ -45,5 +45,6 @@ hadoop-yarn-server-timelineservice hadoop-yarn-server-timelineservice-hbase hadoop-yarn-server-timelineservice-hbase-tests + hadoop-yarn-server-router diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md new file mode 100644 index 00000000000..8a6c1371926 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md @@ -0,0 +1,309 @@ + + +Hadoop: YARN Federation +======================= + + + +Purpose +------- +YARN is known to scale to thousands of nodes. The scalability of [YARN](./YARN.html) is determined by the Resource Manager, and is proportional to number of nodes, active applications, active containers, and frequency of heartbeat (of both nodes and applications). Lowering heartbeat can provide scalability increase, but is detrimental to utilization (see old Hadoop 1.x experience). +This document described a federation-based approach to scale a single YARN cluster to tens of thousands of nodes, by federating multiple YARN sub-clusters. The proposed approach is to divide a large (10-100k nodes) cluster into smaller units called sub-clusters, each with its own YARN RM and compute nodes. The federation system will stitch these sub-clusters together and make them appear as one large YARN cluster to the applications. +The applications running in this federated environment will see a single massive YARN cluster and will be able to schedule tasks on any node of the federated cluster. Under the hood, the federation system will negotiate with sub-clusters resource managers and provide resources to the application. The goal is to allow an individual job to “span” sub-clusters seamlessly. + +This design is structurally scalable, as we bound the number of nodes each RM is responsible for, and appropriate policies, will try to ensure that the majority of applications will reside within a single sub-cluster, thus the number of applications each RM will see is also bounded. This means we could almost linearly scale, by simply adding sub-clusters (as very little coordination is needed across them). +This architecture can provide very tight enforcement of scheduling invariants within each sub-cluster (simply inherits from YARN), while continuous rebalancing across subcluster will enforce (less strictly) that these properties are also respected at a global level (e.g., if a sub-cluster loses a large number of nodes, we could re-map queues to other sub-clusters to ensure users running on the impaired sub-cluster are not unfairly affected). + +Federation is designed as a “layer” atop of existing YARN codebase, with limited changes in the core YARN mechanisms. + +Assumptions: +* We assume reasonably good connectivity across sub-clusters (e.g., we are not looking to federate across DC yet, though future investigations of this are not excluded). 
+* We rely on HDFS federation (or equivalently scalable DFS solutions) to take care of the scalability of the store side. + + +Architecture +------------ +OSS YARN has been known to scale up to about a few thousand nodes. The proposed architecture leverages the notion of federating a number of such smaller YARN clusters, referred to as sub-clusters, into a larger federated YARN cluster comprising tens of thousands of nodes. +The applications running in this federated environment see a unified large YARN cluster and will be able to schedule tasks on any node in the cluster. Under the hood, the federation system will negotiate with sub-cluster RMs and provide resources to the application. The logical architecture in Figure 1 shows the main components that comprise the federated cluster, which are described below. + +![YARN Federation Architecture | width=800](./images/federation_architecture.png) + +###YARN Sub-cluster +A sub-cluster is a YARN cluster with up to a few thousand nodes. The exact size of the sub-cluster will be determined considering ease of deployment/maintenance, alignment +with network or availability zones, and general best practices. + +The sub-cluster YARN RM will run with work-preserving high-availability turned on, i.e., we should be able to tolerate YARN RM and NM failures with minimal disruption. +If the entire sub-cluster is compromised, external mechanisms will ensure that jobs are resubmitted in a separate sub-cluster (this could eventually be included in the federation design). + +A sub-cluster is also the scalability unit in a federated environment. We can scale out the federated environment by adding one or more sub-clusters. + +*Note*: by design each sub-cluster is a fully functional YARN RM, and its contribution to the federation can be set to be only a fraction of its overall capacity, +i.e. a sub-cluster can have a “partial” commitment to the federation, while retaining the ability to give out part of its capacity in a completely local way. + +###Router +YARN applications are submitted to one of the Routers, which in turn applies a routing policy (obtained from the Policy Store), queries the State Store for the sub-cluster +URL and redirects the application submission request to the appropriate sub-cluster RM. We call the sub-cluster where the job is started the “home sub-cluster”, and we call +all other sub-clusters that a job spans “secondary sub-clusters”. +The Router exposes the ApplicationClientProtocol to the outside world, transparently hiding the presence of multiple RMs. To achieve this, the Router also persists the mapping +between the application and its home sub-cluster into the State Store. This allows Routers to be soft-state while supporting user requests cheaply, as any Router can recover +this application-to-home-sub-cluster mapping and direct requests to the right RM without broadcasting them. For performance, caching and session stickiness might be advisable. + +###AMRMProxy +The AMRMProxy is a key component that allows the application to scale and run across sub-clusters. The AMRMProxy runs on all the NM machines and acts as a proxy to the +YARN RM for the AMs by implementing the ApplicationMasterProtocol. Applications will not be allowed to communicate with the sub-cluster RMs directly. They are forced +by the system to connect only to the AMRMProxy endpoint, which provides transparent access to multiple YARN RMs (by dynamically routing/splitting/merging the communications).
+At any one time, a job can span one home sub-cluster and multiple secondary sub-clusters, but the policies operating in the AMRMProxy try to limit the footprint of each job +to minimize overhead on the scheduling infrastructure (more in the section on scalability/load). The interceptor chain architecture of the AMRMProxy is shown in the figure below. + +![Architecture of the AMRMProxy interceptor chain | width=800](./images/amrmproxy_architecture.png) + +*Role of AMRMProxy* +1. Protect the sub-cluster YARN RMs from misbehaving AMs. The AMRMProxy can prevent DDoS attacks by throttling/killing AMs that are asking for too many resources. +2. Mask the multiple YARN RMs in the cluster, and transparently allow the AM to span across sub-clusters. All container allocations are done by the YARN RM framework that consists of the AMRMProxy fronting the home and other sub-cluster RMs. +3. Intercept all the requests, and thus enforce application quotas, which would not be enforceable by a sub-cluster RM (as each only sees a fraction of the AM requests). +4. Enforce load-balancing / overflow policies. + +###Global Policy Generator +The Global Policy Generator oversees the entire federation and ensures that the system is configured and tuned properly all the time. +A key design point is that the cluster availability does not depend on an always-on GPG. The GPG operates continuously but out-of-band from all cluster operations, +and provides us with a unique vantage point that allows it to enforce global invariants, affect load balancing, trigger draining of sub-clusters that will undergo maintenance, etc. +More precisely, the GPG will update user capacity allocation-to-subcluster mappings, and more rarely change the policies that run in Routers, AMRMProxy (and possibly RMs). + +If the GPG is not available, cluster operations will continue as of the last time the GPG published policies, and while a long-term unavailability might mean that some +of the desirable properties of balance, optimal cluster utilization and global invariants drift away, compute and access to data will not be compromised. + +*NOTE*: In the current implementation, the GPG is a manual tuning process, simply exposed via a CLI (YARN-3657). + +This part of the federation system is part of future work in [YARN-5597](https://issues.apache.org/jira/browse/YARN-5597). + + +###Federation State-Store +The Federation State defines the additional state that needs to be maintained to loosely couple multiple individual sub-clusters into a single large federated cluster. This includes the following information: + +####Sub-cluster Membership +The member YARN RMs continuously heartbeat to the state store to keep themselves alive and publish their current capability/load information. This information is used by the +Global Policy Generator (GPG) to make proper policy decisions. This information can also be used by Routers to select the best home sub-cluster. This mechanism allows +us to dynamically grow/shrink the “cluster fleet” by adding or removing sub-clusters. This also allows for easy maintenance of each sub-cluster. This is new functionality +that needs to be added to the YARN RM, but the mechanisms are well understood as it’s similar to individual YARN RM HA. + +####Application’s Home Sub-cluster +The sub-cluster on which the Application Master (AM) runs is called the Application’s “home sub-cluster”.
The AM is not limited to resources from the home sub-cluster +but can also request resources from other sub-clusters, referred to as secondary sub-clusters. +The federated environment will be configured and tuned periodically such that when an AM is placed on a sub-cluster, it should be able to find most of the resources +on the home sub-cluster. Only in certain cases should it need to ask for resources from other sub-clusters. + +###Federation Policy Store +The Federation Policy Store is a logically separate store (while it might be backed +by the same physical component), which contains information about how applications and +resource requests are routed to different sub-clusters. The current implementation provides +several policies, ranging from random/hashing/roundrobin/priority to more sophisticated +ones which account for sub-cluster load and request locality needs. + + +Running Applications across Sub-Clusters +---------------------------------------- + +When an application is submitted, the system will determine the most appropriate sub-cluster to run the application, +which we call the application’s home sub-cluster. All the communications from the AM to the RM will be proxied via +the AMRMProxy running locally on the AM machine. +The AMRMProxy exposes the same ApplicationMasterService protocol endpoint as the YARN RM. The AM can request containers +using the locality information exposed by the storage layer. In the ideal case, the application will be placed on a sub-cluster +where all the resources and data required by the application are available, but if it does need containers on nodes in +other sub-clusters, the AMRMProxy will negotiate with the RMs of those sub-clusters transparently and provide the resources to +the application, thereby enabling the application to view the entire federated environment as one massive YARN cluster. +The AMRMProxy, Global Policy Generator (GPG) and Router work together to make this happen seamlessly. + +![Federation Sequence Diagram | width=800](./images/federation_sequence_diagram.png) + + +The figure shows a sequence diagram for the following job execution flow: + +1. The Router receives an application submission request that is compliant with the YARN Application Client Protocol. +2. The Router interrogates a routing table / policy to choose the “home RM” for the job (the policy configuration is received from the state-store on heartbeat). +3. The Router queries the membership state to determine the endpoint of the home RM. +4. The Router then redirects the application submission request to the home RM. +5. The Router updates the application state with the home sub-cluster identifier. +6. Once the application is submitted to the home RM, the stock YARN flow is triggered, i.e. the application is added to the scheduler queue and its AM is started in the home sub-cluster, on the first NodeManager that has available resources. + a. During this process, the AM environment is modified to indicate the address of the AMRMProxy as the YARN RM to talk to. + b. The security tokens are also modified by the NM when launching the AM, so that the AM can only talk with the AMRMProxy. Any future communication from the AM to the YARN RM is mediated by the AMRMProxy. +7. The AM will then request containers using the locality information exposed by HDFS. +8. Based on a policy, the AMRMProxy can impersonate the AM on other sub-clusters, by submitting an Unmanaged AM, and by forwarding the AM heartbeats to relevant sub-clusters. +9.
The AMRMProxy will use both locality information and a pluggable policy configured in the state-store to decide whether to forward the resource requests received by the AM to the Home RM or to one (or more) Secondary RMs. In Figure 1, we show the case in which the AMRMProxy decides to forward the request to the secondary RM. +10. The secondary RM will provide the AMRMProxy with valid container tokens to start a new container on some node in its sub-cluster. This mechanism ensures that each sub-cluster uses its own security tokens and avoids the need for a cluster-wide shared secret to create tokens. +11. The AMRMProxy forwards the allocation response back to the AM. +12. The AM starts the container on the target NodeManager (on sub-cluster 2) using the standard YARN protocols. + + + + +Configuration +------------- + + To configure YARN to use Federation, set the following properties in the **conf/yarn-site.xml**: + +###EVERYWHERE: + +These are common configurations that should appear in the **conf/yarn-site.xml** at each machine in the federation. + + +| Property | Example | Description | +|:---- |:---- |:---- | +|`yarn.federation.enabled` | `true` | Whether federation is enabled or not | +|`yarn.resourcemanager.cluster-id` | `` | The unique subcluster identifier for this RM (same as the one used for HA). | + +####State-Store: + +Currently, we support ZooKeeper- and SQL-based implementations of the state-store. + +**Note:** The State-Store implementation must always be overridden with one of the below. + +ZooKeeper: one must set the ZooKeeper settings for Hadoop: + +| Property | Example | Description | +|:---- |:---- |:---- | +|`yarn.federation.state-store.class` | `org.apache.hadoop.yarn.server.federation.store.impl.ZookeeperFederationStateStore` | The type of state-store to use. | +|`hadoop.zk.address` | `host:port` | The address for the ZooKeeper ensemble. | + +SQL: one must set up the following parameters: + +| Property | Example | Description | +|:---- |:---- |:---- | +|`yarn.federation.state-store.class` | `org.apache.hadoop.yarn.server.federation.store.impl.SQLFederationStateStore` | The type of state-store to use. | +|`yarn.federation.state-store.sql.url` | `jdbc:mysql://:/FederationStateStore` | For SQLFederationStateStore the JDBC URL of the DB where the state is stored. | +|`yarn.federation.state-store.sql.jdbc-class` | `com.mysql.jdbc.jdbc2.optional.MysqlDataSource` | For SQLFederationStateStore the JDBC class to use. | +|`yarn.federation.state-store.sql.username` | `` | For SQLFederationStateStore the username for the DB connection. | +|`yarn.federation.state-store.sql.password` | `` | For SQLFederationStateStore the password for the DB connection. | + +We provide scripts for MySQL and Microsoft SQL Server. + +For MySQL, one must download the latest jar version 5.x from [MVN Repository](https://mvnrepository.com/artifact/mysql/mysql-connector-java) and add it to the CLASSPATH. +Then the DB schema is created by executing the following SQL scripts in the database: + +1. **sbin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql**. +2. **sbin/FederationStateStore/MySQL/FederationStateStoreUser.sql**. +3. **sbin/FederationStateStore/MySQL/FederationStateStoreTables.sql**. +4. **sbin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql**. + +In the same directory we provide scripts to drop the Stored Procedures, the Tables, the User and the Database.
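+For illustration, the following is a minimal sketch of the **conf/yarn-site.xml** entries that combine the common properties above with the SQL state-store settings. The sub-cluster id, host, database and credential values here are placeholders chosen for this example, not shipped defaults: +
+```xml
+<property>
+  <name>yarn.federation.enabled</name>
+  <value>true</value>
+</property>
+<property>
+  <!-- placeholder: use a unique id per sub-cluster, same as the RM HA cluster-id -->
+  <name>yarn.resourcemanager.cluster-id</name>
+  <value>subcluster1</value>
+</property>
+<property>
+  <name>yarn.federation.state-store.class</name>
+  <value>org.apache.hadoop.yarn.server.federation.store.impl.SQLFederationStateStore</value>
+</property>
+<property>
+  <!-- placeholder host:port; the database name matches the scripts above -->
+  <name>yarn.federation.state-store.sql.url</name>
+  <value>jdbc:mysql://statestore.example.com:3306/FederationStateStore</value>
+</property>
+<property>
+  <name>yarn.federation.state-store.sql.jdbc-class</name>
+  <value>com.mysql.jdbc.jdbc2.optional.MysqlDataSource</value>
+</property>
+<property>
+  <!-- placeholder credentials; see the note below about setting a strong password -->
+  <name>yarn.federation.state-store.sql.username</name>
+  <value>FederationUser</value>
+</property>
+<property>
+  <name>yarn.federation.state-store.sql.password</name>
+  <value>changeme</value>
+</property>
+```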
+ +**Note:** the FederationStateStoreUser.sql defines a default user/password for the DB; you are **highly encouraged** to change this to a proper strong password. + +For SQL-Server, the process is similar, but the JDBC driver is already included. +SQL-Server scripts are located in **sbin/FederationStateStore/SQLServer/**. + + +####Optional: + +| Property | Example | Description | +|:---- |:---- |:---- | +|`yarn.federation.failover.enabled` | `true` | Whether to retry considering RM failover within each sub-cluster. | +|`yarn.federation.blacklist-subclusters` | `` | A list of black-listed sub-clusters, useful to disable a sub-cluster. | +|`yarn.federation.policy-manager` | `org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager` | The choice of policy manager determines how Applications and ResourceRequests are routed through the system. | +|`yarn.federation.policy-manager-params` | `` | The payload that configures the policy. In our example, a set of weights for router and amrmproxy policies. This is typically generated by serializing a policymanager that has been configured programmatically, or by populating the state-store with the .json serialized form of it. | +|`yarn.federation.subcluster-resolver.class` | `org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl` | The class used to resolve which subcluster a node belongs to, and which subcluster(s) a rack belongs to. | +| `yarn.federation.machine-list` | `node1,subcluster1,rack1\n node2 , subcluster2, RACK1\n node3,subcluster3, rack2\n node4, subcluster3, rack2\n` | A list of Nodes, Sub-clusters and Racks, used by the `DefaultSubClusterResolverImpl` | + +###ON RMs: + +These are extra configurations that should appear in the **conf/yarn-site.xml** at each ResourceManager. + +| Property | Example | Description | +|:---- |:---- |:---- | +|`yarn.resourcemanager.epoch` | `` | The seed value for the epoch. This is used to guarantee uniqueness of container-IDs generated by different RMs. It must therefore be unique among sub-clusters and `well-spaced` to allow for failures which increment the epoch. Increments of 1000 allow for a large number of sub-clusters and practically ensure near-zero chance of collisions (a clash will only happen if a container is still alive for 1000 restarts of one RM, while the next RM never restarted, and an app requests more containers). | + +Optional: + +| Property | Example | Description | +|:---- |:---- |:---- | +|`yarn.federation.state-store.heartbeat-interval-secs` | `60` | The rate at which RMs report their membership in the federation to the central state-store. | + + +###ON ROUTER: + +These are extra configurations that should appear in the **conf/yarn-site.xml** at each Router. + +| Property | Example | Description | +|:---- |:---- |:---- | +|`yarn.router.bind-host` | `0.0.0.0` | Host IP to bind the router to. The actual address the server will bind to. If this optional address is set, the RPC and webapp servers will bind to this address and the port specified in yarn.router.*.address respectively. This is most useful for making the Router listen on all interfaces by setting it to 0.0.0.0. | +| `yarn.router.clientrm.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.router.clientrm.FederationClientInterceptor` | A comma-separated list of interceptor classes to be run at the router when interfacing with the client. The last step of this pipeline must be the Federation Client Interceptor.
| + +Optional: + +| Property | Example | Description | +|:---- |:---- |:---- | +|`yarn.router.hostname` | `0.0.0.0` | Router host name. | +|`yarn.router.clientrm.address` | `0.0.0.0:8050` | Router client address. | +|`yarn.router.webapp.address` | `0.0.0.0:80` | Webapp address at the router. | +|`yarn.router.admin.address` | `0.0.0.0:8052` | Admin address at the router. | +|`yarn.router.webapp.https.address` | `0.0.0.0:443` | Secure webapp address at the router. | +|`yarn.router.submit.retry` | `3` | The number of retries in the router before we give up. | +|`yarn.federation.statestore.max-connections` | `10` | This is the maximum number of parallel connections each Router makes to the state-store. | +|`yarn.federation.cache-ttl.secs` | `60` | The Router caches information, and this is the time to live before the cache is invalidated. | +|`yarn.router.webapp.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.router.webapp.FederationInterceptorREST` | A comma-separated list of interceptor classes to be run at the router when interfacing with the client via the REST interface. The last step of this pipeline must be the Federation Interceptor REST. | + +###ON NMs: + +These are extra configurations that should appear in the **conf/yarn-site.xml** at each NodeManager. + + +| Property | Example | Description | +|:---- |:---- |:---- | +| `yarn.nodemanager.amrmproxy.enabled` | `true` | Whether or not the AMRMProxy is enabled. | +|`yarn.nodemanager.amrmproxy.interceptor-class.pipeline` | `org.apache.hadoop.yarn.server.nodemanager.amrmproxy.FederationInterceptor` | A comma-separated list of interceptors to be run at the amrmproxy. For federation, the last step in the pipeline should be the FederationInterceptor. | +| `yarn.client.failover-proxy-provider` | `org.apache.hadoop.yarn.server.federation.failover.FederationRMFailoverProxyProvider` | The class used to connect to the RMs by looking up the membership information in the federation state-store. This must be set if federation is enabled, even if RM HA is not enabled.| + +Optional: + +| Property | Example | Description | +|:---- |:---- |:---- | +|`yarn.federation.statestore.max-connections` | `1` | The maximum number of parallel connections from each AMRMProxy to the state-store. This value is typically lower than the router one, since we have many AMRMProxies that could burn through many DB connections quickly. | +|`yarn.federation.cache-ttl.secs` | `300` | The time to live for the AMRMProxy cache. Typically larger than at the router, as the number of AMRMProxies is large, and we want to limit the load on the centralized state-store. | + +Running a Sample Job +-------------------- +In order to submit jobs to a Federation cluster one must create a separate set of configs for the client from which jobs will be submitted. In these, the **conf/yarn-site.xml** should have the following additional configurations: + +| Property | Example | Description | +|:--- |:--- |:--- | +| `yarn.resourcemanager.address` | `:8050` | Redirects jobs launched at the client to the router's client RM port. | +| `yarn.resourcemanager.scheduler.address` | `localhost:8049` | Redirects jobs to the federation AMRMProxy port. | + +Any YARN jobs for the cluster can be submitted from the client configurations described above. In order to launch a job through federation, first start up all the clusters involved in the federation as described [here](../../hadoop-project-dist/hadoop-common/ClusterSetup.html).
Next, start up the router on the router machine with the following command: + + $HADOOP_HOME/bin/yarn --daemon start router + +Now with $HADOOP_CONF_DIR pointing to the client configurations folder that is described above, run your job the usual way. The configurations in the client configurations folder described above will direct the job to the router's client RM port where the router should be listening after being started. Here is an example run of a Pi job on a federation cluster from the client: + + $HADOOP_HOME/bin/yarn jar hadoop-mapreduce-examples-3.0.0.jar pi 16 1000 + +This job is submitted to the router which as described above, uses a generated policy from the [GPG](#Global_Policy_Generator) to pick a home RM for the job to which it is submitted. + +The output from this particular example job should be something like: + + 2017-07-13 16:29:25,055 INFO mapreduce.Job: Job job_1499988226739_0001 running in uber mode : false + 2017-07-13 16:29:25,056 INFO mapreduce.Job: map 0% reduce 0% + 2017-07-13 16:29:33,131 INFO mapreduce.Job: map 38% reduce 0% + 2017-07-13 16:29:39,176 INFO mapreduce.Job: map 75% reduce 0% + 2017-07-13 16:29:45,217 INFO mapreduce.Job: map 94% reduce 0% + 2017-07-13 16:29:46,228 INFO mapreduce.Job: map 100% reduce 100% + 2017-07-13 16:29:46,235 INFO mapreduce.Job: Job job_1499988226739_0001 completed successfully + . + . + . + Job Finished in 30.586 seconds + Estimated value of Pi is 3.14250000...... + +Note that no change in the code or recompilation of the input jar was required to use federation. Also, the output of this job is the exact same as it would be when run without federation. Also, in order to get the full benefit of federation, use a large enough number of mappers such that more than one cluster is required. That number happens to be 16 in the case of the above example. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md index 2acb3d29d1e..2e83ca20bed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/GracefulDecommission.md @@ -13,7 +13,7 @@ --> -Graceful Decommission of Yarn Nodes +Graceful Decommission of YARN Nodes =============== * [Overview](#overview) @@ -29,19 +29,19 @@ Graceful Decommission of Yarn Nodes Overview -------- -Yarn is scalable very easily: any new NodeManager could join to the configured ResourceManager and start to execute jobs. But to achieve full elasticity we need a decommissioning process which helps to remove existing nodes and down-scale the cluster. +YARN is scalable very easily: any new NodeManager could join to the configured ResourceManager and start to execute jobs. But to achieve full elasticity we need a decommissioning process which helps to remove existing nodes and down-scale the cluster. -Yarn Nodes could be decommissioned NORMAL or GRACEFUL. +YARN Nodes could be decommissioned NORMAL or GRACEFUL. -Normal Decommission of Yarn Nodes means an immediate shutdown. +Normal Decommission of YARN Nodes means an immediate shutdown. -Graceful Decommission of Yarn Nodes is the mechanism to decommission NMs while minimize the impact to running applications. 
Once a node is in DECOMMISSIONING state, RM won't schedule new containers on it and will wait for running containers and applications to complete (or until decommissioning timeout exceeded) before transition the node into DECOMMISSIONED. +Graceful Decommission of YARN Nodes is the mechanism to decommission NMs while minimize the impact to running applications. Once a node is in DECOMMISSIONING state, RM won't schedule new containers on it and will wait for running containers and applications to complete (or until decommissioning timeout exceeded) before transition the node into DECOMMISSIONED. ## Quick start To do a normal decommissioning: -1. Start a Yarn cluster (with NodeManageres and ResourceManager) +1. Start a YARN cluster (with NodeManageres and ResourceManager) 2. Start a yarn job (for example with `yarn jar...` ) 3. Add `yarn.resourcemanager.nodes.exclude-path` property to your `yarn-site.xml` (Note: you don't need to restart the ResourceManager) 4. Create a text file (the location is defined in the previous step) with one line which contains the name of a selected NodeManager diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md index 2704f10a1c3..d36280113cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md @@ -17,7 +17,7 @@ Using CGroups with YARN -CGroups is a mechanism for aggregating/partitioning sets of tasks, and all their future children, into hierarchical groups with specialized behaviour. CGroups is a Linux kernel feature and was merged into kernel version 2.6.24. From a YARN perspective, this allows containers to be limited in their resource usage. A good example of this is CPU usage. Without CGroups, it becomes hard to limit container CPU usage. Currently, CGroups is only used for limiting CPU usage. +CGroups is a mechanism for aggregating/partitioning sets of tasks, and all their future children, into hierarchical groups with specialized behaviour. CGroups is a Linux kernel feature and was merged into kernel version 2.6.24. From a YARN perspective, this allows containers to be limited in their resource usage. A good example of this is CPU usage. Without CGroups, it becomes hard to limit container CPU usage. CGroups Configuration --------------------- @@ -30,9 +30,9 @@ The following settings are related to setting up CGroups. These need to be set i |:---- |:---- | | `yarn.nodemanager.container-executor.class` | This should be set to "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor". CGroups is a Linux kernel feature and is exposed via the LinuxContainerExecutor. | | `yarn.nodemanager.linux-container-executor.resources-handler.class` | This should be set to "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler". Using the LinuxContainerExecutor doesn't force you to use CGroups. If you wish to use CGroups, the resource-handler-class must be set to CGroupsLCEResourceHandler. | -| `yarn.nodemanager.linux-container-executor.cgroups.hierarchy` | The cgroups hierarchy under which to place YARN proccesses(cannot contain commas). If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have been pre-configured) and the Yarn user has write access to the parent directory, then the directory will be created. 
If the directory already exists, the administrator has to give Yarn write permissions to it recursively. | +| `yarn.nodemanager.linux-container-executor.cgroups.hierarchy` | The cgroups hierarchy under which to place YARN proccesses(cannot contain commas). If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have been pre-configured) and the YARN user has write access to the parent directory, then the directory will be created. If the directory already exists, the administrator has to give YARN write permissions to it recursively. | | `yarn.nodemanager.linux-container-executor.cgroups.mount` | Whether the LCE should attempt to mount cgroups if not found - can be true or false. | -| `yarn.nodemanager.linux-container-executor.cgroups.mount-path` | Where the LCE should attempt to mount cgroups if not found. Common locations include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux distribution in use. This path must exist before the NodeManager is launched. Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and yarn.nodemanager.linux-container-executor.cgroups.mount is true. A point to note here is that the container-executor binary will try to mount the path specified + "/" + the subsystem. In our case, since we are trying to limit CPU the binary tries to mount the path specified + "/cpu" and that's the path it expects to exist. | +| `yarn.nodemanager.linux-container-executor.cgroups.mount-path` | Optional. Where CGroups are located. LCE will try to mount them here, if `yarn.nodemanager.linux-container-executor.cgroups.mount` is true. LCE will try to use CGroups from this location, if `yarn.nodemanager.linux-container-executor.cgroups.mount` is false. If specified, this path and its subdirectories (CGroup hierarchies) must exist and they should be readable and writable by YARN before the NodeManager is launched. See CGroups mount options below for details. | | `yarn.nodemanager.linux-container-executor.group` | The Unix group of the NodeManager. It should match the setting in "container-executor.cfg". This configuration is required for validating the secure access of the container-executor binary. | The following settings are related to limiting resource usage of YARN containers: @@ -42,6 +42,17 @@ The following settings are related to limiting resource usage of YARN containers | `yarn.nodemanager.resource.percentage-physical-cpu-limit` | This setting lets you limit the cpu usage of all YARN containers. It sets a hard upper limit on the cumulative CPU usage of the containers. For example, if set to 60, the combined CPU usage of all YARN containers will not exceed 60%. | | `yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage` | CGroups allows cpu usage limits to be hard or soft. When this setting is true, containers cannot use more CPU usage than allocated even if spare CPU is available. This ensures that containers can only use CPU that they were allocated. When set to false, containers can use spare CPU if available. It should be noted that irrespective of whether set to true or false, at no time can the combined CPU usage of all containers exceed the value specified in "yarn.nodemanager.resource.percentage-physical-cpu-limit". | +CGroups mount options +--------------------- + +YARN uses CGroups through a directory structure mounted into the file system by the kernel. There are three options to attach to CGroups. 
+ +| Option | Description | +|:---- |:---- | +| Discover CGroups mounted already | This should be used on newer systems like RHEL7 or Ubuntu16 or if the administrator mounts CGroups before YARN starts. Set `yarn.nodemanager.linux-container-executor.cgroups.mount` to false and leave other settings set to their defaults. YARN will locate the mount points in `/proc/mounts`. Common locations include `/sys/fs/cgroup` and `/cgroup`. The default location can vary depending on the Linux distribution in use.| +| CGroups mounted by YARN | If the system does not have CGroups mounted or it is mounted to an inaccessible location then point `yarn.nodemanager.linux-container-executor.cgroups.mount-path` to an empty directory. Set `yarn.nodemanager.linux-container-executor.cgroups.mount` to true. A point to note here is that the container-executor binary will try to create and mount each subsystem as a subdirectory under this path. If `cpu` is already mounted somewhere with `cpuacct`, then the directory `cpu,cpuacct` will be created for the hierarchy.| +| CGroups mounted already or linked but not in `/proc/mounts` | If cgroups is accessible through lxcfs or simulated by another filesystem, then point `yarn.nodemanager.linux-container-executor.cgroups.mount-path` to your CGroups root directory. Set `yarn.nodemanager.linux-container-executor.cgroups.mount` to false. YARN tries to use this path first, before any CGroup mount point discovery. The path should have a subdirectory for each CGroup hierarchy named by the comma separated CGroup subsystems supported like `/cpu,cpuacct`. Valid subsystem names are `cpu, cpuacct, cpuset, memory, net_cls, blkio, freezer, devices`.| + CGroups and security -------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md index 07c37655d8f..f1308d5c2e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md @@ -56,7 +56,7 @@ Following are the important interfaces: * Under very rare circumstances, programmer may want to directly use the 3 protocols to implement an application. However, note that *such behaviors are no longer encouraged for general use cases*. -Writing a Simple Yarn Application +Writing a Simple YARN Application --------------------------------- ### Writing a simple Client @@ -574,4 +574,4 @@ Useful Links Sample Code ----------- -Yarn distributed shell: in `hadoop-yarn-applications-distributedshell` project after you set up your development environment. +YARN distributed shell: in `hadoop-yarn-applications-distributedshell` project after you set up your development environment. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md index 433c737365a..598ee6fe870 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YARN.md @@ -33,4 +33,7 @@ The ApplicationsManager is responsible for accepting job-submissions, negotiatin MapReduce in hadoop-2.x maintains **API compatibility** with previous stable release (hadoop-1.x). This means that all MapReduce jobs should still run unchanged on top of YARN with just a recompile. 
-YARN also supports the notion of **resource reservation** via the [ReservationSystem](./ReservationSystem.html), a component that allows users to specify a profile of resources over-time and temporal constraints (e.g., deadlines), and reserve resources to ensure the predictable execution of important jobs.The *ReservationSystem* tracks resources over-time, performs admission control for reservations, and dynamically instruct the underlying scheduler to ensure that the reservation is fullfilled. +YARN supports the notion of **resource reservation** via the [ReservationSystem](./ReservationSystem.html), a component that allows users to specify a profile of resources over time and temporal constraints (e.g., deadlines), and reserve resources to ensure the predictable execution of important jobs. The *ReservationSystem* tracks resources over time, performs admission control for reservations, and dynamically instructs the underlying scheduler to ensure that the reservation is fulfilled. + +In order to scale YARN beyond a few thousand nodes, YARN supports the notion of **Federation** via the [YARN Federation](./Federation.html) feature. Federation allows multiple YARN (sub-)clusters to be transparently wired together, and + made to appear as a single massive cluster. This can be used to achieve larger scale, and/or to allow multiple independent clusters to be used together for very large jobs, or for tenants who have capacity across all of them. \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/yarn-registry.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/yarn-registry.md index f5055d9f12a..4973862ba82 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/yarn-registry.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/yarn-registry.md @@ -84,7 +84,7 @@ container ID. ## The binding problem Hadoop YARN allows applications to run on the Hadoop cluster. Some of these are -batch jobs or queries that can managed via Yarn’s existing API using its +batch jobs or queries that can be managed via YARN’s existing API using its application ID. In addition YARN can deploy ong-lived services instances such a pool of Apache Tomcat web servers or an Apache HBase cluster. YARN will deploy them across the cluster depending on the individual each component requirements @@ -121,7 +121,7 @@ services accessible from within the Hadoop cluster /services/yarn /services/oozie -Yarn-deployed services belonging to individual users. +YARN-deployed services belonging to individual users. /users/joe/org-apache-hbase/demo1 /users/joe/org-apache-hbase/demo1/components/regionserver1 @@ -148,7 +148,7 @@ their application master, to which they heartbeat regularly. ## Unsupported Registration use cases: -1. A short-lived Yarn application is registered automatically in the registry, +1. A short-lived YARN application is registered automatically in the registry, including all its containers. and unregistered when the job terminates. Short-lived applications with many containers will place excessive load on a registry. All YARN applications will be given the option of registering, but it
On top this base implementation we build our registry service API and the -naming conventions that Yarn will use for its services. The registry will be +naming conventions that YARN will use for its services. The registry will be accessed by the registry API, not directly via ZK - ZK is just an implementation choice (although unlikely to change in the future). @@ -297,7 +297,7 @@ them. 6. Core services will be registered using the following convention: `/services/{servicename}` e.g. `/services/hdfs`. -7. Yarn services SHOULD be registered using the following convention: +7. YARN services SHOULD be registered using the following convention: /users/{username}/{serviceclass}/{instancename} @@ -823,8 +823,8 @@ The `RegistryPathStatus` class summarizes the contents of a node in the registry ## Security The registry will allow a service instance can only be registered under the -path where it has permissions. Yarn will create directories with appropriate -permissions for users where Yarn deployed services can be registered by a user. +path where it has permissions. YARN will create directories with appropriate +permissions for users where YARN deployed services can be registered by a user. of the user account of the service instance. The admin will also create directories (such as `/services`) with appropriate permissions (where core Hadoop services can register themselves. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/amrmproxy_architecture.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/amrmproxy_architecture.png new file mode 100644 index 00000000000..8740adaed54 Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/amrmproxy_architecture.png differ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/federation_architecture.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/federation_architecture.png new file mode 100644 index 00000000000..b2ed72fac20 Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/federation_architecture.png differ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/federation_sequence_diagram.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/federation_sequence_diagram.png new file mode 100644 index 00000000000..32812e06520 Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/federation_sequence_diagram.png differ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/capacity-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/capacity-queue.js new file mode 100644 index 00000000000..7eb9f76b0af --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/capacity-queue.js @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import YarnQueueAdapter from './yarn-queue'; + +export default YarnQueueAdapter.extend({ + +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fair-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fair-queue.js new file mode 100644 index 00000000000..7eb9f76b0af --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fair-queue.js @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import YarnQueueAdapter from './yarn-queue'; + +export default YarnQueueAdapter.extend({ + +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fifo-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fifo-queue.js new file mode 100644 index 00000000000..7eb9f76b0af --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/fifo-queue.js @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import YarnQueueAdapter from './yarn-queue'; + +export default YarnQueueAdapter.extend({ + +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/yarn-queue.js similarity index 96% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/yarn-queue.js index f2017df5bc4..8184c39cf09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/yarn-queue.js @@ -16,7 +16,7 @@ * limitations under the License. */ -import AbstractAdapter from './abstract'; +import AbstractAdapter from '../abstract'; export default AbstractAdapter.extend({ address: "rmWebAddress", diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js index 3d72b2fbbf7..1a81a321ffb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js @@ -39,6 +39,9 @@ export default Ember.Component.extend({ // mainSvg mainSvg: undefined, + used: undefined, + max: undefined, + // Init data initData: function() { this.map = { }; @@ -52,7 +55,8 @@ export default Ember.Component.extend({ }.bind(this)); // var selected = this.get("selected"); - + this.used = this.get("used"); + this.max = this.get("max"); this.initQueue("root", 1, this.treeData); }, @@ -81,7 +85,6 @@ export default Ember.Component.extend({ // Queue is not existed return; } - if (depth > this.maxDepth) { this.maxDepth = this.maxDepth + 1; } @@ -149,7 +152,9 @@ export default Ember.Component.extend({ nodeEnter.append("circle") .attr("r", 1e-6) .style("fill", function(d) { - var usedCap = d.queueData.get("usedCapacity"); + var maxCap = d.queueData.get(this.max); + maxCap = maxCap == undefined ? 100 : maxCap; + var usedCap = d.queueData.get(this.used) / maxCap * 100.0; if (usedCap <= 60.0) { return "LimeGreen"; } else if (usedCap <= 100.0) { @@ -157,7 +162,7 @@ export default Ember.Component.extend({ } else { return "LightCoral"; } - }); + }.bind(this)); // append percentage nodeEnter.append("text") @@ -166,13 +171,15 @@ export default Ember.Component.extend({ .attr("fill", "white") .attr("text-anchor", function() { return "middle"; }) .text(function(d) { - var usedCap = d.queueData.get("usedCapacity"); + var maxCap = d.queueData.get(this.max); + maxCap = maxCap == undefined ? 
100 : maxCap; + var usedCap = d.queueData.get(this.used) / maxCap * 100.0; if (usedCap >= 100.0) { return usedCap.toFixed(0) + "%"; } else { return usedCap.toFixed(1) + "%"; } - }) + }.bind(this)) .style("fill-opacity", 1e-6); // append queue name diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js index aa8fb07e769..55f6e1b3c04 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js @@ -20,25 +20,27 @@ import Ember from 'ember'; -function getTimeLineURL() { - return '/conf?name=yarn.timeline-service.webapp.address'; +function getTimeLineURL(rmhost) { + var url = window.location.protocol + '//' + + (ENV.hosts.localBaseAddress? ENV.hosts.localBaseAddress + '/' : '') + rmhost; + + url += '/conf?name=yarn.timeline-service.webapp.address'; + Ember.Logger.log("Get Timeline Address URL: " + url); + return url; } function updateConfigs(application) { var hostname = window.location.hostname; - var rmhost = hostname + - (window.location.port ? ':' + window.location.port: ''); - - Ember.Logger.log("RM Address:" + rmhost); + var rmhost = hostname + (window.location.port ? ':' + window.location.port: ''); if(!ENV.hosts.rmWebAddress) { - ENV = { - hosts: { - rmWebAddress: rmhost, - }, - }; + ENV.hosts.rmWebAddress = rmhost; + } else { + rmhost = ENV.hosts.rmWebAddress; } + Ember.Logger.log("RM Address: " + rmhost); + if(!ENV.hosts.timelineWebAddress) { var timelinehost = ""; $.ajax({ @@ -46,7 +48,7 @@ function updateConfigs(application) { dataType: 'json', async: true, context: this, - url: getTimeLineURL(), + url: getTimeLineURL(rmhost), success: function(data) { timelinehost = data.property.value; ENV.hosts.timelineWebAddress = timelinehost; @@ -54,24 +56,18 @@ function updateConfigs(application) { var address = timelinehost.split(":")[0]; var port = timelinehost.split(":")[1]; - Ember.Logger.log("Timeline Address from RM:" + address + ":" + port); + Ember.Logger.log("Timeline Address from RM: " + timelinehost); if(address === "0.0.0.0" || address === "localhost") { var updatedAddress = hostname + ":" + port; - - /* Timeline v2 is not supporting CORS, so make as default*/ - ENV = { - hosts: { - rmWebAddress: rmhost, - timelineWebAddress: updatedAddress, - }, - }; - Ember.Logger.log("Timeline Updated Address:" + updatedAddress); + ENV.hosts.timelineWebAddress = updatedAddress; + Ember.Logger.log("Timeline Updated Address: " + updatedAddress); } application.advanceReadiness(); - }, + } }); } else { + Ember.Logger.log("Timeline Address: " + ENV.hosts.timelineWebAddress); application.advanceReadiness(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js similarity index 94% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js index 27c48f79cb8..1cb07bbb567 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js @@ -35,6 +35,7 @@ export default 
DS.Model.extend({ numPendingApplications: DS.attr('number'), numActiveApplications: DS.attr('number'), users: DS.hasMany('YarnUser'), + type: DS.attr('string'), isLeafQueue: function() { var len = this.get("children.length"); @@ -59,7 +60,7 @@ export default DS.Model.extend({ value: this.get("name") === "root" ? 100 : this.get("absMaxCapacity") } ]; - }.property("absCapacity", "absUsedCapacity", "absMaxCapacity"), + }.property("absCapacity", "usedCapacity", "absMaxCapacity"), userUsagesDonutChartData: function() { var data = []; @@ -90,5 +91,5 @@ export default DS.Model.extend({ value: this.get("numActiveApplications") || 0 } ]; - }.property() + }.property("numPendingApplications", "numActiveApplications") }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fair-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fair-queue.js new file mode 100644 index 00000000000..be71362c5d4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fair-queue.js @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + name: DS.attr('string'), + children: DS.attr('array'), + parent: DS.attr('string'), + maxApps: DS.attr('number'), + minResources: DS.attr(), + maxResources: DS.attr(), + usedResources: DS.attr(), + demandResources: DS.attr(), + steadyFairResources: DS.attr(), + fairResources: DS.attr(), + clusterResources: DS.attr(), + pendingContainers: DS.attr('number'), + allocatedContainers: DS.attr('number'), + reservedContainers: DS.attr('number'), + schedulingPolicy: DS.attr('string'), + preemptable: DS.attr('number'), + numPendingApplications: DS.attr('number'), + numActiveApplications: DS.attr('number'), + type: DS.attr('string'), + + isLeafQueue: function() { + var len = this.get("children.length"); + if (!len) { + return true; + } + return len <= 0; + }.property("children"), + + capacitiesBarChartData: function() { + return [ + { + label: "Steady Fair Memory", + value: this.get("steadyFairResources.memory") + }, + { + label: "Used Memory", + value: this.get("usedResources.memory") + }, + { + label: "Maximum Memory", + value: this.get("maxResources.memory") + } + ]; + }.property("maxResources.memory", "usedResources.memory", "maxResources.memory"), + + numOfApplicationsDonutChartData: function() { + return [ + { + label: "Pending Apps", + value: this.get("numPendingApplications") || 0 // TODO, fix the REST API so root will return #applications as well. 
+ }, + { + label: "Active Apps", + value: this.get("numActiveApplications") || 0 + } + ]; + }.property() +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fifo-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fifo-queue.js new file mode 100644 index 00000000000..2386dc4fce4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/fifo-queue.js @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + name: DS.attr('string'), + capacity: DS.attr('number'), + usedCapacity: DS.attr('number'), + state: DS.attr('string'), + minQueueMemoryCapacity: DS.attr('number'), + maxQueueMemoryCapacity: DS.attr('number'), + numNodes: DS.attr('number'), + usedNodeCapacity: DS.attr('number'), + availNodeCapacity: DS.attr('number'), + totalNodeCapacity: DS.attr('number'), + numContainers: DS.attr('number'), + type: DS.attr('string'), + + capacitiesBarChartData: function() { + return [ + { + label: "Available Capacity", + value: this.get("availNodeCapacity") + }, + { + label: "Used Capacity", + value: this.get("usedNodeCapacity") + }, + { + label: "Total Capacity", + value: this.get("totalNodeCapacity") + } + ]; + }.property("availNodeCapacity", "usedNodeCapacity", "totalNodeCapacity") + +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/yarn-queue.js new file mode 100644 index 00000000000..dcf5f48776f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/yarn-queue.js @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.Model.extend({ + type: DS.attr('string') +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js index b5db17db051..3c6abd4cfda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/cluster-overview.js @@ -28,7 +28,7 @@ export default AbstractRoute.extend({ { state: "RUNNING" }), - queues: this.store.query('yarn-queue', {}), + queues: this.store.query('yarn-queue.yarn-queue', {}), }); }, @@ -39,6 +39,6 @@ export default AbstractRoute.extend({ unloadAll() { this.store.unloadAll('ClusterMetric'); this.store.unloadAll('yarn-app'); - this.store.unloadAll('yarn-queue'); + this.store.unloadAll('yarn-queue.yarn-queue'); } }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js index 1c4546cae0e..cd4ed0983ba 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queue.js @@ -22,22 +22,28 @@ import AbstractRoute from './abstract'; export default AbstractRoute.extend({ model(param) { - return Ember.RSVP.hash({ - selected : param.queue_name, - queues: this.store.query('yarn-queue', {}), - selectedQueue : undefined, - apps: this.store.query('yarn-app', { - queue: param.queue_name - }) - }); + return Ember.RSVP.hash({ + selected : param.queue_name, + queues: this.store.query("yarn-queue.yarn-queue", {}).then((model) => { + let type = model.get('firstObject').get('type'); + return this.store.query("yarn-queue." + type + "-queue", {}); + }), + selectedQueue : undefined, + apps: this.store.query('yarn-app', { + queue: param.queue_name + }) + }); }, afterModel(model) { - model.selectedQueue = this.store.peekRecord('yarn-queue', model.selected); + var type = model.queues.get('firstObject').constructor.modelName; + model.selectedQueue = this.store.peekRecord(type, model.selected); }, unloadAll() { - this.store.unloadAll('yarn-queue'); + this.store.unloadAll('yarn-queue.capacity-queue'); + this.store.unloadAll('yarn-queue.fair-queue'); + this.store.unloadAll('yarn-queue.fifo-queue'); this.store.unloadAll('yarn-app'); } }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues.js index e4f145d00b6..7d8a200bdfb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-queues.js @@ -30,17 +30,23 @@ export default AbstractRoute.extend({ } return Ember.RSVP.hash({ selected : queueName, - queues: this.store.query('yarn-queue', {}), + queues: this.store.query("yarn-queue.yarn-queue", {}).then((model) => { + let type = model.get('firstObject').get('type'); + return this.store.query("yarn-queue." 
+ type + "-queue", {}); + }), selectedQueue : undefined }); }, afterModel(model) { - model.selectedQueue = this.store.peekRecord('yarn-queue', model.selected); + var type = model.queues.get('firstObject').constructor.modelName; + model.selectedQueue = this.store.peekRecord(type, model.selected); }, unloadAll() { - this.store.unloadAll('yarn-queue'); + this.store.unloadAll('yarn-queue.capacity-queue'); + this.store.unloadAll('yarn-queue.fair-queue'); + this.store.unloadAll('yarn-queue.fifo-queue'); this.store.unloadAll('yarn-app'); }, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js similarity index 99% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js index 4fc1a29ad0e..c7350ef03bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js @@ -53,7 +53,6 @@ export default DS.JSONAPISerializer.extend({ }); } - var fixedPayload = { id: id, type: primaryModelClass.modelName, // yarn-queue @@ -73,6 +72,7 @@ export default DS.JSONAPISerializer.extend({ preemptionDisabled: payload.preemptionDisabled, numPendingApplications: payload.numPendingApplications, numActiveApplications: payload.numActiveApplications, + type: "capacity", }, // Relationships relationships: { @@ -81,7 +81,6 @@ export default DS.JSONAPISerializer.extend({ } } }; - return { queue: this._super(store, primaryModelClass, fixedPayload, id, requestType), includedData: includedData diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fair-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fair-queue.js new file mode 100644 index 00000000000..2215d2d41d9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fair-queue.js @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.JSONAPISerializer.extend({ + + normalizeSingleResponse(store, primaryModelClass, payload, id, + requestType) { + var children = []; + if (payload.childQueues) { + payload.childQueues.queue.forEach(function(queue) { + children.push(queue.queueName); + }); + } + + var fixedPayload = { + id: id, + type: primaryModelClass.modelName, + attributes: { + name: payload.queueName, + parent: payload.myParent, + children: children, + maxApps: payload.maxApps, + minResources: payload.minResources, + maxResources: payload.maxResources, + usedResources: payload.usedResources, + demandResources: payload.demandResources, + steadyFairResources: payload.steadyFairResources, + fairResources: payload.fairResources, + clusterResources: payload.clusterResources, + pendingContainers: payload.pendingContainers, + allocatedContainers: payload.allocatedContainers, + reservedContainers: payload.reservedContainers, + schedulingPolicy: payload.schedulingPolicy, + preemptable: payload.preemptable, + numPendingApplications: payload.numPendingApps, + numActiveApplications: payload.numActiveApps, + type: "fair", + }, + }; + return this._super(store, primaryModelClass, fixedPayload, id, requestType); + }, + + handleQueue(store, primaryModelClass, payload, id, requestType) { + var data = []; + var includedData = []; + if(!payload) return data; + var result = this.normalizeSingleResponse(store, primaryModelClass, + payload, id, requestType); + + data.push(result); + + if (payload.childQueues) { + for (var i = 0; i < payload.childQueues.queue.length; i++) { + var queue = payload.childQueues.queue[i]; + queue.myParent = payload.queueName; + var childResult = this.handleQueue(store, primaryModelClass, queue, + queue.queueName, + requestType); + + data = data.concat(childResult); + } + } + + return data; + }, + + normalizeArrayResponse(store, primaryModelClass, payload, id, requestType) { + var normalizedArrayResponse = {}; + var result = this.handleQueue(store, primaryModelClass, + payload.scheduler.schedulerInfo.rootQueue, "root", requestType); + + normalizedArrayResponse.data = result; + return normalizedArrayResponse; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fifo-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fifo-queue.js new file mode 100644 index 00000000000..297ec182cdb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/fifo-queue.js @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.JSONAPISerializer.extend({ + + normalizeSingleResponse(store, primaryModelClass, payload, id, + requestType) { + + var fixedPayload = { + id: id, + type: primaryModelClass.modelName, + attributes: { + name: id, + capacity: payload.capacity * 100, + usedCapacity: payload.usedCapacity * 100, + usedNodeCapacity: payload.usedNodeCapacity, + availNodeCapacity: payload.availNodeCapacity, + totalNodeCapacity: payload.totalNodeCapacity, + numNodes: payload.numNodes, + numContainers: payload.numContainers, + state: payload.qstate, + minQueueMemoryCapacity: payload.minQueueMemoryCapacity, + maxQueueMemoryCapacity: payload.maxQueueMemoryCapacity, + type: "fifo", + }, + + }; + + return this._super(store, primaryModelClass, fixedPayload, id, + requestType); + }, + + normalizeArrayResponse(store, primaryModelClass, payload, id, requestType) { + var normalizedArrayResponse = {}; + normalizedArrayResponse.data = [ + this.normalizeSingleResponse(store, primaryModelClass, + payload.scheduler.schedulerInfo, "root", requestType) + ]; + + return normalizedArrayResponse; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/yarn-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/yarn-queue.js new file mode 100644 index 00000000000..b2e0f2f4298 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/yarn-queue.js @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; + +export default DS.JSONAPISerializer.extend({ + + normalizeSingleResponse(store, primaryModelClass, payload, id, + requestType) { + + var fixedPayload = { + id: id, + type: primaryModelClass.modelName, + attributes: { + type: payload.type.split(/(?=[A-Z])/)[0] + } + }; + return this._super(store, primaryModelClass, fixedPayload, id, + requestType); + }, + + normalizeArrayResponse(store, primaryModelClass, payload, id, requestType) { + var normalizedArrayResponse = {}; + + normalizedArrayResponse.data = [ + this.normalizeSingleResponse(store, primaryModelClass, + payload.scheduler.schedulerInfo, "root", requestType) + ]; + + return normalizedArrayResponse; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs index d8dd236f6ac..e3b0a90ed74 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs @@ -20,9 +20,12 @@

- {{tree-selector model=model parentId="tree-selector-container" selected=selected}} +
+ Scheduler: {{model.firstObject.type}} +
+ {{tree-selector model=model parentId="tree-selector-container" selected=selected used=used max=max}}
-{{outlet}} \ No newline at end of file +{{outlet}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-conf-table.hbs similarity index 99% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-conf-table.hbs index 17a1e1ab39e..3f6017ff9cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-configuration-table.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-conf-table.hbs @@ -51,4 +51,4 @@ {{/if}} - \ No newline at end of file + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs new file mode 100644 index 00000000000..7d44e69aee2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue-info.hbs @@ -0,0 +1,84 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+ +
+
+
+ Queue Capacities: {{model.selected}} +
+
+
+ {{bar-chart data=model.selectedQueue.capacitiesBarChartData + title="" + parentId="capacity-bar-chart" + textWidth=170 + ratio=0.55 + maxHeight=350}} +
+
+
+ +
+
+
+ Queue Information: {{model.selected}} +
+ {{yarn-queue.capacity-queue-conf-table queue=model.selectedQueue}} +
+
+ +
+ +
+ +
+
+
+ Running Apps: {{model.selected}} +
+
+ {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData + showLabels=true + parentId="numapplications-donut-chart" + ratio=0.6 + maxHeight=350}} +
+
+
+ + {{#if model.selectedQueue.hasUserUsages}} +
+
+
+ User Usages: {{model.selected}} +
+
+ {{donut-chart data=model.selectedQueue.userUsagesDonutChartData + showLabels=true + parentId="userusage-donut-chart" + type="memory" + ratio=0.6 + maxHeight=350}} +
+
+
+ {{/if}} + +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs new file mode 100644 index 00000000000..8b63b661bbd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs @@ -0,0 +1,63 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +{{queue-navigator model=model.queues selected=model.selected + used="usedCapacity" max="absMaxCapacity"}} + +
+
+
+
+ Queue Information: {{model.selected}} +
+ {{yarn-queue.capacity-queue-conf-table queue=model.selectedQueue}} +
+
+ +
+
+
+ Queue Capacities: {{model.selected}} +
+
+
+ {{bar-chart data=model.selectedQueue.capacitiesBarChartData + title="" + parentId="capacity-bar-chart" + textWidth=175 + ratio=0.55 + maxHeight=350}} +
+
+
+ +
+
+
+ Running Apps: {{model.selected}} +
+
+ {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData + showLabels=true + parentId="numapplications-donut-chart" + ratio=0.6 + maxHeight=350}} +
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs new file mode 100644 index 00000000000..00fabcc7d9f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs @@ -0,0 +1,52 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ConfigurationsValue
Fair Memory, VCores{{queue.fairResources.memory}} MB, {{queue.fairResources.vCores}}
Minimum Memory, VCores{{queue.minResources.memory}} MB, {{queue.minResources.vCores}}
Cluster Memory, VCores{{queue.clusterResources.memory}} MB, {{queue.clusterResources.vCores}}
Pending, Allocated, Reserved Containers{{queue.pendingContainers}} , {{queue.allocatedContainers}} , {{queue.reservedContainers}}
Scheduling Policy{{queue.schedulingPolicy}}
Preemption Enabled{{queue.preemptable}}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs new file mode 100644 index 00000000000..a770bfe1641 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-info.hbs @@ -0,0 +1,66 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+ +
+
+
+ Queue Capacities: {{model.selected}} +
+
+
+ {{bar-chart data=model.selectedQueue.capacitiesBarChartData + title="" + parentId="capacity-bar-chart" + textWidth=170 + ratio=0.55 + maxHeight=350}} +
+
+
+ +
+
+
+ Queue Information: {{model.selected}} +
+ {{yarn-queue.fair-queue-conf-table queue=model.selectedQueue}} +
+
+ +
+ +
+ +
+
+
+ Running Apps: {{model.selected}} +
+
+ {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData + showLabels=true + parentId="numapplications-donut-chart" + ratio=0.6 + maxHeight=350}} +
+
+
+ +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs new file mode 100644 index 00000000000..03411087986 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs @@ -0,0 +1,63 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +{{queue-navigator model=model.queues selected=model.selected + used="usedResources.memory" max="clusterResources.memory"}} + +
+
+
+
+ Queue Information: {{model.selected}} +
+ {{yarn-queue.fair-queue-conf-table queue=model.selectedQueue}} +
+
+ +
+
+
+ Queue Capacities: {{model.selected}} +
+
+
+ {{bar-chart data=model.selectedQueue.capacitiesBarChartData + title="" + parentId="capacity-bar-chart" + textWidth=150 + ratio=0.55 + maxHeight=350}} +
+
+
+ +
+
+
+ Running Apps: {{model.selected}} +
+
+ {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData + showLabels=true + parentId="numapplications-donut-chart" + ratio=0.6 + maxHeight=350}} +
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-conf-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-conf-table.hbs new file mode 100644 index 00000000000..4ced3e70c86 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-conf-table.hbs @@ -0,0 +1,56 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ConfigurationsValue
Configured Capacity{{queue.capacity}}
Used Capacity{{queue.usedCapacity}}
State{{queue.state}}
Minimum Queue Memory Capacity{{queue.minQueueMemoryCapacity}}
Maximum Queue Memory Capacity{{queue.maxQueueMemoryCapacity}}
Number of Nodes{{queue.numNodes}}
Number of Containers{{queue.numContainers}}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-info.hbs new file mode 100644 index 00000000000..7f4e8a7996a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue-info.hbs @@ -0,0 +1,47 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+ +
+
+
+ Queue Capacities: {{model.selected}} +
+
+
+ {{bar-chart data=model.selectedQueue.capacitiesBarChartData + title="" + parentId="capacity-bar-chart" + textWidth=170 + ratio=0.55 + maxHeight=350}} +
+
+
+ +
+
+
+ Queue Information: {{model.selected}} +
+ {{yarn-queue.fifo-queue-conf-table queue=model.selectedQueue}} +
+
+ +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue.hbs new file mode 100644 index 00000000000..46d79f0e350 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fifo-queue.hbs @@ -0,0 +1,48 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +{{queue-navigator model=model.queues selected=model.selected + used="usedNodeCapacity" max="totalNodeCapacity"}} + +
+
+
+
+ Queue Information: {{model.selected}} +
+ {{yarn-queue.fifo-queue-conf-table queue=model.selectedQueue}} +
+
+ +
+
+
+ Queue Capacities: {{model.selected}} +
+
+
+ {{bar-chart data=model.selectedQueue.capacitiesBarChartData + title="" + parentId="capacity-bar-chart" + textWidth=150 + ratio=0.55 + maxHeight=350}} +
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/info.hbs index c112ef9b3df..2f138a7eba2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/info.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue/info.hbs @@ -16,69 +16,10 @@ * limitations under the License. }} -
- -
-
-
- Queue Capacities: {{model.selected}} -
-
-
- {{bar-chart data=model.selectedQueue.capacitiesBarChartData - title="" - parentId="capacity-bar-chart" - textWidth=170 - ratio=0.55 - maxHeight=350}} -
-
-
- -
-
-
- Queue Information: {{model.selected}} -
- {{queue-configuration-table queue=model.selectedQueue}} -
-
- -
- -
- -
-
-
- Running Apps: {{model.selected}} -
-
- {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData - showLabels=true - parentId="numapplications-donut-chart" - ratio=0.6 - maxHeight=350}} -
-
-
- - {{#if model.selectedQueue.hasUserUsages}} -
-
-
- User Usages: {{model.selected}} -
-
- {{donut-chart data=model.selectedQueue.userUsagesDonutChartData - showLabels=true - parentId="userusage-donut-chart" - type="memory" - ratio=0.6 - maxHeight=350}} -
-
-
- {{/if}} - -
+{{#if (eq model.queues.firstObject.type "capacity")}} + {{yarn-queue.capacity-queue-info model=model}} +{{else if (eq model.queues.firstObject.type "fair")}} + {{yarn-queue.fair-queue-info model=model}} +{{else}} + {{yarn-queue.fifo-queue-info model=model}} +{{/if}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs index 6dfb22014eb..fccdb5b62c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs @@ -17,54 +17,14 @@ }} {{breadcrumb-bar breadcrumbs=breadcrumbs}} -
- {{queue-navigator model=model.queues selected=model.selected}} - -
- -
-
-
- Queue Information: {{model.selected}} -
- {{queue-configuration-table queue=model.selectedQueue}} -
-
- -
-
-
- Queue Capacities: {{model.selected}} -
-
-
- {{bar-chart data=model.selectedQueue.capacitiesBarChartData - title="" - parentId="capacity-bar-chart" - textWidth=150 - ratio=0.55 - maxHeight=350}} -
-
-
- -
-
-
- Running Apps: {{model.selected}} -
-
- {{donut-chart data=model.selectedQueue.numOfApplicationsDonutChartData - showLabels=true - parentId="numapplications-donut-chart" - ratio=0.6 - maxHeight=350}} -
-
-
- -
+ {{#if (eq model.queues.firstObject.type "capacity")}} + {{yarn-queue.capacity-queue model=model}} + {{else if (eq model.queues.firstObject.type "fair")}} + {{yarn-queue.fair-queue model=model}} + {{else}} + {{yarn-queue.fifo-queue model=model}} + {{/if}}
{{outlet}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js index 6c0cfee214c..af0cdf4dd63 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/color-utils.js @@ -55,7 +55,6 @@ export default { } } - console.log(colors); return colors; }, diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index 52d902871a5..12e46049a77 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -75,7 +75,7 @@ org.apache.maven.plugins maven-javadoc-plugin - org.apache.hadoop.yarn.proto + org.apache.hadoop.yarn.proto:org.apache.hadoop.yarn.federation.proto @@ -155,14 +155,6 @@ ${project.build.directory} hadoop-annotations.jar - - xerces - xercesImpl - ${xerces.version.jdiff} - false - ${project.build.directory} - xerces.jar - @@ -201,7 +193,7 @@ sourceFiles="${dev-support.relative.dir}/jdiff/Null.java" maxmemory="${jdiff.javadoc.maxmemory}"> + path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar"> diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index df87417a4bb..2798192e529 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -78,6 +78,10 @@ org.apache.hadoop hadoop-yarn-server-timelineservice-hbase + + org.apache.hadoop + hadoop-yarn-server-router + diff --git a/pom.xml b/pom.xml index 29524a40118..22a4b59a768 100644 --- a/pom.xml +++ b/pom.xml @@ -97,7 +97,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.7 2.4 2.10 - 1.4.1 + 3.0.0-M1 2.10.4 1.5 1.5 @@ -389,9 +389,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs attach-descriptor - - true - diff --git a/start-build-env.sh b/start-build-env.sh index 18e3a8c38c4..94af7e44f2c 100755 --- a/start-build-env.sh +++ b/start-build-env.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with