Merge branch 'yarn-native-services' into trunk

Jian He 2017-11-06 14:02:19 -08:00
commit cbc632d9ab
334 changed files with 33541 additions and 322 deletions

View File

@@ -1777,6 +1777,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The binary distribution of this product bundles these dependencies under the
following license:
FindBugs-jsr305 3.0.0
dnsjava 2.1.7, Copyright (c) 1998-2011, Brian Wellington. All rights reserved.
--------------------------------------------------------------------------------
(2-clause BSD)
Redistribution and use in source and binary forms, with or without

View File

@@ -581,3 +581,13 @@ The binary distribution of this product bundles binaries of
Ehcache 3.3.1,
which has the following notices:
* Ehcache V3 Copyright 2014-2016 Terracotta, Inc.
The binary distribution of this product bundles binaries of
snakeyaml (https://bitbucket.org/asomov/snakeyaml),
which has the following notices:
* Copyright (c) 2008, http://www.snakeyaml.org
The binary distribution of this product bundles binaries of
swagger-annotations (https://github.com/swagger-api/swagger-core),
which has the following notices:
* Copyright 2016 SmartBear Software

View File

@@ -86,6 +86,31 @@
<include>*-sources.jar</include>
</includes>
</fileSet>
<fileSet>
<directory>hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/target</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
<includes>
<include>*-sources.jar</include>
</includes>
</fileSet>
<fileSet>
<directory>hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/conf</directory>
<outputDirectory>etc/hadoop</outputDirectory>
</fileSet>
<fileSet>
<directory>hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/yarn-service-examples</outputDirectory>
<includes>
<include>**/*</include>
</includes>
</fileSet>
<fileSet>
<directory>hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/target</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
<includes>
<include>*-sources.jar</include>
</includes>
</fileSet>
<fileSet>
<directory>hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/target</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>

View File

@@ -89,6 +89,7 @@
<curator.version>2.12.0</curator.version>
<findbugs.version>3.0.0</findbugs.version>
<spotbugs.version>3.1.0-RC1</spotbugs.version>
<dnsjava.version>2.1.7</dnsjava.version>
<guava.version>11.0.2</guava.version>
<guice.version>4.0</guice.version>
@@ -142,6 +143,9 @@
<!-- the version of Hadoop declared in the version resources; can be overridden
so that Hadoop 3.x can declare itself a 2.x artifact. -->
<declared.hadoop.version>${project.version}</declared.hadoop.version>
<swagger-annotations-version>1.5.4</swagger-annotations-version>
<snakeyaml.version>1.16</snakeyaml.version>
</properties>
<dependencyManagement>
@@ -422,6 +426,12 @@
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-services-core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-jobclient</artifactId>
@@ -597,6 +607,11 @@
<artifactId>javax.servlet-api</artifactId>
<version>3.1.0</version>
</dependency>
<dependency>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-server</artifactId>
@@ -956,11 +971,6 @@
<artifactId>jackson-module-jaxb-annotations</artifactId>
<version>${jackson2.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.jaxrs</groupId>
<artifactId>jackson-jaxrs-json-provider</artifactId>
<version>${jackson2.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-cbor</artifactId>
@@ -1213,6 +1223,13 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>dnsjava</groupId>
<artifactId>dnsjava</artifactId>
<version>${dnsjava.version}</version>
</dependency>
<dependency>
<!-- HACK. Transitive dependency for nimbus-jose-jwt. Needed for
packaging. Please re-check this version when updating
@@ -1319,6 +1336,23 @@
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.version}</version>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-annotations</artifactId>
<version>${swagger-annotations-version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.jaxrs</groupId>
<artifactId>jackson-jaxrs-json-provider</artifactId>
<version>${jackson2.version}</version>
</dependency>
<dependency>
<groupId>org.yaml</groupId>
<artifactId>snakeyaml</artifactId>
<version>${snakeyaml.version}</version>
</dependency>
</dependencies>
</dependencyManagement>

View File

@@ -154,7 +154,15 @@
<item name="Timeline Server" href="hadoop-yarn/hadoop-yarn-site/TimelineServer.html#Timeline_Server_REST_API_v1"/>
<item name="Timeline Service V.2" href="hadoop-yarn/hadoop-yarn-site/TimelineServiceV2.html#Timeline_Service_v.2_REST_API"/>
</menu>
<menu name="YARN Service" inherit="top">
<item name="Overview" href="hadoop-yarn/hadoop-yarn-site/yarn-service/Overview.html"/>
<item name="QuickStart" href="hadoop-yarn/hadoop-yarn-site/yarn-service/QuickStart.html"/>
<item name="Concepts" href="hadoop-yarn/hadoop-yarn-site/yarn-service/Concepts.html"/>
<item name="Yarn Service API" href="hadoop-yarn/hadoop-yarn-site/yarn-service/YarnServiceAPI.html"/>
<item name="Service Discovery" href="hadoop-yarn/hadoop-yarn-site/yarn-service/ServiceDiscovery.html"/>
</menu>
<menu name="Hadoop Compatible File Systems" inherit="top">
<item name="Aliyun OSS" href="hadoop-aliyun/tools/hadoop-aliyun/index.html"/>
<item name="Amazon S3" href="hadoop-aws/tools/hadoop-aws/index.html"/>

View File

@@ -31,7 +31,7 @@ function hadoop_usage
hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
hadoop_add_option "--workers" "turn on worker mode"
hadoop_add_subcommand "application" client "prints application(s) report/kill application"
hadoop_add_subcommand "app|application" client "prints application(s) report/kill application/manage long running application"
hadoop_add_subcommand "applicationattempt" client "prints applicationattempt(s) report"
hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
hadoop_add_subcommand "cluster" client "prints cluster information"
@@ -44,6 +44,7 @@ function hadoop_usage
hadoop_add_subcommand "nodemanager" daemon "run a nodemanager on each worker"
hadoop_add_subcommand "proxyserver" daemon "run the web app proxy server"
hadoop_add_subcommand "queue" client "prints queue information"
hadoop_add_subcommand "registrydns" daemon "run the registry DNS server"
hadoop_add_subcommand "resourcemanager" daemon "run the ResourceManager"
hadoop_add_subcommand "rmadmin" admin "admin tools"
hadoop_add_subcommand "router" daemon "run the Router daemon"
@@ -68,10 +69,18 @@ function yarncmd_case
shift
case ${subcmd} in
application|applicationattempt|container)
app|application|applicationattempt|container)
HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.ApplicationCLI
set -- "${subcmd}" "$@"
HADOOP_SUBCMD_ARGS=("$@")
local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\
${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\
${HADOOP_HDFS_HOME}/${HDFS_DIR},\
${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\
${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\
${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
hadoop_translate_cygwin_path sld
hadoop_add_param HADOOP_OPTS service.libdir "-Dservice.libdir=${sld}"
;;
classpath)
hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
@@ -128,6 +137,11 @@ function yarncmd_case
queue)
HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.QueueCLI
;;
registrydns)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_SECURE_CLASSNAME='org.apache.hadoop.registry.server.dns.PrivilegedRegistryDNSStarter'
HADOOP_CLASSNAME='org.apache.hadoop.registry.server.dns.RegistryDNSServer'
;;
resourcemanager)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'

View File

@@ -148,3 +148,21 @@
# See ResourceManager for some examples
#
#export YARN_ROUTER_OPTS=
###
# Registry DNS specific parameters
###
# For privileged registry DNS, user to run as after dropping privileges
# This will replace the hadoop.id.str Java property in secure mode.
# export YARN_REGISTRYDNS_SECURE_USER=yarn
# Supplemental options for privileged registry DNS
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
# export YARN_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server"
###
# YARN Services parameters
###
# Directory containing service examples
# export YARN_SERVICE_EXAMPLES_DIR=$HADOOP_YARN_HOME/share/hadoop/yarn/yarn-service-examples
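#
# A sketch (not part of the original file): with the registrydns subcommand
# added to the yarn script above, and daemonization support enabled for it,
# the DNS server can be started in the background with:
#   yarn --daemon start registrydns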

View File

@@ -629,8 +629,18 @@
</Match>
<Match>
<Class name="org.apache.hadoop.yarn.api.records.Resource" />
<Method name="getResources" />
<Bug pattern="EI_EXPOSE_REP" />
<Class name="org.apache.hadoop.registry.server.dns.RegistryDNS" />
<Method name="addNIOTCP" />
<Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
</Match>
<Match>
<Class name="org.apache.hadoop.registry.server.dns.RegistryDNS" />
<Method name="addNIOUDP" />
<Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
</Match>
<Match>
<Class name="org.apache.hadoop.registry.server.dns.RegistryDNS" />
<Method name="serveNIOTCP" />
<Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
</Match>
</FindBugsFilter>

View File

@@ -336,6 +336,8 @@ public class YarnConfiguration extends Configuration {
public static final String YARN_WEBAPP_UI2_WARFILE_PATH = "yarn."
+ "webapp.ui2.war-file-path";
public static final String YARN_API_SERVICES_ENABLE = "yarn."
+ "webapp.api-service.enable";
public static final String RM_RESOURCE_TRACKER_ADDRESS =
RM_PREFIX + "resource-tracker.address";

View File

@@ -0,0 +1,20 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<FindBugsFilter>
</FindBugsFilter>

View File

@@ -0,0 +1,130 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-applications</artifactId>
<version>3.1.0-SNAPSHOT</version>
</parent>
<artifactId>hadoop-yarn-services-api</artifactId>
<name>Apache Hadoop YARN Services API</name>
<packaging>jar</packaging>
<description>Hadoop YARN REST APIs for services</description>
<build>
<!-- Resources are filtered for dynamic updates; this pulls build info in. -->
<resources>
<resource>
<directory>src/main/resources</directory>
<filtering>true</filtering>
</resource>
<resource>
<directory>src/main/scripts/</directory>
<filtering>true</filtering>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<!-- The configuration of the plugin -->
<configuration>
<!-- Configuration of the archiver -->
<archive>
<manifestEntries>
<mode>development</mode>
<url>${project.url}</url>
</manifestEntries>
<!-- Manifest specific configuration -->
<manifest>
</manifest>
</archive>
</configuration>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<reporting>
</reporting>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-services-core</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-webapp</artifactId>
</dependency>
<dependency>
<groupId>com.google.inject</groupId>
<artifactId>guice</artifactId>
</dependency>
<dependency>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<scope>test</scope>
</dependency>
<!-- ======================================================== -->
<!-- Test dependencies -->
<!-- ======================================================== -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,298 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.webapp;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.api.records.ServiceState;
import org.apache.hadoop.yarn.service.api.records.ServiceStatus;
import org.apache.hadoop.yarn.service.client.ServiceClient;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import static org.apache.hadoop.yarn.service.api.records.ServiceState.ACCEPTED;
import static org.apache.hadoop.yarn.service.conf.RestApiConstants.*;
/**
* The REST API endpoints for users to manage services on YARN.
*/
@Singleton
@Path(CONTEXT_ROOT)
public class ApiServer {
public ApiServer() {
super();
}
@Inject
public ApiServer(Configuration conf) {
super();
}
private static final Logger LOG =
LoggerFactory.getLogger(ApiServer.class);
private static Configuration YARN_CONFIG = new YarnConfiguration();
private static ServiceClient SERVICE_CLIENT;
static {
init();
}
// initialize all the common resources - order is important
private static void init() {
SERVICE_CLIENT = new ServiceClient();
SERVICE_CLIENT.init(YARN_CONFIG);
SERVICE_CLIENT.start();
}
@GET
@Path(VERSION)
@Consumes({ MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_JSON })
public Response getVersion() {
String version = VersionInfo.getBuildVersion();
LOG.info(version);
return Response.ok("{ \"hadoop_version\": \"" + version + "\"}").build();
}
@POST
@Path(SERVICE_ROOT_PATH)
@Consumes({ MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_JSON })
public Response createService(Service service) {
LOG.info("POST: createService = {}", service);
ServiceStatus serviceStatus = new ServiceStatus();
try {
ApplicationId applicationId = SERVICE_CLIENT.actionCreate(service);
LOG.info("Successfully created service " + service.getName()
+ " applicationId = " + applicationId);
serviceStatus.setState(ACCEPTED);
serviceStatus.setUri(
CONTEXT_ROOT + SERVICE_ROOT_PATH + "/" + service
.getName());
return Response.status(Status.ACCEPTED).entity(serviceStatus).build();
} catch (IllegalArgumentException e) {
serviceStatus.setDiagnostics(e.getMessage());
return Response.status(Status.BAD_REQUEST).entity(serviceStatus)
.build();
} catch (Exception e) {
String message = "Failed to create service " + service.getName();
LOG.error(message, e);
serviceStatus.setDiagnostics(message + ": " + e.getMessage());
return Response.status(Status.INTERNAL_SERVER_ERROR)
.entity(serviceStatus).build();
}
}
@GET
@Path(SERVICE_PATH)
@Consumes({ MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_JSON })
public Response getService(@PathParam(SERVICE_NAME) String appName) {
LOG.info("GET: getService for appName = {}", appName);
ServiceStatus serviceStatus = new ServiceStatus();
try {
Service app = SERVICE_CLIENT.getStatus(appName);
return Response.ok(app).build();
} catch (IllegalArgumentException e) {
serviceStatus.setDiagnostics(e.getMessage());
serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
return Response.status(Status.NOT_FOUND).entity(serviceStatus)
.build();
} catch (Exception e) {
LOG.error("Get service failed", e);
serviceStatus
.setDiagnostics("Failed to retrieve service: " + e.getMessage());
return Response.status(Status.INTERNAL_SERVER_ERROR)
.entity(serviceStatus).build();
}
}
@DELETE
@Path(SERVICE_PATH)
@Consumes({ MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_JSON })
public Response deleteService(@PathParam(SERVICE_NAME) String appName) {
LOG.info("DELETE: deleteService for appName = {}", appName);
return stopService(appName, true);
}
private Response stopService(String appName, boolean destroy) {
try {
SERVICE_CLIENT.actionStop(appName, destroy);
if (destroy) {
SERVICE_CLIENT.actionDestroy(appName);
LOG.info("Successfully deleted service {}", appName);
} else {
LOG.info("Successfully stopped service {}", appName);
}
return Response.status(Status.OK).build();
} catch (ApplicationNotFoundException e) {
ServiceStatus serviceStatus = new ServiceStatus();
serviceStatus.setDiagnostics(
"Service " + appName + " is not found in YARN: " + e.getMessage());
return Response.status(Status.BAD_REQUEST).entity(serviceStatus)
.build();
} catch (Exception e) {
ServiceStatus serviceStatus = new ServiceStatus();
serviceStatus.setDiagnostics(e.getMessage());
return Response.status(Status.INTERNAL_SERVER_ERROR)
.entity(serviceStatus).build();
}
}
@PUT
@Path(COMPONENT_PATH)
@Consumes({ MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN })
public Response updateComponent(@PathParam(SERVICE_NAME) String appName,
@PathParam(COMPONENT_NAME) String componentName, Component component) {
if (component.getNumberOfContainers() < 0) {
return Response.status(Status.BAD_REQUEST).entity(
"Service = " + appName + ", Component = " + component.getName()
+ ": Invalid number of containers specified " + component
.getNumberOfContainers()).build();
}
ServiceStatus status = new ServiceStatus();
try {
Map<String, Long> original = SERVICE_CLIENT.flexByRestService(appName,
Collections.singletonMap(component.getName(),
component.getNumberOfContainers()));
status.setDiagnostics(
"Updating component (" + componentName + ") size from " + original
.get(componentName) + " to " + component.getNumberOfContainers());
return Response.ok().entity(status).build();
} catch (YarnException | IOException e) {
status.setDiagnostics(e.getMessage());
return Response.status(Status.INTERNAL_SERVER_ERROR).entity(status)
.build();
}
}
@PUT
@Path(SERVICE_PATH)
@Consumes({ MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_JSON })
public Response updateService(@PathParam(SERVICE_NAME) String appName,
Service updateServiceData) {
LOG.info("PUT: updateService for app = {} with data = {}", appName,
updateServiceData);
// Ignore the app name provided in updateServiceData and always use appName
// path param
updateServiceData.setName(appName);
// For STOP the app should be running. If already stopped then this
// operation will be a no-op. For START it should be in stopped state.
// If already running then this operation will be a no-op.
if (updateServiceData.getState() != null
&& updateServiceData.getState() == ServiceState.STOPPED) {
return stopService(appName, false);
}
// If a START is requested
if (updateServiceData.getState() != null
&& updateServiceData.getState() == ServiceState.STARTED) {
return startService(appName);
}
// If new lifetime value specified then update it
if (updateServiceData.getLifetime() != null
&& updateServiceData.getLifetime() > 0) {
return updateLifetime(appName, updateServiceData);
}
// If nothing happens consider it a no-op
return Response.status(Status.NO_CONTENT).build();
}
private Response updateLifetime(String appName, Service updateAppData) {
ServiceStatus status = new ServiceStatus();
try {
String newLifeTime =
SERVICE_CLIENT.updateLifetime(appName, updateAppData.getLifetime());
status.setDiagnostics(
"Service (" + appName + ")'s lifeTime is updated to " + newLifeTime
+ ", " + updateAppData.getLifetime()
+ " seconds remaining");
return Response.ok(status).build();
} catch (Exception e) {
String message =
"Failed to update service (" + appName + ")'s lifetime to "
+ updateAppData.getLifetime();
LOG.error(message, e);
status.setDiagnostics(message + ": " + e.getMessage());
return Response.status(Status.INTERNAL_SERVER_ERROR).entity(status)
.build();
}
}
private Response startService(String appName) {
ServiceStatus status = new ServiceStatus();
try {
SERVICE_CLIENT.actionStart(appName);
LOG.info("Successfully started service " + appName);
status.setDiagnostics("Service " + appName + " is successfully started.");
status.setState(ServiceState.ACCEPTED);
return Response.ok(status).build();
} catch (Exception e) {
String message = "Failed to start service " + appName;
status.setDiagnostics(message + ": " + e.getMessage());
LOG.error(message, e);
return Response.status(Status.INTERNAL_SERVER_ERROR)
.entity(status).build();
}
}
/**
* Used by negative test case.
*
* @param mockServerClient - A mocked version of ServiceClient
*/
public static void setServiceClient(ServiceClient mockServerClient) {
SERVICE_CLIENT = mockServerClient;
SERVICE_CLIENT.init(YARN_CONFIG);
SERVICE_CLIENT.start();
}
}
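As a quick smoke test of the endpoints above (a sketch; it assumes the API server is reachable on port 9191, the default used throughout the examples and swagger spec below):

```bash
# probe the version endpoint served by ApiServer#getVersion
curl http://localhost:9191/ws/v1/services/version
```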

View File

@@ -0,0 +1,161 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.webapp;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.eclipse.jetty.webapp.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*;
/**
* This class launches the web service using Hadoop HttpServer2 (which uses
* an embedded Jetty container). This is the entry point to your service.
* The Java command used to launch this app should call the main method.
*/
public class ApiServerWebApp extends AbstractService {
private static final Logger logger = LoggerFactory
.getLogger(ApiServerWebApp.class);
private static final String SEP = ";";
// REST API server for YARN native services
private HttpServer2 apiServer;
private InetSocketAddress bindAddress;
public static void main(String[] args) throws IOException {
ApiServerWebApp apiWebApp = new ApiServerWebApp();
try {
apiWebApp.init(new YarnConfiguration());
apiWebApp.serviceStart();
} catch (Exception e) {
logger.error("Got exception starting", e);
apiWebApp.close();
}
}
public ApiServerWebApp() {
super(ApiServerWebApp.class.getName());
}
@Override
protected void serviceStart() throws Exception {
bindAddress = getConfig().getSocketAddr(API_SERVER_ADDRESS,
DEFAULT_API_SERVER_ADDRESS, DEFAULT_API_SERVER_PORT);
logger.info("YARN API server running on " + bindAddress);
if (UserGroupInformation.isSecurityEnabled()) {
doSecureLogin(getConfig());
}
startWebApp();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (apiServer != null) {
apiServer.stop();
}
super.serviceStop();
}
private void doSecureLogin(org.apache.hadoop.conf.Configuration conf)
throws IOException {
SecurityUtil.login(conf, YarnConfiguration.RM_KEYTAB,
YarnConfiguration.RM_PRINCIPAL, bindAddress.getHostName());
addFilters(conf);
}
private void addFilters(org.apache.hadoop.conf.Configuration conf) {
// Always load the pseudo authentication filter to parse "user.name" in a URL
// to identify an HTTP request's user.
boolean hasHadoopAuthFilterInitializer = false;
String filterInitializerConfKey = "hadoop.http.filter.initializers";
Class<?>[] initializersClasses =
conf.getClasses(filterInitializerConfKey);
List<String> targets = new ArrayList<String>();
if (initializersClasses != null) {
for (Class<?> initializer : initializersClasses) {
if (initializer.getName().equals(
AuthenticationFilterInitializer.class.getName())) {
hasHadoopAuthFilterInitializer = true;
break;
}
targets.add(initializer.getName());
}
}
if (!hasHadoopAuthFilterInitializer) {
targets.add(AuthenticationFilterInitializer.class.getName());
conf.set(filterInitializerConfKey, StringUtils.join(",", targets));
}
}
private void startWebApp() throws IOException {
URI uri = URI.create("http://" + NetUtils.getHostPortString(bindAddress));
apiServer = new HttpServer2.Builder()
.setName("api-server")
.setConf(getConfig())
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
.setUsernameConfKey(RM_WEBAPP_SPNEGO_USER_NAME_KEY)
.setKeytabConfKey(RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
.addEndpoint(uri).build();
String apiPackages =
ApiServer.class.getPackage().getName() + SEP
+ GenericExceptionHandler.class.getPackage().getName() + SEP
+ YarnJacksonJaxbJsonProvider.class.getPackage().getName();
apiServer.addJerseyResourcePackage(apiPackages, "/*");
try {
logger.info("Service starting up. Logging start...");
apiServer.start();
logger.info("Server status = {}", apiServer.toString());
for (Configuration conf : apiServer.getWebAppContext()
.getConfigurations()) {
logger.info("Configurations = {}", conf);
}
logger.info("Context Path = {}", Collections.singletonList(
apiServer.getWebAppContext().getContextPath()));
logger.info("ResourceBase = {}", Collections.singletonList(
apiServer.getWebAppContext().getResourceBase()));
logger.info("War = {}", Collections
.singletonList(apiServer.getWebAppContext().getWar()));
} catch (Exception ex) {
logger.error("Hadoop HttpServer2 App **failed**", ex);
throw ex;
}
}
}

View File

@@ -0,0 +1,245 @@
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
## Examples
### Create a simple single-component service with most attribute values as defaults
POST URL - http://localhost:9191/ws/v1/services
##### POST Request JSON
```json
{
"name": "hello-world",
"components" :
[
{
"name": "hello",
"number_of_containers": 1,
"artifact": {
"id": "nginx:latest",
"type": "DOCKER"
},
"launch_command": "./start_nginx.sh",
"resource": {
"cpus": 1,
"memory": "256"
}
}
]
}
```
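A minimal way to submit this request from the command line (a sketch; it assumes the JSON above is saved locally as hello-world.json and the API server runs on localhost:9191):

```bash
# create the service; 202 Accepted only confirms submission, not a RUNNING state
curl -X POST -H "Content-Type: application/json" \
  -d @hello-world.json http://localhost:9191/ws/v1/services
```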
##### GET Response JSON
GET URL - http://localhost:9191/ws/v1/services/hello-world
Note, a lifetime value of -1 means unlimited lifetime.
```json
{
"name": "hello-world",
"id": "application_1503963985568_0002",
"lifetime": -1,
"components": [
{
"name": "hello",
"dependencies": [],
"resource": {
"cpus": 1,
"memory": "256"
},
"configuration": {
"properties": {},
"env": {},
"files": []
},
"quicklinks": [],
"containers": [
{
"id": "container_e03_1503963985568_0002_01_000001",
"ip": "10.22.8.143",
"hostname": "myhost.local",
"state": "READY",
"launch_time": 1504051512412,
"bare_host": "10.22.8.143",
"component_name": "hello-0"
},
{
"id": "container_e03_1503963985568_0002_01_000002",
"ip": "10.22.8.143",
"hostname": "myhost.local",
"state": "READY",
"launch_time": 1504051536450,
"bare_host": "10.22.8.143",
"component_name": "hello-1"
}
],
"launch_command": "./start_nginx.sh",
"number_of_containers": 1,
"run_privileged_container": false
}
],
"configuration": {
"properties": {},
"env": {},
"files": []
},
"quicklinks": {}
}
```
### Update to modify the lifetime of a service
PUT URL - http://localhost:9191/ws/v1/services/hello-world
##### PUT Request JSON
Note that irrespective of the current lifetime value, this update request will set the lifetime of the service to 3600 seconds (1 hour) from the time the request is submitted. Hence, whether a service has 5 minutes of lifetime remaining and you want to extend it to an hour, or it has 5 hours remaining and you want to reduce it to an hour, you submit the same request below.
```json
{
"lifetime": 3600
}
```
### Stop a service
PUT URL - http://localhost:9191/ws/v1/services/hello-world
##### PUT Request JSON
```json
{
"state": "STOPPED"
}
```
### Start a service
PUT URL - http://localhost:9191/ws/v1/services/hello-world
##### PUT Request JSON
```json
{
"state": "STARTED"
}
```
### Update to flex up/down the number of containers (instances) of a component of a service
PUT URL - http://localhost:9191/ws/v1/services/hello-world/components/hello
##### PUT Request JSON
```json
{
"name": "hello",
"number_of_containers": 3
}
```
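The same flex request as a curl call (sketch, same assumptions as above):

```bash
# flex the hello component to 3 containers
curl -X PUT -H "Content-Type: application/json" \
  -d '{"name": "hello", "number_of_containers": 3}' \
  http://localhost:9191/ws/v1/services/hello-world/components/hello
```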
### Destroy a service
DELETE URL - http://localhost:9191/ws/v1/services/hello-world
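For example (sketch, same assumptions as the curl examples above):

```bash
# stop and destroy the service, releasing all of its resources
curl -X DELETE http://localhost:9191/ws/v1/services/hello-world
```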
***
### Create a complicated service - HBase
POST URL - http://localhost:9191/ws/v1/services
##### POST Request JSON
```json
{
"name": "hbase-app-1",
"lifetime": "3600",
"components": [
{
"name": "hbasemaster",
"number_of_containers": 1,
"artifact": {
"id": "hbase:latest",
"type": "DOCKER"
},
"launch_command": "/usr/hdp/current/hbase-master/bin/hbase master start",
"resource": {
"cpus": 1,
"memory": "2048"
},
"configuration": {
"env": {
"HBASE_LOG_DIR": "<LOG_DIR>"
},
"files": [
{
"type": "XML",
"dest_file": "/etc/hadoop/conf/core-site.xml",
"properties": {
"fs.defaultFS": "${CLUSTER_FS_URI}"
}
},
{
"type": "XML",
"dest_file": "/etc/hbase/conf/hbase-site.xml",
"properties": {
"hbase.cluster.distributed": "true",
"hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
"hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",
"zookeeper.znode.parent": "${SERVICE_ZK_PATH}",
"hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}",
"hbase.master.info.port": "16010"
}
}
]
}
},
{
"name": "regionserver",
"number_of_containers": 3,
"unique_component_support": "true",
"artifact": {
"id": "hbase:latest",
"type": "DOCKER"
},
"launch_command": "/usr/hdp/current/hbase-regionserver/bin/hbase regionserver start",
"resource": {
"cpus": 1,
"memory": "2048"
},
"configuration": {
"env": {
"HBASE_LOG_DIR": "<LOG_DIR>"
},
"files": [
{
"type": "XML",
"dest_file": "/etc/hadoop/conf/core-site.xml",
"properties": {
"fs.defaultFS": "${CLUSTER_FS_URI}"
}
},
{
"type": "XML",
"dest_file": "/etc/hbase/conf/hbase-site.xml",
"properties": {
"hbase.cluster.distributed": "true",
"hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
"hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",
"zookeeper.znode.parent": "${SERVICE_ZK_PATH}",
"hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}",
"hbase.master.info.port": "16010",
"hbase.regionserver.hostname": "${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}"
}
}
]
}
}
],
"quicklinks": {
"HBase Master Status UI": "http://hbasemaster0.${SERVICE_NAME}.${USER}.${DOMAIN}:16010/master-status",
"Proxied HBase Master Status UI": "http://app-proxy/${DOMAIN}/${USER}/${SERVICE_NAME}/hbasemaster/16010/"
}
}
```

View File

@@ -0,0 +1,471 @@
# Hadoop YARN REST APIs for services v1 spec in YAML
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
swagger: '2.0'
info:
title: "YARN Simplified API layer for services"
description: |
Bringing a new service on YARN today is not a simple experience. The APIs of existing
frameworks are either too low level (native YARN), require writing new code (for frameworks with programmatic APIs),
or require writing a complex spec (for declarative frameworks).
This simplified REST API can be used to create and manage the lifecycle of YARN services.
In most cases, the application owner will not be forced to make any changes to their applications.
This is primarily true if the application is packaged with containerization technologies like Docker.
This document describes the API specifications (aka. YarnFile) for deploying/managing
containerized services on YARN. The same JSON spec can be used for both REST API
and CLI to manage the services.
version: "1.0.0"
license:
name: Apache 2.0
url: http://www.apache.org/licenses/LICENSE-2.0.html
# the host (and default port) serving the API
host: host.mycompany.com:9191
# array of all schemes that your API supports
schemes:
- http
consumes:
- application/json
produces:
- application/json
paths:
/ws/v1/services/version:
get:
summary: Get current version of the API server.
description: Get current version of the API server.
responses:
200:
description: Successful request
/ws/v1/services:
get:
summary: (TBD) List of services running in the cluster.
description: Get a list of all currently running services (response includes a minimal projection of the service info). For more details do a GET on a specific service name.
responses:
200:
description: An array of services
schema:
type: array
items:
$ref: '#/definitions/Service'
default:
description: Unexpected error
schema:
$ref: '#/definitions/ServiceStatus'
post:
summary: Create a service
description: Create a service. The request JSON is a service object with details required for creation. If the request is successful it returns 202 Accepted. A success of this API only confirms success in submission of the service creation request. There is no guarantee that the service will actually reach a RUNNING state. Resource availability and several other factors determines if the service will be deployed in the cluster. It is expected that clients would subsequently call the GET API to get details of the service and determine its state.
parameters:
- name: Service
in: body
description: Service request object
required: true
schema:
$ref: '#/definitions/Service'
responses:
202:
description: The request to create a service is accepted
400:
description: Invalid service definition provided in the request body
500:
description: Failed to create a service
default:
description: Unexpected error
schema:
$ref: '#/definitions/ServiceStatus'
/ws/v1/services/{service_name}:
put:
summary: Update a service or upgrade the binary version of the components of a running service
description: Update the runtime properties of a service. Currently the following operations are supported - update lifetime, stop/start a service.
The PUT operation is also used to orchestrate an upgrade of the service containers to a newer version of their artifacts (TBD).
parameters:
- name: service_name
in: path
description: Service name
required: true
type: string
- name: Service
in: body
description: The updated service definition. It can contain the updated lifetime of a service or the desired state (STOPPED/STARTED) of a service to initiate a start/stop operation against the specified service
required: true
schema:
$ref: '#/definitions/Service'
responses:
204:
description: Update or upgrade was successful
404:
description: Service does not exist
default:
description: Unexpected error
schema:
$ref: '#/definitions/ServiceStatus'
delete:
summary: Destroy a service
description: Destroy a service and release all resources. This API might have to return JSON data providing location of logs (TBD), etc.
parameters:
- name: service_name
in: path
description: Service name
required: true
type: string
responses:
204:
description: Destroy was successful
404:
description: Service does not exist
default:
description: Unexpected error
schema:
$ref: '#/definitions/ServiceStatus'
get:
summary: Get details of a service.
description: Return the details (including containers) of a running service
parameters:
- name: service_name
in: path
description: Service name
required: true
type: string
responses:
200:
description: a service object
schema:
type: object
items:
$ref: '#/definitions/Service'
examples:
service_name: logsearch
artifact:
id: logsearch:latest
type: docker
404:
description: Service does not exist
default:
description: Unexpected error
schema:
$ref: '#/definitions/ServiceStatus'
/ws/v1/services/{service_name}/components/{component_name}:
put:
summary: Flex a component's number of instances.
description: Set a component's desired number of instances
parameters:
- name: service_name
in: path
description: Service name
required: true
type: string
- name: component_name
in: path
description: Component name
required: true
type: string
- name: Component
in: body
description: The definition of a component which contains the updated number of instances.
required: true
schema:
$ref: '#/definitions/Component'
responses:
200:
description: Flex was successful
404:
description: Service does not exist
default:
description: Unexpected error
schema:
$ref: '#/definitions/ServiceStatus'
definitions:
Service:
description: a service resource has the following attributes.
required:
- name
properties:
name:
type: string
description: A unique service name. If Registry DNS is enabled, the max length is 63 characters.
id:
type: string
description: A unique service id.
artifact:
description: The default artifact for all components of the service except the components which has Artifact type set to SERVICE (optional).
$ref: '#/definitions/Artifact'
resource:
description: The default resource for all components of the service (optional).
$ref: '#/definitions/Resource'
launch_time:
type: string
format: date
description: The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.
number_of_running_containers:
type: integer
format: int64
description: In get response this provides the total number of running containers for this service (across all components) at the time of request. Note, a subsequent request can return a different number as and when more containers get allocated until it reaches the total number of containers or if a flex request has been made between the two requests.
lifetime:
type: integer
format: int64
description: Life time (in seconds) of the service from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.
placement_policy:
description: (TBD) Advanced scheduling and placement policies. If not specified, it defaults to the default placement policy of the service owner. The design of placement policies is in the works. It is not very clear at this point how policies in conjunction with labels will be exposed to service owners. This is a placeholder for now. The advanced structure of this attribute will be determined by YARN-4902.
$ref: '#/definitions/PlacementPolicy'
components:
description: Components of a service.
type: array
items:
$ref: '#/definitions/Component'
configuration:
description: Config properties of a service. Configurations provided at the service/global level are available to all the components. Specific properties can be overridden at the component level.
$ref: '#/definitions/Configuration'
state:
description: State of the service. Specifying a value for this attribute for the PUT payload means update the service to this desired state.
$ref: '#/definitions/ServiceState'
quicklinks:
type: object
description: A blob of key-value pairs of quicklinks to be exported for a service.
additionalProperties:
type: string
queue:
type: string
description: The YARN queue that this service should be submitted to.
Resource:
description:
Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overridden at the component level. Only one of profile OR cpus & memory is expected. It raises a validation exception otherwise.
properties:
profile:
type: string
description: Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.
cpus:
type: integer
format: int32
description: Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).
memory:
type: string
description: Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.
PlacementPolicy:
description: Placement policy of an instance of a service. This feature is in the works in YARN-6592.
properties:
label:
type: string
description: Assigns a service to a named partition of the cluster where the service desires to run (optional). If not specified all services are submitted to a default label of the service owner. One or more labels can be set up for each service owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc.
Artifact:
description: Artifact of a service component. If not specified, component will just run the bare launch command and no artifact will be localized.
required:
- id
properties:
id:
type: string
description: Artifact id. Examples are package location uri for tarball based services, image name for docker, name of service, etc.
type:
type: string
description: Artifact type, like docker, tarball, etc. (optional). For TARBALL type, the specified tarball will be localized to the container local working directory under a folder named lib. For SERVICE type, the service specified will be read and its components will be added into this service. The original component with artifact type SERVICE will be removed (any properties specified in the original component will be ignored).
enum:
- DOCKER
- TARBALL
- SERVICE
default: DOCKER
uri:
type: string
description: Artifact location to support multiple artifact stores (optional).
Component:
description: One or more components of the service. If the service is HBase, say, then a component can be a simple role like master or regionserver. If the service is a complex business webapp, then a component can be another service, say Kafka or Storm. This opens up support for complex and nested services.
required:
- name
properties:
name:
type: string
description: Name of the service component (mandatory). If Registry DNS is enabled, the max length is 63 characters. If unique component support is enabled, the max length is lowered to 44 characters.
state:
description: The state of the component
$ref: "#/definitions/ComponentState"
dependencies:
type: array
items:
type: string
description: An array of service components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of a service should be represented as a DAG.
readiness_check:
description: Readiness check for this component.
$ref: '#/definitions/ReadinessCheck'
artifact:
description: Artifact of the component (optional). If not specified, the service level global artifact takes effect.
$ref: '#/definitions/Artifact'
launch_command:
type: string
description: The custom launch command of this component (optional for DOCKER component, required otherwise). When specified at the component level, it overrides the value specified at the global level (if any).
resource:
description: Resource of this component (optional). If not specified, the service level global resource takes effect.
$ref: '#/definitions/Resource'
number_of_containers:
type: integer
format: int64
description: Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect.
run_privileged_container:
type: boolean
description: Run all containers of this component in privileged mode (YARN-4262).
placement_policy:
description: Advanced scheduling and placement policies for all containers of this component (optional). If not specified, the service level placement_policy takes effect. Refer to the description at the global level for more details.
$ref: '#/definitions/PlacementPolicy'
configuration:
description: Config properties for this component.
$ref: '#/definitions/Configuration'
quicklinks:
type: array
items:
type: string
description: A list of quicklink keys defined at the service level, and to be resolved by this component.
ReadinessCheck:
description: A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. Hence the need for a simple interface, with scope to support advanced usecases.
required:
- type
properties:
type:
type: string
description: E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).
enum:
- HTTP
- PORT
properties:
type: object
description: A blob of key value pairs that will be used to configure the check.
additionalProperties:
type: string
artifact:
description: Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond with a 204 No content just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. Note, only artifacts of type docker are supported for now. NOT IMPLEMENTED YET
$ref: '#/definitions/Artifact'
Configuration:
description: Set of configuration properties that can be injected into the service components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.
properties:
properties:
type: object
description: A blob of key-value pairs for configuring the YARN service AM
additionalProperties:
type: string
env:
type: object
description: A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.
additionalProperties:
type: string
files:
description: Array of files that need to be created and made available as volumes in the service component containers.
type: array
items:
$ref: '#/definitions/ConfigFile'
ConfigFile:
description: A config file that needs to be created and made available as a volume in a service component container.
properties:
type:
type: string
description: Config file in the standard format like xml, properties, json, yaml, template.
enum:
- XML
- PROPERTIES
- JSON
- YAML
- TEMPLATE
- ENV
- HADOOP_XML
dest_file:
type: string
description: The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers. If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf.
src_file:
type: string
description: This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.
properties:
type: object
description: A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any, existing properties in src_file or be added as new properties in src_file.
Container:
description: An instance of a running service container.
properties:
id:
type: string
description: Unique container id of a running service, e.g. container_e3751_1458061340047_0008_01_000002.
launch_time:
type: string
format: date
description: The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time.
ip:
type: string
description: IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.
hostname:
type: string
description: Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.
bare_host:
type: string
description: The bare node or host in which the container is running, e.g. cn008.example.com.
state:
description: State of the container of a service.
$ref: '#/definitions/ContainerState'
component_instance_name:
type: string
description: Name of the component instance that this container instance belongs to. Component instance name is named as $COMPONENT_NAME-i, where i is a
monotonically increasing integer. E.g. a component called nginx can have multiple component instances named nginx-0, nginx-1, etc.
Each component instance is backed by a container instance.
resource:
description: Resource used for this container.
$ref: '#/definitions/Resource'
artifact:
description: Artifact used for this container.
$ref: '#/definitions/Artifact'
privileged_container:
type: boolean
description: Container running in privileged mode or not.
ServiceState:
description: The current state of a service.
properties:
state:
type: string
description: enum of the state of the service
enum:
- ACCEPTED
- STARTED
- STABLE
- STOPPED
- FAILED
ContainerState:
description: The current state of the container of a service.
properties:
state:
type: string
description: enum of the state of the container
enum:
- INIT
- STARTED
- READY
ComponentState:
description: The state of the component
properties:
state:
type: string
description: enum of the state of the component
enum:
- FLEXING
- STABLE
ServiceStatus:
description: The current status of a submitted service, returned as a response to the GET API.
properties:
diagnostics:
type: string
description: Diagnostic information (if any) for the reason of the current state of the service. It typically has a non-null value, if the service is in a non-running state.
state:
description: Service state.
$ref: '#/definitions/ServiceState'
code:
type: integer
format: int32
description: An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information.

View File

@@ -0,0 +1,76 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is the log4j configuration for YARN Services REST API Server
# Log rotation based on size (1GB) with a max of 10 backup files
log4j.rootLogger=INFO, restservicelog
log4j.threshold=ALL
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n
log4j.appender.restservicelog=org.apache.log4j.RollingFileAppender
log4j.appender.restservicelog.layout=org.apache.log4j.PatternLayout
log4j.appender.restservicelog.File=${REST_SERVICE_LOG_DIR}/restservice.log
log4j.appender.restservicelog.MaxFileSize=1GB
log4j.appender.restservicelog.MaxBackupIndex=10
# log layout skips stack-trace creation operations by avoiding line numbers and method names
log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
# debug edition is much more expensive
#log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
# configure stderr
# set the conversion pattern of stderr
# Print the date in ISO 8601 format
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.Target=System.err
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
# for debugging REST API Service
#log4j.logger.org.apache.hadoop.yarn.services=DEBUG
# uncomment to debug service lifecycle issues
#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
# uncomment for YARN operations
#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
# uncomment this to debug security problems
#log4j.logger.org.apache.hadoop.security=DEBUG
#crank back on some noise
log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
log4j.logger.org.apache.hadoop.hdfs=WARN
log4j.logger.org.apache.hadoop.hdfs.shortcircuit=ERROR
log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
log4j.logger.org.apache.zookeeper=WARN
log4j.logger.org.apache.curator.framework.state=ERROR
log4j.logger.org.apache.curator.framework.imps=WARN
log4j.logger.org.mortbay.log=DEBUG
View File
@ -0,0 +1,16 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DON'T DELETE. REST WEBAPP RUN SCRIPT WILL STOP WORKING.
View File
@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<web-app xmlns="http://java.sun.com/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd"
version="3.0">
<servlet>
<servlet-name>Jersey REST API</servlet-name>
<servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
<init-param>
<param-name>com.sun.jersey.config.property.packages</param-name>
<param-value>org.apache.hadoop.yarn.service.webapp,org.apache.hadoop.yarn.service.api,org.apache.hadoop.yarn.service.api.records</param-value>
</init-param>
<init-param>
<param-name>com.sun.jersey.api.json.POJOMappingFeature</param-name>
<param-value>true</param-value>
</init-param>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>Jersey REST API</servlet-name>
<url-pattern>/*</url-pattern>
</servlet-mapping>
</web-app>
View File
@ -0,0 +1,107 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.client.ServiceClient;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
/**
* A mock version of ServiceClient. This class is designed
* to simulate various error conditions that can occur
* when a consumer class calls ServiceClient.
*/
public class ServiceClientTest extends ServiceClient {
private Configuration conf = new Configuration();
protected static void init() {
}
public ServiceClientTest() {
super();
}
@Override
public Configuration getConfig() {
return conf;
}
@Override
public ApplicationId actionCreate(Service service) {
String serviceName = service.getName();
ServiceApiUtil.validateNameFormat(serviceName, getConfig());
return ApplicationId.newInstance(System.currentTimeMillis(), 1);
}
@Override
public Service getStatus(String appName) {
if (appName == null) {
throw new NullPointerException();
}
if (appName.equals("jenkins")) {
return new Service();
} else {
throw new IllegalArgumentException();
}
}
@Override
public int actionStart(String serviceName)
throws YarnException, IOException {
if (serviceName == null) {
throw new NullPointerException();
}
if (serviceName.equals("jenkins")) {
return EXIT_SUCCESS;
} else {
throw new ApplicationNotFoundException("");
}
}
@Override
public int actionStop(String serviceName, boolean waitForAppStopped)
throws YarnException, IOException {
if (serviceName == null) {
throw new NullPointerException();
}
if (serviceName.equals("jenkins")) {
return EXIT_SUCCESS;
} else {
throw new ApplicationNotFoundException("");
}
}
@Override
public int actionDestroy(String serviceName) {
if (serviceName == null) {
throw new NullPointerException();
}
if (serviceName.equals("jenkins")) {
return EXIT_SUCCESS;
} else {
throw new IllegalArgumentException();
}
}
}
View File
@ -0,0 +1,366 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.service.api.records.Artifact;
import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.Resource;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.api.records.ServiceState;
import org.apache.hadoop.yarn.service.client.ServiceClient;
import org.apache.hadoop.yarn.service.webapp.ApiServer;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import org.junit.Before;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.*;
/**
* Test case for ApiServer REST API.
*
*/
public class TestApiServer {
private ApiServer apiServer;
@Before
public void setup() throws Exception {
ServiceClient mockServerClient = new ServiceClientTest();
Configuration conf = new Configuration();
conf.set("yarn.api-service.service.client.class",
ServiceClientTest.class.getName());
ApiServer.setServiceClient(mockServerClient);
this.apiServer = new ApiServer(conf);
}
@Test
public void testPathAnnotation() {
assertNotNull(this.apiServer.getClass().getAnnotation(Path.class));
assertTrue("The controller has the annotation Path",
this.apiServer.getClass().isAnnotationPresent(Path.class));
final Path path = this.apiServer.getClass()
.getAnnotation(Path.class);
assertEquals("The path has /ws/v1 annotation", path.value(),
"/ws/v1");
}
@Test
public void testGetVersion() {
final Response actual = apiServer.getVersion();
assertEquals("Version number is", actual.getStatus(),
Response.ok().build().getStatus());
}
@Test
public void testBadCreateService() {
Service service = new Service();
// Test for invalid argument
final Response actual = apiServer.createService(service);
assertEquals("Create service is ", actual.getStatus(),
Response.status(Status.BAD_REQUEST).build().getStatus());
}
@Test
public void testGoodCreateService() {
Service service = new Service();
service.setName("jenkins");
Artifact artifact = new Artifact();
artifact.setType(TypeEnum.DOCKER);
artifact.setId("jenkins:latest");
Resource resource = new Resource();
resource.setCpus(1);
resource.setMemory("2048");
List<Component> components = new ArrayList<Component>();
Component c = new Component();
c.setName("jenkins");
c.setNumberOfContainers(1L);
c.setArtifact(artifact);
c.setLaunchCommand("");
c.setResource(resource);
components.add(c);
service.setComponents(components);
final Response actual = apiServer.createService(service);
assertEquals("Create service is ", actual.getStatus(),
Response.status(Status.ACCEPTED).build().getStatus());
}
@Test
public void testBadGetService() {
final Response actual = apiServer.getService("no-jenkins");
assertEquals("Get service is ", actual.getStatus(),
Response.status(Status.NOT_FOUND).build().getStatus());
}
@Test
public void testBadGetService2() {
final Response actual = apiServer.getService(null);
assertEquals("Get service is ", actual.getStatus(),
Response.status(Status.INTERNAL_SERVER_ERROR)
.build().getStatus());
}
@Test
public void testGoodGetService() {
final Response actual = apiServer.getService("jenkins");
assertEquals("Get service is ", actual.getStatus(),
Response.status(Status.OK).build().getStatus());
}
@Test
public void testBadDeleteService() {
final Response actual = apiServer.deleteService("no-jenkins");
assertEquals("Delete service is ", actual.getStatus(),
Response.status(Status.BAD_REQUEST).build().getStatus());
}
@Test
public void testBadDeleteService2() {
final Response actual = apiServer.deleteService(null);
assertEquals("Delete service is ", actual.getStatus(),
Response.status(Status.INTERNAL_SERVER_ERROR)
.build().getStatus());
}
@Test
public void testGoodDeleteService() {
final Response actual = apiServer.deleteService("jenkins");
assertEquals("Delete service is ", actual.getStatus(),
Response.status(Status.OK).build().getStatus());
}
@Test
public void testDecreaseContainerAndStop() {
Service service = new Service();
service.setState(ServiceState.STOPPED);
service.setName("jenkins");
Artifact artifact = new Artifact();
artifact.setType(TypeEnum.DOCKER);
artifact.setId("jenkins:latest");
Resource resource = new Resource();
resource.setCpus(1);
resource.setMemory("2048");
List<Component> components = new ArrayList<Component>();
Component c = new Component();
c.setName("jenkins");
c.setNumberOfContainers(0L);
c.setArtifact(artifact);
c.setLaunchCommand("");
c.setResource(resource);
components.add(c);
service.setComponents(components);
final Response actual = apiServer.updateService("jenkins",
service);
assertEquals("update service is ", actual.getStatus(),
Response.status(Status.OK).build().getStatus());
}
@Test
public void testBadDecreaseContainerAndStop() {
Service service = new Service();
service.setState(ServiceState.STOPPED);
service.setName("no-jenkins");
Artifact artifact = new Artifact();
artifact.setType(TypeEnum.DOCKER);
artifact.setId("jenkins:latest");
Resource resource = new Resource();
resource.setCpus(1);
resource.setMemory("2048");
List<Component> components = new ArrayList<Component>();
Component c = new Component();
c.setName("no-jenkins");
c.setNumberOfContainers(-1L);
c.setArtifact(artifact);
c.setLaunchCommand("");
c.setResource(resource);
components.add(c);
service.setComponents(components);
System.out.println("before stop");
final Response actual = apiServer.updateService("no-jenkins",
service);
assertEquals("flex service is ", actual.getStatus(),
Response.status(Status.BAD_REQUEST).build().getStatus());
}
@Test
public void testIncreaseContainersAndStart() {
Service service = new Service();
service.setState(ServiceState.STARTED);
service.setName("jenkins");
Artifact artifact = new Artifact();
artifact.setType(TypeEnum.DOCKER);
artifact.setId("jenkins:latest");
Resource resource = new Resource();
resource.setCpus(1);
resource.setMemory("2048");
List<Component> components = new ArrayList<Component>();
Component c = new Component();
c.setName("jenkins");
c.setNumberOfContainers(2L);
c.setArtifact(artifact);
c.setLaunchCommand("");
c.setResource(resource);
components.add(c);
service.setComponents(components);
final Response actual = apiServer.updateService("jenkins",
service);
assertEquals("flex service is ", actual.getStatus(),
Response.status(Status.OK).build().getStatus());
}
@Test
public void testBadStartServices() {
Service service = new Service();
service.setState(ServiceState.STARTED);
service.setName("no-jenkins");
Artifact artifact = new Artifact();
artifact.setType(TypeEnum.DOCKER);
artifact.setId("jenkins:latest");
Resource resource = new Resource();
resource.setCpus(1);
resource.setMemory("2048");
List<Component> components = new ArrayList<Component>();
Component c = new Component();
c.setName("jenkins");
c.setNumberOfContainers(2L);
c.setArtifact(artifact);
c.setLaunchCommand("");
c.setResource(resource);
components.add(c);
service.setComponents(components);
final Response actual = apiServer.updateService("no-jenkins",
service);
assertEquals("start service is ", actual.getStatus(),
Response.status(Status.INTERNAL_SERVER_ERROR).build()
.getStatus());
}
@Test
public void testGoodStartServices() {
Service service = new Service();
service.setState(ServiceState.STARTED);
service.setName("jenkins");
Artifact artifact = new Artifact();
artifact.setType(TypeEnum.DOCKER);
artifact.setId("jenkins:latest");
Resource resource = new Resource();
resource.setCpus(1);
resource.setMemory("2048");
List<Component> components = new ArrayList<Component>();
Component c = new Component();
c.setName("jenkins");
c.setNumberOfContainers(2L);
c.setArtifact(artifact);
c.setLaunchCommand("");
c.setResource(resource);
components.add(c);
service.setComponents(components);
final Response actual = apiServer.updateService("jenkins",
service);
assertEquals("start service is ", actual.getStatus(),
Response.status(Status.OK).build().getStatus());
}
@Test
public void testBadStopServices() {
Service service = new Service();
service.setState(ServiceState.STOPPED);
service.setName("no-jenkins");
Artifact artifact = new Artifact();
artifact.setType(TypeEnum.DOCKER);
artifact.setId("jenkins:latest");
Resource resource = new Resource();
resource.setCpus(1);
resource.setMemory("2048");
List<Component> components = new ArrayList<Component>();
Component c = new Component();
c.setName("no-jenkins");
c.setNumberOfContainers(-1L);
c.setArtifact(artifact);
c.setLaunchCommand("");
c.setResource(resource);
components.add(c);
service.setComponents(components);
System.out.println("before stop");
final Response actual = apiServer.updateService("no-jenkins",
service);
assertEquals("stop service is ", actual.getStatus(),
Response.status(Status.BAD_REQUEST).build().getStatus());
}
@Test
public void testGoodStopServices() {
Service service = new Service();
service.setState(ServiceState.STARTED);
service.setName("jenkins");
Artifact artifact = new Artifact();
artifact.setType(TypeEnum.DOCKER);
artifact.setId("jenkins:latest");
Resource resource = new Resource();
resource.setCpus(1);
resource.setMemory("2048");
List<Component> components = new ArrayList<Component>();
Component c = new Component();
c.setName("jenkins");
c.setNumberOfContainers(-1L);
c.setArtifact(artifact);
c.setLaunchCommand("");
c.setResource(resource);
components.add(c);
service.setComponents(components);
System.out.println("before stop");
final Response actual = apiServer.updateService("jenkins",
service);
assertEquals("stop service is ", actual.getStatus(),
Response.status(Status.OK).build().getStatus());
}
@Test
public void testUpdateService() {
Service service = new Service();
service.setState(ServiceState.STARTED);
service.setName("no-jenkins");
Artifact artifact = new Artifact();
artifact.setType(TypeEnum.DOCKER);
artifact.setId("jenkins:latest");
Resource resource = new Resource();
resource.setCpus(1);
resource.setMemory("2048");
List<Component> components = new ArrayList<Component>();
Component c = new Component();
c.setName("no-jenkins");
c.setNumberOfContainers(-1L);
c.setArtifact(artifact);
c.setLaunchCommand("");
c.setResource(resource);
components.add(c);
service.setComponents(components);
System.out.println("before stop");
final Response actual = apiServer.updateService("no-jenkins",
service);
assertEquals("update service is ", actual.getStatus(),
Response.status(Status.INTERNAL_SERVER_ERROR)
.build().getStatus());
}
}
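Each test above hand-builds the same one-component fixture. A hypothetical private helper (not in the patch; it only reuses the setters already exercised above) would collapse that duplication:

  // Hypothetical fixture factory mirroring the inline setup in each test.
  private Service newJenkinsService(String name, long containers,
      ServiceState state) {
    Service service = new Service();
    service.setState(state);
    service.setName(name);
    Artifact artifact = new Artifact();
    artifact.setType(TypeEnum.DOCKER);
    artifact.setId("jenkins:latest");
    Resource resource = new Resource();
    resource.setCpus(1);
    resource.setMemory("2048");
    Component c = new Component();
    c.setName(name);
    c.setNumberOfContainers(containers);
    c.setArtifact(artifact);
    c.setLaunchCommand("");
    c.setResource(resource);
    List<Component> components = new ArrayList<>();
    components.add(c);
    service.setComponents(components);
    return service;
  }

A call such as newJenkinsService("jenkins", 2L, ServiceState.STARTED) would then stand in for the roughly fifteen lines of setup repeated per test.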
View File
@ -0,0 +1,48 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<FindBugsFilter>
<Match>
<Package name="org.apache.hadoop.yarn.proto" />
</Match>
<Match>
<class name="org.apache.hadoop.yarn.service.utils.ServiceApiUtil" />
<Bug pattern="MS_CANNOT_BE_FINAL" />
</Match>
<Match>
<Class name="org.apache.hadoop.yarn.service.utils.JsonSerDeser" />
<Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
</Match>
<Match>
<Class name="org.apache.hadoop.yarn.service.utils.JsonSerDeser" />
<Bug pattern="UI_INHERITANCE_UNSAFE_GETRESOURCE" />
</Match>
<Match>
<Package name="org.apache.hadoop.yarn.service.client.params"/>
<Bug pattern="UWF_UNWRITTEN_PUBLIC_OR_PROTECTED_FIELD"/>
</Match>
<Match>
<Package name="org.apache.hadoop.yarn.service.client.params"/>
<Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD"/>
</Match>
<Match>
<Class name="org.apache.hadoop.yarn.service.client.ServiceClient"/>
<Field name="registryClient" />
<Bug pattern="IS2_INCONSISTENT_SYNC"/>
</Match>
</FindBugsFilter>
View File
@ -0,0 +1,62 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is the log4j configuration for the YARN service Application Master
# Log rotation based on size (256MB) with a max of 20 backup files
log4j.rootLogger=INFO, amlog
log4j.threshold=ALL
log4j.appender.amlog=org.apache.log4j.RollingFileAppender
log4j.appender.amlog.layout=org.apache.log4j.PatternLayout
log4j.appender.amlog.File=${LOG_DIR}/serviceam.log
log4j.appender.amlog.MaxFileSize=256MB
log4j.appender.amlog.MaxBackupIndex=20
# log layout skips stack-trace creation operations by avoiding line numbers and method names
log4j.appender.amlog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
# debug edition is much more expensive
#log4j.appender.amlog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
# configure stderr
# set the conversion pattern of stderr
# Print the date in ISO 8601 format
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.Target=System.err
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
# for debugging yarn-service framework
#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
# uncomment for YARN operations
#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
# uncomment this to debug security problems
#log4j.logger.org.apache.hadoop.security=DEBUG
#crank back on some noise
log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
log4j.logger.org.apache.hadoop.hdfs=WARN
log4j.logger.org.apache.zookeeper=WARN
log4j.logger.org.apache.curator.framework.state=ERROR
log4j.logger.org.apache.curator.framework.imps=WARN
View File
@ -0,0 +1,62 @@
{
"name": "httpd-service-no-dns",
"lifetime": "3600",
"components": [
{
"name": "httpd",
"number_of_containers": 2,
"artifact": {
"id": "centos/httpd-24-centos7:latest",
"type": "DOCKER"
},
"launch_command": "/usr/bin/run-httpd",
"resource": {
"cpus": 1,
"memory": "1024"
},
"readiness_check": {
"type": "HTTP",
"properties": {
"url": "http://${THIS_HOST}:8080"
}
},
"configuration": {
"files": [
{
"type": "TEMPLATE",
"dest_file": "/var/www/html/index.html",
"properties": {
"content": "<html><header><title>Title</title></header><body>Hello from ${COMPONENT_INSTANCE_NAME}!</body></html>"
}
}
]
}
},
{
"name": "httpd-proxy",
"number_of_containers": 1,
"dependencies": [ "httpd" ],
"artifact": {
"id": "centos/httpd-24-centos7:latest",
"type": "DOCKER"
},
"launch_command": "/usr/bin/run-httpd",
"resource": {
"cpus": 1,
"memory": "1024"
},
"configuration": {
"files": [
{
"type": "TEMPLATE",
"dest_file": "/etc/httpd/conf.d/httpd-proxy.conf",
"src_file": "httpd-proxy-no-dns.conf"
}
]
}
}
],
"quicklinks": {
"Apache HTTP Server": "http://httpd-proxy-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080"
}
}
View File
@ -0,0 +1,24 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<Proxy balancer://test>
BalancerMember http://${HTTPD-0_IP}:8080
BalancerMember http://${HTTPD-1_IP}:8080
ProxySet lbmethod=bytraffic
</Proxy>
ProxyPass "/" "balancer://test/"
ProxyPassReverse "/" "balancer://test/"
View File
@ -0,0 +1,24 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<Proxy balancer://test>
BalancerMember http://httpd-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080
BalancerMember http://httpd-1.${SERVICE_NAME}.${USER}.${DOMAIN}:8080
ProxySet lbmethod=bytraffic
</Proxy>
ProxyPass "/" "balancer://test/"
ProxyPassReverse "/" "balancer://test/"
View File
@ -0,0 +1,55 @@
{
"name": "httpd-service",
"lifetime": "3600",
"components": [
{
"name": "httpd",
"number_of_containers": 2,
"artifact": {
"id": "centos/httpd-24-centos7:latest",
"type": "DOCKER"
},
"launch_command": "/usr/bin/run-httpd",
"resource": {
"cpus": 1,
"memory": "1024"
},
"configuration": {
"files": [
{
"type": "TEMPLATE",
"dest_file": "/var/www/html/index.html",
"properties": {
"content": "<html><header><title>Title</title></header><body>Hello from ${COMPONENT_INSTANCE_NAME}!</body></html>"
}
}
]
}
},
{
"name": "httpd-proxy",
"number_of_containers": 1,
"artifact": {
"id": "centos/httpd-24-centos7:latest",
"type": "DOCKER"
},
"launch_command": "/usr/bin/run-httpd",
"resource": {
"cpus": 1,
"memory": "1024"
},
"configuration": {
"files": [
{
"type": "TEMPLATE",
"dest_file": "/etc/httpd/conf.d/httpd-proxy.conf",
"src_file": "httpd-proxy.conf"
}
]
}
}
],
"quicklinks": {
"Apache HTTP Server": "http://httpd-proxy-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080"
}
}
View File
@ -0,0 +1,15 @@
{
"name": "sleeper-service",
"components" :
[
{
"name": "sleeper",
"number_of_containers": 2,
"launch_command": "sleep 900000",
"resource": {
"cpus": 1,
"memory": "256"
}
}
]
}
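The same spec can be assembled through the Java API records; a rough equivalent (sketch only, using the setters from org.apache.hadoop.yarn.service.api.records that TestApiServer exercises earlier in this patch):

// Programmatic twin of sleeper-service.json above.
Service sleeper = new Service();
sleeper.setName("sleeper-service");
Component comp = new Component();
comp.setName("sleeper");
comp.setNumberOfContainers(2L);
comp.setLaunchCommand("sleep 900000");
Resource res = new Resource();
res.setCpus(1);
res.setMemory("256");
comp.setResource(res);
List<Component> comps = new ArrayList<>();
comps.add(comp);
sleeper.setComponents(comps);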
View File
@ -0,0 +1,255 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-services</artifactId>
<version>3.1.0-SNAPSHOT</version>
</parent>
<artifactId>hadoop-yarn-services-core</artifactId>
<packaging>jar</packaging>
<name>Apache Hadoop YARN Services Core</name>
<properties>
<!-- Needed for generating FindBugs warnings using parent pom -->
<yarn.basedir>${project.parent.basedir}</yarn.basedir>
</properties>
<build>
<!-- Resources are filtered for dynamic updates; this pulls the build info in. -->
<resources>
<resource>
<directory>src/main/resources</directory>
<filtering>true</filtering>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<executions>
<execution>
<id>compile-protoc</id>
<goals>
<goal>protoc</goal>
</goals>
<configuration>
<protocVersion>${protobuf.version}</protocVersion>
<protocCommand>${protoc.path}</protocCommand>
<imports>
<param>${basedir}/src/main/proto</param>
</imports>
<source>
<directory>${basedir}/src/main/proto</directory>
<includes>
<include>ClientAMProtocol.proto</include>
</includes>
</source>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<environmentVariables>
<JAVA_HOME>${java.home}</JAVA_HOME>
</environmentVariables>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<excludes>
<exclude>**/*.json</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-registry</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
</dependency>
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-configuration2</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
</dependency>
<dependency>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</dependency>
<dependency>
<groupId>org.yaml</groupId>
<artifactId>snakeyaml</artifactId>
</dependency>
<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-annotations</artifactId>
</dependency>
<!-- ======================================================== -->
<!-- Test dependencies -->
<!-- ======================================================== -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
</project>
View File
@ -0,0 +1,40 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto;
import java.io.IOException;
public interface ClientAMProtocol {
FlexComponentsResponseProto flexComponents(FlexComponentsRequestProto request)
throws IOException, YarnException;
GetStatusResponseProto getStatus(GetStatusRequestProto requestProto)
throws IOException, YarnException;
StopResponseProto stop(StopRequestProto requestProto)
throws IOException, YarnException;
}
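Callers talk to this interface through the protobuf-generated builders. A hedged sketch follows (the builder method names are inferred from the generated getters used in ClientAMService below, and clientAMProtocol is assumed to be an RPC proxy for this interface):

FlexComponentsRequestProto request = FlexComponentsRequestProto.newBuilder()
    .addComponents(ComponentCountProto.newBuilder()
        .setName("sleeper")                 // component to flex
        .setNumberOfContainers(3L)          // new desired count
        .build())
    .build();
FlexComponentsResponseProto response =
    clientAMProtocol.flexComponents(request);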
View File
@ -0,0 +1,132 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto;
import org.apache.hadoop.yarn.service.component.ComponentEvent;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
import static org.apache.hadoop.yarn.service.component.ComponentEventType.FLEX;
public class ClientAMService extends AbstractService
implements ClientAMProtocol {
private static final Logger LOG =
LoggerFactory.getLogger(ClientAMService.class);
private ServiceContext context;
private Server server;
private InetSocketAddress bindAddress;
public ClientAMService(ServiceContext context) {
super("Client AM Service");
this.context = context;
}
@Override protected void serviceStart() throws Exception {
Configuration conf = getConfig();
YarnRPC rpc = YarnRPC.create(conf);
InetSocketAddress address = new InetSocketAddress(0);
server = rpc.getServer(ClientAMProtocol.class, this, address, conf,
context.secretManager, 1);
server.start();
String nodeHostString =
System.getenv(ApplicationConstants.Environment.NM_HOST.name());
bindAddress = NetUtils.createSocketAddrForHost(nodeHostString,
server.getListenerAddress().getPort());
LOG.info("Instantiated ClientAMService at " + bindAddress);
super.serviceStart();
}
@Override protected void serviceStop() throws Exception {
if (server != null) {
server.stop();
}
super.serviceStop();
}
@Override public FlexComponentsResponseProto flexComponents(
FlexComponentsRequestProto request) throws IOException {
if (!request.getComponentsList().isEmpty()) {
for (ComponentCountProto component : request.getComponentsList()) {
ComponentEvent event = new ComponentEvent(component.getName(), FLEX)
.setDesired(component.getNumberOfContainers());
context.scheduler.getDispatcher().getEventHandler().handle(event);
LOG.info("Flexing component {} to {}", component.getName(),
component.getNumberOfContainers());
}
}
return FlexComponentsResponseProto.newBuilder().build();
}
@Override
public GetStatusResponseProto getStatus(GetStatusRequestProto request)
throws IOException, YarnException {
String stat = ServiceApiUtil.jsonSerDeser.toJson(context.service);
return GetStatusResponseProto.newBuilder().setStatus(stat).build();
}
@Override
public StopResponseProto stop(StopRequestProto requestProto)
throws IOException, YarnException {
LOG.info("Stop the service.");
// Stop the service after a 2-second delay to make sure this RPC call completes.
// The shutdown hook will then be executed, which stops the AM gracefully.
Thread thread = new Thread() {
@Override
public void run() {
try {
Thread.sleep(2000);
ExitUtil.terminate(0);
} catch (InterruptedException e) {
LOG.error("Interrupted while stopping", e);
}
}
};
thread.start();
return StopResponseProto.newBuilder().build();
}
public InetSocketAddress getBindAddress() {
return bindAddress;
}
}
View File
@ -0,0 +1,89 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.yarn.service.component.Component;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.NODE_BLACKLIST_THRESHOLD;
/**
* This tracks the container failures per node. If the failure counter exceeds
* the maxFailurePerNode limit, it'll blacklist that node.
*
*/
public class ContainerFailureTracker {
private static final Logger LOG =
LoggerFactory.getLogger(ContainerFailureTracker.class);
// Host -> num container failures
private Map<String, Integer> failureCountPerNode = new HashMap<>();
private Set<String> blackListedNodes = new HashSet<>();
private ServiceContext context;
private int maxFailurePerNode;
private Component component;
public ContainerFailureTracker(ServiceContext context, Component component) {
this.context = context;
this.component = component;
maxFailurePerNode = component.getComponentSpec().getConfiguration()
.getPropertyInt(NODE_BLACKLIST_THRESHOLD, 3);
}
public synchronized void incNodeFailure(String host) {
int num = 0;
if (failureCountPerNode.containsKey(host)) {
num = failureCountPerNode.get(host);
}
num++;
failureCountPerNode.put(host, num);
// blacklist the node if it exceeds the max failure count
if (num > maxFailurePerNode && !blackListedNodes.contains(host)) {
List<String> blacklists = new ArrayList<>();
blacklists.add(host);
blackListedNodes.add(host);
context.scheduler.getAmRMClient().updateBlacklist(blacklists, null);
LOG.info("[COMPONENT {}]: Failed {} times on this host, blacklisted {}."
+ " Current list of blacklisted nodes: {}",
component.getName(), num, host, blackListedNodes);
}
}
public synchronized void resetContainerFailures() {
// reset container failure counter per node
failureCountPerNode.clear();
context.scheduler.getAmRMClient()
.updateBlacklist(null, new ArrayList<>(blackListedNodes));
LOG.info("[COMPONENT {}]: Clearing blacklisted nodes {} ",
component.getName(), blackListedNodes);
blackListedNodes.clear();
}
}
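A hedged usage sketch (context, component, and host are assumed to be in scope; the threshold comes from the NODE_BLACKLIST_THRESHOLD property read in the constructor, default 3):

ContainerFailureTracker tracker =
    new ContainerFailureTracker(context, component);

// Charge the host on every container failure; once one host exceeds
// the threshold, it is blacklisted with the RM via the AMRMClient.
tracker.incNodeFailure(host);

// Once the component stabilizes, wipe the counters and lift all
// blacklists in one call.
tracker.resetContainerFailures();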
View File
@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import com.google.common.cache.LoadingCache;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.api.records.ConfigFile;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
public class ServiceContext {
public Service service = null;
public SliderFileSystem fs;
public String serviceHdfsDir = "";
public ApplicationAttemptId attemptId;
public LoadingCache<ConfigFile, Object> configCache;
public ServiceScheduler scheduler;
public ClientToAMTokenSecretManager secretManager;
public ClientAMService clientAMService;
public ServiceContext() {
}
}
View File
@ -0,0 +1,169 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.service.monitor.ServiceMonitor;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Map;
public class ServiceMaster extends CompositeService {
private static final Logger LOG =
LoggerFactory.getLogger(ServiceMaster.class);
public static final String YARNFILE_OPTION = "yarnfile";
private static String serviceDefPath;
protected ServiceContext context;
public ServiceMaster(String name) {
super(name);
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
//TODO Deprecate slider conf, make sure works with yarn conf
printSystemEnv();
if (UserGroupInformation.isSecurityEnabled()) {
UserGroupInformation.setConfiguration(conf);
}
LOG.info("Login user is {}", UserGroupInformation.getLoginUser());
context = new ServiceContext();
Path appDir = getAppDir();
context.serviceHdfsDir = appDir.toString();
SliderFileSystem fs = new SliderFileSystem(conf);
context.fs = fs;
fs.setAppDir(appDir);
loadApplicationJson(context, fs);
// Take yarn config from YarnFile and merge them into YarnConfiguration
for (Map.Entry<String, String> entry : context.service
.getConfiguration().getProperties().entrySet()) {
conf.set(entry.getKey(), entry.getValue());
}
ContainerId amContainerId = getAMContainerId();
ApplicationAttemptId attemptId = amContainerId.getApplicationAttemptId();
LOG.info("Service AppAttemptId: " + attemptId);
context.attemptId = attemptId;
// configure AM to wait forever for RM
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1);
conf.unset(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS);
DefaultMetricsSystem.initialize("ServiceAppMaster");
context.secretManager = new ClientToAMTokenSecretManager(attemptId, null);
ClientAMService clientAMService = new ClientAMService(context);
context.clientAMService = clientAMService;
addService(clientAMService);
ServiceScheduler scheduler = createServiceScheduler(context);
addService(scheduler);
context.scheduler = scheduler;
ServiceMonitor monitor = new ServiceMonitor("Service Monitor", context);
addService(monitor);
super.serviceInit(conf);
}
protected ContainerId getAMContainerId() throws BadClusterStateException {
return ContainerId.fromString(ServiceUtils.mandatoryEnvVariable(
ApplicationConstants.Environment.CONTAINER_ID.name()));
}
protected Path getAppDir() {
return new Path(serviceDefPath).getParent();
}
protected ServiceScheduler createServiceScheduler(ServiceContext context)
throws IOException, YarnException {
return new ServiceScheduler(context);
}
protected void loadApplicationJson(ServiceContext context,
SliderFileSystem fs) throws IOException {
context.service = ServiceApiUtil
.loadServiceFrom(fs, new Path(serviceDefPath));
LOG.info(context.service.toString());
}
@Override
protected void serviceStop() throws Exception {
LOG.info("Stopping app master");
super.serviceStop();
}
private void printSystemEnv() {
for (Map.Entry<String, String> envs : System.getenv().entrySet()) {
LOG.info("{} = {}", envs.getKey(), envs.getValue());
}
}
public static void main(String[] args) throws Exception {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(ServiceMaster.class, args, LOG);
try {
ServiceMaster serviceMaster = new ServiceMaster("Service Master");
ShutdownHookManager.get()
.addShutdownHook(new CompositeServiceShutdownHook(serviceMaster), 30);
YarnConfiguration conf = new YarnConfiguration();
Options opts = new Options();
opts.addOption(YARNFILE_OPTION, true, "HDFS path to JSON service " +
"specification");
opts.getOption(YARNFILE_OPTION).setRequired(true);
GenericOptionsParser parser = new GenericOptionsParser(conf, opts, args);
CommandLine cmdLine = parser.getCommandLine();
serviceMaster.serviceDefPath = cmdLine.getOptionValue(YARNFILE_OPTION);
serviceMaster.init(conf);
serviceMaster.start();
} catch (Throwable t) {
LOG.error("Error starting service master", t);
ExitUtil.terminate(1, "Error starting service master");
}
}
}
View File
@ -0,0 +1,94 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import static org.apache.hadoop.metrics2.lib.Interns.info;
@Metrics(context = "yarn-native-service")
public class ServiceMetrics implements MetricsSource {
@Metric("containers requested")
public MutableGaugeInt containersRequested;
@Metric("containers running")
public MutableGaugeInt containersRunning;
@Metric("containers ready")
public MutableGaugeInt containersReady;
@Metric("containers desired")
public MutableGaugeInt containersDesired;
@Metric("containers succeeded")
public MutableGaugeInt containersSucceeded;
@Metric("containers failed")
public MutableGaugeInt containersFailed;
@Metric("containers preempted")
public MutableGaugeInt containersPreempted;
@Metric("containers surplus")
public MutableGaugeInt surplusContainers;
@Metric("containers failed due to disk failure")
public MutableGaugeInt containersDiskFailure;
protected final MetricsRegistry registry;
public ServiceMetrics(MetricsInfo metricsInfo) {
registry = new MetricsRegistry(metricsInfo);
}
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
registry.snapshot(collector.addRecord(registry.info()), all);
}
public static ServiceMetrics register(String name, String description) {
ServiceMetrics metrics = new ServiceMetrics(info(name, description));
DefaultMetricsSystem.instance().register(name, description, metrics);
return metrics;
}
public void tag(String name, String description, String value) {
registry.tag(name, description, value);
}
@Override public String toString() {
return "ServiceMetrics{"
+ "containersRequested=" + containersRequested.value()
+ ", containersRunning=" + containersRunning.value()
+ ", containersDesired=" + containersDesired.value()
+ ", containersSucceeded=" + containersSucceeded.value()
+ ", containersFailed=" + containersFailed.value()
+ ", containersPreempted=" + containersPreempted.value()
+ ", surplusContainers=" + surplusContainers.value() + '}';
}
}
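ServiceScheduler below registers one of these per service and tags it; a minimal standalone sketch of the same pattern (the service name is illustrative; MutableGaugeInt exposes incr()/decr() from Hadoop metrics2):

ServiceMetrics metrics =
    ServiceMetrics.register("sleeper-service", "Metrics for service");
metrics.tag("type", "Metrics type [component or service]", "service");
metrics.containersRequested.incr();   // a container was asked for
metrics.containersRunning.incr();     // ... and came up
metrics.containersRequested.decr();   // request satisfied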
View File
@ -0,0 +1,691 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.UpdatedContainer;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.TimelineV2Client;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.service.api.ServiceApiConstants;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.api.records.ConfigFile;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType;
import org.apache.hadoop.yarn.service.component.Component;
import org.apache.hadoop.yarn.service.component.ComponentEvent;
import org.apache.hadoop.yarn.service.component.ComponentEventType;
import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
import org.apache.hadoop.yarn.service.containerlaunch.ContainerLaunchService;
import org.apache.hadoop.yarn.service.provider.ProviderUtils;
import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
import org.apache.hadoop.yarn.service.timelineservice.ServiceMetricsSink;
import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
import org.apache.hadoop.yarn.util.BoundedAppender;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.nio.ByteBuffer;
import java.text.MessageFormat;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*;
import static org.apache.hadoop.yarn.service.component.ComponentEventType.*;
/**
* The core scheduling engine of a YARN service AM: registers with the RM,
* builds all components from the service spec, and drives them through the
* component and component-instance event dispatchers.
*/
public class ServiceScheduler extends CompositeService {
private static final Logger LOG =
LoggerFactory.getLogger(ServiceScheduler.class);
private Service app;
// component_name -> component
private final Map<String, Component> componentsByName =
new ConcurrentHashMap<>();
// id -> component
protected final Map<Long, Component> componentsById =
new ConcurrentHashMap<>();
private final Map<ContainerId, ComponentInstance> liveInstances =
new ConcurrentHashMap<>();
private ServiceMetrics serviceMetrics;
private ServiceTimelinePublisher serviceTimelinePublisher;
// Global diagnostics that will be reported to RM on exit.
// The unit is the number of characters. This will be limited to 64 * 1024
// characters.
private BoundedAppender diagnostics = new BoundedAppender(64 * 1024);
// A cache for loading config files from remote storage such as hdfs
public LoadingCache<ConfigFile, Object> configFileCache = null;
public ScheduledExecutorService executorService;
public Map<String, String> globalTokens = new HashMap<>();
private AMRMClientAsync<AMRMClient.ContainerRequest> amRMClient;
private NMClientAsync nmClient;
private AsyncDispatcher dispatcher;
AsyncDispatcher compInstanceDispatcher;
private YarnRegistryViewForProviders yarnRegistryOperations;
private ServiceContext context;
private ContainerLaunchService containerLaunchService;
public ServiceScheduler(ServiceContext context) {
super(context.service.getName());
this.context = context;
}
public void buildInstance(ServiceContext context, Configuration configuration)
throws YarnException {
app = context.service;
executorService = Executors.newScheduledThreadPool(10);
RegistryOperations registryClient = RegistryOperationsFactory
.createInstance("ServiceScheduler", configuration);
addIfService(registryClient);
yarnRegistryOperations =
createYarnRegistryOperations(context, registryClient);
// register metrics
serviceMetrics = ServiceMetrics
.register(app.getName(), "Metrics for service");
serviceMetrics.tag("type", "Metrics type [component or service]", "service");
serviceMetrics.tag("appId", "Service id for service", app.getId());
amRMClient = createAMRMClient();
addIfService(amRMClient);
nmClient = createNMClient();
addIfService(nmClient);
dispatcher = new AsyncDispatcher("Component dispatcher");
dispatcher.register(ComponentEventType.class,
new ComponentEventHandler());
dispatcher.setDrainEventsOnStop();
addIfService(dispatcher);
compInstanceDispatcher =
new AsyncDispatcher("CompInstance dispatcher");
compInstanceDispatcher.register(ComponentInstanceEventType.class,
new ComponentInstanceEventHandler());
addIfService(compInstanceDispatcher);
containerLaunchService = new ContainerLaunchService(context.fs);
addService(containerLaunchService);
if (YarnConfiguration.timelineServiceV2Enabled(configuration)) {
TimelineV2Client timelineClient = TimelineV2Client
.createTimelineClient(context.attemptId.getApplicationId());
amRMClient.registerTimelineV2Client(timelineClient);
serviceTimelinePublisher = new ServiceTimelinePublisher(timelineClient);
addService(serviceTimelinePublisher);
DefaultMetricsSystem.instance().register("ServiceMetricsSink",
"For processing metrics to ATS",
new ServiceMetricsSink(serviceTimelinePublisher));
LOG.info("Timeline v2 is enabled.");
}
initGlobalTokensForSubstitute(context);
// substitute quicklinks
ProviderUtils.substituteMapWithTokens(app.getQuicklinks(), globalTokens);
createConfigFileCache(context.fs.getFileSystem());
createAllComponents();
}
protected YarnRegistryViewForProviders createYarnRegistryOperations(
ServiceContext context, RegistryOperations registryClient) {
return new YarnRegistryViewForProviders(registryClient,
RegistryUtils.currentUser(), YarnServiceConstants.APP_TYPE, app.getName(),
context.attemptId);
}
protected NMClientAsync createNMClient() {
return NMClientAsync.createNMClientAsync(new NMClientCallback());
}
protected AMRMClientAsync<AMRMClient.ContainerRequest> createAMRMClient() {
return AMRMClientAsync
.createAMRMClientAsync(1000, new AMRMClientCallback());
}
@Override
public void serviceInit(Configuration conf) throws Exception {
try {
buildInstance(context, conf);
} catch (YarnException e) {
throw new YarnRuntimeException(e);
}
super.serviceInit(conf);
}
@Override
public void serviceStop() throws Exception {
LOG.info("Stopping service scheduler");
if (executorService != null) {
executorService.shutdownNow();
}
DefaultMetricsSystem.shutdown();
if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
serviceTimelinePublisher
.serviceAttemptUnregistered(context, diagnostics.toString());
}
String msg = diagnostics.toString()
+ " Navigate to the failed component for more details.";
amRMClient
.unregisterApplicationMaster(FinalApplicationStatus.ENDED, msg, "");
LOG.info("Service " + app.getName()
+ " unregistered with RM, with attemptId = " + context.attemptId
+ ", diagnostics = " + diagnostics);
super.serviceStop();
}
@Override
public void serviceStart() throws Exception {
super.serviceStart();
InetSocketAddress bindAddress = context.clientAMService.getBindAddress();
RegisterApplicationMasterResponse response = amRMClient
.registerApplicationMaster(bindAddress.getHostName(),
bindAddress.getPort(), "N/A");
if (response.getClientToAMTokenMasterKey() != null
&& response.getClientToAMTokenMasterKey().remaining() != 0) {
context.secretManager
.setMasterKey(response.getClientToAMTokenMasterKey().array());
}
registerServiceInstance(context.attemptId, app);
// recover components based on containers sent from RM
recoverComponents(response);
for (Component component : componentsById.values()) {
// Trigger initial evaluation of components
if (component.areDependenciesReady()) {
LOG.info("Triggering initial evaluation of component {}",
component.getName());
ComponentEvent event = new ComponentEvent(component.getName(), FLEX)
.setDesired(component.getComponentSpec().getNumberOfContainers());
component.handle(event);
}
}
}
private void recoverComponents(RegisterApplicationMasterResponse response) {
List<Container> recoveredContainers = response
.getContainersFromPreviousAttempts();
LOG.info("Received {} containers from previous attempt.",
recoveredContainers.size());
Map<String, ServiceRecord> existingRecords = new HashMap<>();
List<String> existingComps = null;
try {
existingComps = yarnRegistryOperations.listComponents();
LOG.info("Found {} containers from ZK registry: {}", existingComps.size(),
existingComps);
} catch (Exception e) {
LOG.info("Could not read component paths: {}", e.getMessage());
}
if (existingComps != null) {
for (String existingComp : existingComps) {
try {
ServiceRecord record =
yarnRegistryOperations.getComponent(existingComp);
existingRecords.put(existingComp, record);
} catch (Exception e) {
LOG.warn("Could not resolve record for component {}: {}",
existingComp, e);
}
}
}
for (Container container : recoveredContainers) {
LOG.info("Handling container {} from previous attempt",
container.getId());
ServiceRecord record = existingRecords.get(RegistryPathUtils
.encodeYarnID(container.getId().toString()));
if (record != null) {
Component comp = componentsById.get(container.getAllocationRequestId());
ComponentEvent event =
new ComponentEvent(comp.getName(), CONTAINER_RECOVERED)
.setContainer(container)
.setInstance(comp.getComponentInstance(record.description));
comp.handle(event);
// do not remove requests in this case because we do not know if they
// have already been removed
} else {
LOG.info("Record not found in registry for container {} from previous" +
" attempt, releasing", container.getId());
amRMClient.releaseAssignedContainer(container.getId());
}
}
}
private void initGlobalTokensForSubstitute(ServiceContext context) {
// ZK
globalTokens.put(ServiceApiConstants.CLUSTER_ZK_QUORUM, getConfig()
.getTrimmed(KEY_REGISTRY_ZK_QUORUM, DEFAULT_REGISTRY_ZK_QUORUM));
String user = null;
try {
user = UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException e) {
LOG.error("Failed to get user.", e);
}
globalTokens
.put(SERVICE_ZK_PATH, ServiceRegistryUtils.mkClusterPath(user, app.getName()));
globalTokens.put(ServiceApiConstants.USER, user);
String dnsDomain = getConfig().getTrimmed(KEY_DNS_DOMAIN);
if (dnsDomain != null && !dnsDomain.isEmpty()) {
globalTokens.put(ServiceApiConstants.DOMAIN, dnsDomain);
}
// HDFS
String clusterFs = getConfig().getTrimmed(FS_DEFAULT_NAME_KEY);
if (clusterFs != null && !clusterFs.isEmpty()) {
globalTokens.put(ServiceApiConstants.CLUSTER_FS_URI, clusterFs);
globalTokens.put(ServiceApiConstants.CLUSTER_FS_HOST,
URI.create(clusterFs).getHost());
}
globalTokens.put(SERVICE_HDFS_DIR, context.serviceHdfsDir);
// service name
globalTokens.put(SERVICE_NAME_LC, app.getName().toLowerCase());
globalTokens.put(SERVICE_NAME, app.getName());
}
private void createConfigFileCache(final FileSystem fileSystem) {
this.configFileCache =
CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES)
.build(new CacheLoader<ConfigFile, Object>() {
@Override public Object load(ConfigFile key) throws Exception {
switch (key.getType()) {
case HADOOP_XML:
try (FSDataInputStream input = fileSystem
.open(new Path(key.getSrcFile()))) {
org.apache.hadoop.conf.Configuration confRead =
new org.apache.hadoop.conf.Configuration(false);
confRead.addResource(input);
Map<String, String> map = new HashMap<>(confRead.size());
for (Map.Entry<String, String> entry : confRead) {
map.put(entry.getKey(), entry.getValue());
}
return map;
}
case TEMPLATE:
try (FSDataInputStream fileInput = fileSystem
.open(new Path(key.getSrcFile()))) {
return IOUtils.toString(fileInput);
}
default:
return null;
}
}
});
context.configCache = configFileCache;
}
private void registerServiceInstance(ApplicationAttemptId attemptId,
Service service) throws IOException {
LOG.info("Registering " + attemptId + ", " + service.getName()
+ " into registry");
ServiceRecord serviceRecord = new ServiceRecord();
serviceRecord.set(YarnRegistryAttributes.YARN_ID,
attemptId.getApplicationId().toString());
serviceRecord.set(YarnRegistryAttributes.YARN_PERSISTENCE,
PersistencePolicies.APPLICATION);
serviceRecord.description = "YarnServiceMaster";
executorService.submit(new Runnable() {
@Override public void run() {
try {
yarnRegistryOperations.registerSelf(serviceRecord, false);
LOG.info("Registered service under {}; absolute path {}",
yarnRegistryOperations.getSelfRegistrationPath(),
yarnRegistryOperations.getAbsoluteSelfRegistrationPath());
boolean isFirstAttempt = 1 == attemptId.getAttemptId();
// delete the children in case there are any and this is an AM startup.
// just to make sure everything underneath is purged
if (isFirstAttempt) {
yarnRegistryOperations.deleteChildren(
yarnRegistryOperations.getSelfRegistrationPath(), true);
}
} catch (IOException e) {
LOG.error(
"Failed to register app " + app.getName() + " in registry", e);
}
}
});
if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
serviceTimelinePublisher.serviceAttemptRegistered(app, getConfig());
}
}
private void createAllComponents() {
long allocateId = 0;
// sort components by dependencies
Collection<org.apache.hadoop.yarn.service.api.records.Component> sortedComponents =
ServiceApiUtil.sortByDependencies(app.getComponents());
for (org.apache.hadoop.yarn.service.api.records.Component compSpec : sortedComponents) {
Component component = new Component(compSpec, allocateId, context);
componentsById.put(allocateId, component);
componentsByName.put(component.getName(), component);
allocateId++;
}
}
private final class ComponentEventHandler
implements EventHandler<ComponentEvent> {
@Override
public void handle(ComponentEvent event) {
Component component = componentsByName.get(event.getName());
if (component == null) {
LOG.error("No component exists for " + event.getName());
return;
}
try {
component.handle(event);
} catch (Throwable t) {
LOG.error(MessageFormat
.format("[COMPONENT {0}]: Error in handling event type {1}",
component.getName(), event.getType()), t);
}
}
}
private final class ComponentInstanceEventHandler
implements EventHandler<ComponentInstanceEvent> {
@Override
public void handle(ComponentInstanceEvent event) {
ComponentInstance instance =
liveInstances.get(event.getContainerId());
if (instance == null) {
LOG.error("No component instance exists for " + event.getContainerId());
return;
}
try {
instance.handle(event);
} catch (Throwable t) {
LOG.error(instance.getCompInstanceId() +
": Error in handling event type " + event.getType(), t);
}
}
}
class AMRMClientCallback extends AMRMClientAsync.AbstractCallbackHandler {
@Override
public void onContainersAllocated(List<Container> containers) {
LOG.info(containers.size() + " containers allocated. ");
for (Container container : containers) {
Component comp = componentsById.get(container.getAllocationRequestId());
ComponentEvent event =
new ComponentEvent(comp.getName(), CONTAINER_ALLOCATED)
.setContainer(container);
dispatcher.getEventHandler().handle(event);
Collection<AMRMClient.ContainerRequest> requests = amRMClient
.getMatchingRequests(container.getAllocationRequestId());
LOG.info("[COMPONENT {}]: {} outstanding container requests.",
comp.getName(), requests.size());
// remove the corresponding request
if (requests.iterator().hasNext()) {
LOG.info("[COMPONENT {}]: removing one container request.", comp
.getName());
AMRMClient.ContainerRequest request = requests.iterator().next();
amRMClient.removeContainerRequest(request);
}
}
}
@Override
public void onContainersCompleted(List<ContainerStatus> statuses) {
for (ContainerStatus status : statuses) {
ContainerId containerId = status.getContainerId();
ComponentInstance instance = liveInstances.get(status.getContainerId());
if (instance == null) {
LOG.warn(
"Container {} completed, but no component instance exists. exitStatus={}, diagnostics={}",
containerId, status.getExitStatus(), status.getDiagnostics());
// skip this status but keep processing the rest of the batch
continue;
}
ComponentEvent event =
new ComponentEvent(instance.getCompName(), CONTAINER_COMPLETED)
.setStatus(status).setInstance(instance);
dispatcher.getEventHandler().handle(event);
}
}
@Override
public void onContainersUpdated(List<UpdatedContainer> containers) {
}
@Override public void onShutdownRequest() {
// Was used for non-work-preserving restart in YARN; should be deprecated.
}
@Override public void onNodesUpdated(List<NodeReport> updatedNodes) {
StringBuilder str = new StringBuilder();
str.append("Nodes updated info: ").append(System.lineSeparator());
for (NodeReport report : updatedNodes) {
str.append(report.getNodeId()).append(", state = ")
.append(report.getNodeState()).append(", healthDiagnostics = ")
.append(report.getHealthReport()).append(System.lineSeparator());
}
LOG.warn(str.toString());
}
@Override public float getProgress() {
// get running containers over desired containers
long total = 0;
for (org.apache.hadoop.yarn.service.api.records.Component component : app
.getComponents()) {
total += component.getNumberOfContainers();
}
// possibly because the user flexed the service down to 0
if (total == 0) {
return 100;
}
// cap at 100 so progress never exceeds the desired container count
return Math.min((float) liveInstances.size() / total * 100, 100);
}
@Override public void onError(Throwable e) {
LOG.error("Error in AMRMClient callback handler ", e);
}
}
private class NMClientCallback extends NMClientAsync.AbstractCallbackHandler {
@Override public void onContainerStarted(ContainerId containerId,
Map<String, ByteBuffer> allServiceResponse) {
ComponentInstance instance = liveInstances.get(containerId);
if (instance == null) {
LOG.error("No component instance exists for " + containerId);
return;
}
ComponentEvent event =
new ComponentEvent(instance.getCompName(), CONTAINER_STARTED)
.setInstance(instance);
dispatcher.getEventHandler().handle(event);
}
@Override public void onContainerStatusReceived(ContainerId containerId,
ContainerStatus containerStatus) {
}
@Override public void onContainerStopped(ContainerId containerId) {
}
@Override
public void onStartContainerError(ContainerId containerId, Throwable t) {
ComponentInstance instance = liveInstances.get(containerId);
if (instance == null) {
LOG.error("No component instance exists for " + containerId);
return;
}
LOG.error("Failed to start " + containerId, t);
amRMClient.releaseAssignedContainer(containerId);
// After container released, it'll get CONTAINER_COMPLETED event from RM
// automatically which will trigger stopping COMPONENT INSTANCE
}
@Override public void onContainerResourceIncreased(ContainerId containerId,
Resource resource) {
}
@Override public void onContainerResourceUpdated(ContainerId containerId,
Resource resource) {
}
@Override public void onGetContainerStatusError(ContainerId containerId,
Throwable t) {
}
@Override
public void onIncreaseContainerResourceError(ContainerId containerId,
Throwable t) {
}
@Override
public void onUpdateContainerResourceError(ContainerId containerId,
Throwable t) {
}
@Override
public void onStopContainerError(ContainerId containerId, Throwable t) {
}
}
public ServiceMetrics getServiceMetrics() {
return serviceMetrics;
}
public AMRMClientAsync<AMRMClient.ContainerRequest> getAmRMClient() {
return amRMClient;
}
public NMClientAsync getNmClient() {
return nmClient;
}
public void addLiveCompInstance(ContainerId containerId,
ComponentInstance instance) {
liveInstances.put(containerId, instance);
}
public void removeLiveCompInstance(ContainerId containerId) {
liveInstances.remove(containerId);
}
public AsyncDispatcher getCompInstanceDispatcher() {
return compInstanceDispatcher;
}
public YarnRegistryViewForProviders getYarnRegistryOperations() {
return yarnRegistryOperations;
}
public ServiceTimelinePublisher getServiceTimelinePublisher() {
return serviceTimelinePublisher;
}
public Map<ContainerId, ComponentInstance> getLiveInstances() {
return liveInstances;
}
public ContainerLaunchService getContainerLaunchService() {
return containerLaunchService;
}
public ServiceContext getContext() {
return context;
}
public Map<String, Component> getAllComponents() {
return componentsByName;
}
public Service getApp() {
return app;
}
public AsyncDispatcher getDispatcher() {
return dispatcher;
}
public BoundedAppender getDiagnostics() {
return diagnostics;
}
}
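
The protected factory methods above (createAMRMClient, createNMClient, createYarnRegistryOperations) are the seams for testing: a subclass can return stubs so the scheduler never contacts a real RM, NM, or ZK registry. A minimal sketch, assuming Mockito is on the classpath and the class sits in the same org.apache.hadoop.yarn.service package (test scaffolding only, not part of this patch):

package org.apache.hadoop.yarn.service;

import static org.mockito.Mockito.mock;

import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;

/** Test-only scheduler: all YARN-facing clients are mocks. */
class MockedServiceScheduler extends ServiceScheduler {
  MockedServiceScheduler(ServiceContext context) {
    super(context);
  }

  @Override
  @SuppressWarnings("unchecked")
  protected AMRMClientAsync<AMRMClient.ContainerRequest> createAMRMClient() {
    return mock(AMRMClientAsync.class); // no RM heartbeats
  }

  @Override
  protected NMClientAsync createNMClient() {
    return mock(NMClientAsync.class); // no real container launches
  }

  @Override
  protected YarnRegistryViewForProviders createYarnRegistryOperations(
      ServiceContext context, RegistryOperations registryClient) {
    return mock(YarnRegistryViewForProviders.class); // no ZK registry writes
  }
}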

View File

@ -0,0 +1,74 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.$;
/**
* Constants that can be used in a service spec for variable substitution.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public interface ServiceApiConstants {
// Constants for service
String SERVICE_NAME = $("SERVICE_NAME");
String SERVICE_NAME_LC = $("SERVICE_NAME.lc");
String USER = $("USER");
String DOMAIN = $("DOMAIN");
// Constants for component
String COMPONENT_NAME = $("COMPONENT_NAME");
String COMPONENT_NAME_LC = $("COMPONENT_NAME.lc");
String COMPONENT_INSTANCE_NAME = $("COMPONENT_INSTANCE_NAME");
// Constants for component instance
String COMPONENT_ID = $("COMPONENT_ID");
String CONTAINER_ID = $("CONTAINER_ID");
// Templates for component instance host/IP
String COMPONENT_INSTANCE_HOST = $("%s_HOST");
String COMPONENT_INSTANCE_IP = $("%s_IP");
// Constants for default cluster ZK
String CLUSTER_ZK_QUORUM = $("CLUSTER_ZK_QUORUM");
// URI for the default cluster fs
String CLUSTER_FS_URI = $("CLUSTER_FS_URI");
// The host component of the cluster fs URI
String CLUSTER_FS_HOST = $("CLUSTER_FS_HOST");
// Path in zookeeper for a specific service
String SERVICE_ZK_PATH = $("SERVICE_ZK_PATH");
// Constants for service specific hdfs dir
String SERVICE_HDFS_DIR = $("SERVICE_HDFS_DIR");
}
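
Each constant expands to a ${...} token reference (assuming ServiceApiUtil.$ wraps its argument in ${ and }), which the scheduler later substitutes from its globalTokens map before container launch. A hedged usage sketch wiring cluster-level tokens into a component environment:

import org.apache.hadoop.yarn.service.api.ServiceApiConstants;
import org.apache.hadoop.yarn.service.api.records.Configuration;

final class TokenUsageExample {
  // Sketch: reference cluster-level tokens in a spec; the scheduler
  // resolves them (see initGlobalTokensForSubstitute above) at launch.
  static Configuration clusterAwareEnv() {
    Configuration conf = new Configuration();
    conf.getEnv().put("ZK_QUORUM", ServiceApiConstants.CLUSTER_ZK_QUORUM);  // ${CLUSTER_ZK_QUORUM}
    conf.getEnv().put("SERVICE_DIR", ServiceApiConstants.SERVICE_HDFS_DIR); // ${SERVICE_HDFS_DIR}
    return conf;
  }
}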

View File

@ -0,0 +1,168 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.io.Serializable;
import java.util.Objects;
import javax.xml.bind.annotation.XmlEnum;
import javax.xml.bind.annotation.XmlType;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonValue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Artifact of a service component.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "Artifact of a service component")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Artifact implements Serializable {
private static final long serialVersionUID = 3608929500111099035L;
private String id = null;
/**
* Artifact Type. DOCKER, TARBALL or SERVICE
**/
@XmlType(name = "artifact_type")
@XmlEnum
public enum TypeEnum {
DOCKER("DOCKER"), TARBALL("TARBALL"), SERVICE("SERVICE");
private String value;
TypeEnum(String value) {
this.value = value;
}
@Override
@JsonValue
public String toString() {
return value;
}
}
private TypeEnum type = TypeEnum.DOCKER;
private String uri = null;
/**
* Artifact id. Examples are package location uri for tarball based services,
* image name for docker, etc.
**/
public Artifact id(String id) {
this.id = id;
return this;
}
@ApiModelProperty(example = "null", required = true, value = "Artifact id. Examples are package location uri for tarball based services, image name for docker, etc.")
@JsonProperty("id")
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
/**
* Artifact type, like docker, tarball, etc. (optional).
**/
public Artifact type(TypeEnum type) {
this.type = type;
return this;
}
@ApiModelProperty(example = "null", value = "Artifact type, like docker, tarball, etc. (optional).")
@JsonProperty("type")
public TypeEnum getType() {
return type;
}
public void setType(TypeEnum type) {
this.type = type;
}
/**
* Artifact location to support multiple artifact stores (optional).
**/
public Artifact uri(String uri) {
this.uri = uri;
return this;
}
@ApiModelProperty(example = "null", value = "Artifact location to support multiple artifact stores (optional).")
@JsonProperty("uri")
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Artifact artifact = (Artifact) o;
return Objects.equals(this.id, artifact.id)
&& Objects.equals(this.type, artifact.type)
&& Objects.equals(this.uri, artifact.uri);
}
@Override
public int hashCode() {
return Objects.hash(id, type, uri);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class Artifact {\n");
sb.append(" id: ").append(toIndentedString(id)).append("\n");
sb.append(" type: ").append(toIndentedString(type)).append("\n");
sb.append(" uri: ").append(toIndentedString(uri)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
}
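
The fluent setters (id, type, uri) return this, so specs can be built inline. A small usage sketch with an illustrative image name:

import org.apache.hadoop.yarn.service.api.records.Artifact;

final class ArtifactExample {
  // For DOCKER artifacts the id carries the image name (tag included).
  static Artifact dockerArtifact() {
    return new Artifact()
        .id("library/httpd:2.4") // hypothetical image name
        .type(Artifact.TypeEnum.DOCKER);
  }
}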

View File

@ -0,0 +1,52 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.io.Serializable;
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class BaseResource implements Serializable {
private static final long serialVersionUID = 1492603053176889431L;
private String uri;
/**
* Resource location for a service, e.g.
* /ws/v1/services/helloworld
*
**/
public String getUri() {
return uri;
}
public void setUri(String uri) {
this.uri = uri;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("BaseResource [uri=");
builder.append(uri);
builder.append("]");
return builder.toString();
}
}

View File

@ -0,0 +1,430 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* One or more components of the service. If the service is HBase, say, then
* a component can be a simple role like master or regionserver. If the
* service is a complex business webapp, then a component can be another
* service, say Kafka or Storm. This opens up support for complex and
* nested services.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "One or more components of the service. If the service is HBase, say, then a component can be a simple role like master or regionserver. If the service is a complex business webapp, then a component can be another service, say Kafka or Storm. This opens up support for complex and nested services.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@XmlRootElement
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Component implements Serializable {
private static final long serialVersionUID = -8430058381509087805L;
private String name = null;
private List<String> dependencies = new ArrayList<String>();
private ReadinessCheck readinessCheck = null;
private Artifact artifact = null;
private String launchCommand = null;
private Resource resource = null;
private Long numberOfContainers = null;
private Boolean runPrivilegedContainer = false;
private PlacementPolicy placementPolicy = null;
private ComponentState state = ComponentState.FLEXING;
private Configuration configuration = new Configuration();
private List<String> quicklinks = new ArrayList<String>();
private List<Container> containers =
Collections.synchronizedList(new ArrayList<Container>());
/**
* Name of the service component (mandatory).
**/
public Component name(String name) {
this.name = name;
return this;
}
@ApiModelProperty(example = "null", required = true, value = "Name of the service component (mandatory).")
@JsonProperty("name")
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
/**
* An array of service components which should be in READY state (as
* defined by readiness check), before this component can be started. The
* dependencies across all components of a service should be represented
* as a DAG.
**/
public Component dependencies(List<String> dependencies) {
this.dependencies = dependencies;
return this;
}
@ApiModelProperty(example = "null", value = "An array of service components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of a service should be represented as a DAG.")
@JsonProperty("dependencies")
public List<String> getDependencies() {
return dependencies;
}
public void setDependencies(List<String> dependencies) {
this.dependencies = dependencies;
}
/**
* Readiness check for this component.
**/
public Component readinessCheck(ReadinessCheck readinessCheck) {
this.readinessCheck = readinessCheck;
return this;
}
@ApiModelProperty(example = "null", value = "Readiness check for this component.")
@JsonProperty("readiness_check")
public ReadinessCheck getReadinessCheck() {
return readinessCheck;
}
@XmlElement(name = "readiness_check")
public void setReadinessCheck(ReadinessCheck readinessCheck) {
this.readinessCheck = readinessCheck;
}
/**
* Artifact of the component (optional). If not specified, the service
* level global artifact takes effect.
**/
public Component artifact(Artifact artifact) {
this.artifact = artifact;
return this;
}
@ApiModelProperty(example = "null", value = "Artifact of the component (optional). If not specified, the service level global artifact takes effect.")
@JsonProperty("artifact")
public Artifact getArtifact() {
return artifact;
}
public void setArtifact(Artifact artifact) {
this.artifact = artifact;
}
/**
* The custom launch command of this component (optional). When specified at
* the component level, it overrides the value specified at the global level
* (if any).
**/
public Component launchCommand(String launchCommand) {
this.launchCommand = launchCommand;
return this;
}
@ApiModelProperty(example = "null", value = "The custom launch command of this component (optional). When specified at the component level, it overrides the value specified at the global level (if any).")
@JsonProperty("launch_command")
public String getLaunchCommand() {
return launchCommand;
}
@XmlElement(name = "launch_command")
public void setLaunchCommand(String launchCommand) {
this.launchCommand = launchCommand;
}
/**
* Resource of this component (optional). If not specified, the service
* level global resource takes effect.
**/
public Component resource(Resource resource) {
this.resource = resource;
return this;
}
@ApiModelProperty(example = "null", value = "Resource of this component (optional). If not specified, the service level global resource takes effect.")
@JsonProperty("resource")
public Resource getResource() {
return resource;
}
public void setResource(Resource resource) {
this.resource = resource;
}
/**
* Number of containers for this component (optional). If not specified,
* the service level global number_of_containers takes effect.
**/
public Component numberOfContainers(Long numberOfContainers) {
this.numberOfContainers = numberOfContainers;
return this;
}
@ApiModelProperty(example = "null", value = "Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect.")
@JsonProperty("number_of_containers")
public Long getNumberOfContainers() {
return numberOfContainers;
}
@XmlElement(name = "number_of_containers")
public void setNumberOfContainers(Long numberOfContainers) {
this.numberOfContainers = numberOfContainers;
}
@ApiModelProperty(example = "null", value = "Containers of a started component. Specifying a value for this attribute for the POST payload raises a validation error. This blob is available only in the GET response of a started service.")
@JsonProperty("containers")
public List<Container> getContainers() {
return containers;
}
public void setContainers(List<Container> containers) {
this.containers = containers;
}
public void addContainer(Container container) {
this.containers.add(container);
}
public void removeContainer(Container container) {
containers.remove(container);
}
public Container getContainer(String id) {
for (Container container : containers) {
if (container.getId().equals(id)) {
return container;
}
}
return null;
}
/**
* Run all containers of this component in privileged mode (YARN-4262).
**/
public Component runPrivilegedContainer(Boolean runPrivilegedContainer) {
this.runPrivilegedContainer = runPrivilegedContainer;
return this;
}
@ApiModelProperty(example = "null", value = "Run all containers of this component in privileged mode (YARN-4262).")
@JsonProperty("run_privileged_container")
public Boolean getRunPrivilegedContainer() {
return runPrivilegedContainer;
}
@XmlElement(name = "run_privileged_container")
public void setRunPrivilegedContainer(Boolean runPrivilegedContainer) {
this.runPrivilegedContainer = runPrivilegedContainer;
}
/**
* Advanced scheduling and placement policies for all containers of this
* component (optional). If not specified, the service level placement_policy
* takes effect. Refer to the description at the global level for more
* details.
**/
public Component placementPolicy(PlacementPolicy placementPolicy) {
this.placementPolicy = placementPolicy;
return this;
}
@ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies for all containers of this component (optional). If not specified, the service level placement_policy takes effect. Refer to the description at the global level for more details.")
@JsonProperty("placement_policy")
public PlacementPolicy getPlacementPolicy() {
return placementPolicy;
}
@XmlElement(name = "placement_policy")
public void setPlacementPolicy(PlacementPolicy placementPolicy) {
this.placementPolicy = placementPolicy;
}
/**
* Config properties for this component.
**/
public Component configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
@ApiModelProperty(example = "null", value = "Config properties for this component.")
@JsonProperty("configuration")
public Configuration getConfiguration() {
return configuration;
}
public void setConfiguration(Configuration configuration) {
this.configuration = configuration;
}
/**
* A list of quicklink keys defined at the service level, and to be
* resolved by this component.
**/
public Component quicklinks(List<String> quicklinks) {
this.quicklinks = quicklinks;
return this;
}
@ApiModelProperty(example = "null", value = "A list of quicklink keys defined at the service level, and to be resolved by this component.")
@JsonProperty("quicklinks")
public List<String> getQuicklinks() {
return quicklinks;
}
public void setQuicklinks(List<String> quicklinks) {
this.quicklinks = quicklinks;
}
public Component state(ComponentState state) {
this.state = state;
return this;
}
@ApiModelProperty(example = "null", value = "State of the component.")
@JsonProperty("state")
public ComponentState getState() {
return state;
}
public void setState(ComponentState state) {
this.state = state;
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Component component = (Component) o;
return Objects.equals(this.name, component.name)
&& Objects.equals(this.dependencies, component.dependencies)
&& Objects.equals(this.readinessCheck, component.readinessCheck)
&& Objects.equals(this.artifact, component.artifact)
&& Objects.equals(this.launchCommand, component.launchCommand)
&& Objects.equals(this.resource, component.resource)
&& Objects.equals(this.numberOfContainers, component.numberOfContainers)
&& Objects.equals(this.runPrivilegedContainer,
component.runPrivilegedContainer)
&& Objects.equals(this.placementPolicy, component.placementPolicy)
&& Objects.equals(this.configuration, component.configuration)
&& Objects.equals(this.quicklinks, component.quicklinks)
&& Objects.equals(this.state, component.state);
}
@Override
public int hashCode() {
return Objects.hash(name, dependencies, readinessCheck, artifact,
launchCommand, resource, numberOfContainers,
runPrivilegedContainer, placementPolicy, configuration, quicklinks, state);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class Component {\n");
sb.append(" name: ").append(toIndentedString(name)).append("\n");
sb.append(" state: ").append(toIndentedString(state)).append("\n");
sb.append(" dependencies: ").append(toIndentedString(dependencies))
.append("\n");
sb.append(" readinessCheck: ").append(toIndentedString(readinessCheck))
.append("\n");
sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n");
sb.append(" launchCommand: ").append(toIndentedString(launchCommand))
.append("\n");
sb.append(" resource: ").append(toIndentedString(resource)).append("\n");
sb.append(" numberOfContainers: ")
.append(toIndentedString(numberOfContainers)).append("\n");
sb.append(" containers: ").append(toIndentedString(containers))
.append("\n");
sb.append(" runPrivilegedContainer: ")
.append(toIndentedString(runPrivilegedContainer)).append("\n");
sb.append(" placementPolicy: ").append(toIndentedString(placementPolicy))
.append("\n");
sb.append(" configuration: ").append(toIndentedString(configuration))
.append("\n");
sb.append(" quicklinks: ").append(toIndentedString(quicklinks))
.append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
/**
* Merge from another component into this component without overwriting.
*/
public void mergeFrom(Component that) {
if (this.getArtifact() == null) {
this.setArtifact(that.getArtifact());
}
if (this.getResource() == null) {
this.setResource(that.getResource());
}
if (this.getNumberOfContainers() == null) {
this.setNumberOfContainers(that.getNumberOfContainers());
}
if (this.getLaunchCommand() == null) {
this.setLaunchCommand(that.getLaunchCommand());
}
this.getConfiguration().mergeFrom(that.getConfiguration());
if (this.getQuicklinks() == null) {
this.setQuicklinks(that.getQuicklinks());
}
if (this.getRunPrivilegedContainer() == null) {
this.setRunPrivilegedContainer(that.getRunPrivilegedContainer());
}
if (this.getDependencies() == null) {
this.setDependencies(that.getDependencies());
}
if (this.getPlacementPolicy() == null) {
this.setPlacementPolicy(that.getPlacementPolicy());
}
if (this.getReadinessCheck() == null) {
this.setReadinessCheck(that.getReadinessCheck());
}
}
}
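
mergeFrom only fills fields this component left unset (and merges configuration additively), which is how service-level defaults flow down without clobbering per-component overrides. A sketch with illustrative values:

import org.apache.hadoop.yarn.service.api.records.Artifact;
import org.apache.hadoop.yarn.service.api.records.Component;

final class ComponentMergeExample {
  static Component webComponent() {
    // Service-level defaults shared by all components.
    Component defaults = new Component()
        .artifact(new Artifact().id("example/app:1").type(Artifact.TypeEnum.DOCKER))
        .launchCommand("./start.sh");
    // The component pins its own launch command but inherits the artifact.
    Component web = new Component()
        .name("web")
        .numberOfContainers(2L)
        .launchCommand("./start-web.sh");
    web.mergeFrom(defaults); // artifact copied in, launchCommand kept
    return web;
  }
}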

View File

@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModel;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "The current state of a component.")
public enum ComponentState {
FLEXING, STABLE
}

View File

@ -0,0 +1,233 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonValue;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlEnum;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
* A config file that needs to be created and made available as a volume in a
* service component container.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "A config file that needs to be created and made available as a volume in a service component container.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@XmlRootElement
@JsonInclude(JsonInclude.Include.NON_NULL)
public class ConfigFile implements Serializable {
private static final long serialVersionUID = -7009402089417704612L;
/**
* Config type. XML, PROPERTIES, JSON, YAML, TEMPLATE and HADOOP_XML are supported.
**/
@XmlType(name = "config_type")
@XmlEnum
public enum TypeEnum {
XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE(
"TEMPLATE"), HADOOP_XML("HADOOP_XML"),;
private String value;
TypeEnum(String value) {
this.value = value;
}
@Override
@JsonValue
public String toString() {
return value;
}
}
private TypeEnum type = null;
private String destFile = null;
private String srcFile = null;
private Map<String, String> properties = new HashMap<>();
public ConfigFile copy() {
ConfigFile copy = new ConfigFile();
copy.setType(this.getType());
copy.setSrcFile(this.getSrcFile());
copy.setDestFile(this.getDestFile());
if (this.getProperties() != null && !this.getProperties().isEmpty()) {
copy.getProperties().putAll(this.getProperties());
}
return copy;
}
/**
* Config file in the standard format like xml, properties, json, yaml,
* template.
**/
public ConfigFile type(TypeEnum type) {
this.type = type;
return this;
}
@ApiModelProperty(example = "null", value = "Config file in the standard format like xml, properties, json, yaml, template.")
@JsonProperty("type")
public TypeEnum getType() {
return type;
}
public void setType(TypeEnum type) {
this.type = type;
}
/**
* The absolute path that this configuration file should be mounted as, in the
* service container.
**/
public ConfigFile destFile(String destFile) {
this.destFile = destFile;
return this;
}
@ApiModelProperty(example = "null", value = "The absolute path that this configuration file should be mounted as, in the service container.")
@JsonProperty("dest_file")
public String getDestFile() {
return destFile;
}
@XmlElement(name = "dest_file")
public void setDestFile(String destFile) {
this.destFile = destFile;
}
/**
* This provides the source location of the configuration file, the content
* of which is dumped to dest_file post property substitutions, in the format
* as specified in type. Typically the src_file would point to a source
* controlled network accessible file maintained by tools like puppet, chef,
* or hdfs etc. Currently, only hdfs is supported.
**/
public ConfigFile srcFile(String srcFile) {
this.srcFile = srcFile;
return this;
}
@ApiModelProperty(example = "null", value = "This provides the source location of the configuration file, "
+ "the content of which is dumped to dest_file post property substitutions, in the format as specified in type. "
+ "Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.")
@JsonProperty("src_file")
public String getSrcFile() {
return srcFile;
}
@XmlElement(name = "src_file")
public void setSrcFile(String srcFile) {
this.srcFile = srcFile;
}
/**
A blob of key value pairs that will be dumped in the dest_file in the format
as specified in type. If src_file is specified, src_file content are dumped
in the dest_file and these properties will overwrite, if any, existing
properties in src_file or be added as new properties in src_file.
**/
public ConfigFile properties(Map<String, String> properties) {
this.properties = properties;
return this;
}
@ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type."
+ " If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any,"
+ " existing properties in src_file or be added as new properties in src_file.")
@JsonProperty("properties")
public Map<String, String> getProperties() {
return properties;
}
public void setProperties(Map<String, String> properties) {
this.properties = properties;
}
public long getLong(String name, long defaultValue) {
if (name == null) {
return defaultValue;
}
String value = properties.get(name.trim());
if (value == null) {
return defaultValue;
}
return Long.parseLong(value);
}
public boolean getBoolean(String name, boolean defaultValue) {
if (name == null) {
return defaultValue;
}
String value = properties.get(name.trim());
if (value == null) {
return defaultValue;
}
return Boolean.parseBoolean(value);
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ConfigFile configFile = (ConfigFile) o;
return Objects.equals(this.type, configFile.type)
&& Objects.equals(this.destFile, configFile.destFile)
&& Objects.equals(this.srcFile, configFile.srcFile)
&& Objects.equals(this.properties, configFile.properties);
}
@Override
public int hashCode() {
return Objects.hash(type, destFile, srcFile, properties);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class ConfigFile {\n");
sb.append(" type: ").append(toIndentedString(type)).append("\n");
sb.append(" destFile: ").append(toIndentedString(destFile)).append("\n");
sb.append(" srcFile: ").append(toIndentedString(srcFile)).append("\n");
sb.append(" properties: ").append(toIndentedString(properties)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
}
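
A sketch tying the fields together: a HADOOP_XML file sourced from HDFS, mounted at dest_file, with one property overriding whatever src_file provides (paths are illustrative):

import org.apache.hadoop.yarn.service.api.ServiceApiConstants;
import org.apache.hadoop.yarn.service.api.records.ConfigFile;

final class ConfigFileExample {
  static ConfigFile coreSite() {
    ConfigFile file = new ConfigFile()
        .type(ConfigFile.TypeEnum.HADOOP_XML)
        .srcFile("hdfs:///apps/conf/core-site.xml")  // hypothetical source path
        .destFile("/etc/hadoop/conf/core-site.xml"); // mount point in container
    // Properties overwrite or extend what src_file provides.
    file.getProperties().put("fs.defaultFS", ServiceApiConstants.CLUSTER_FS_URI);
    return file;
  }
}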

View File

@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.util.Locale;
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum ConfigFormat {
JSON("json"),
PROPERTIES("properties"),
XML("xml"),
HADOOP_XML("hadoop_xml"),
ENV("env"),
TEMPLATE("template"),
YAML("yaml"),
;
ConfigFormat(String suffix) {
this.suffix = suffix;
}
private final String suffix;
public String getSuffix() {
return suffix;
}
@Override
public String toString() {
return suffix;
}
/**
* Get the format matching the given type suffix (case-insensitive).
* @param type the config type suffix to look up
* @return the matching format, or null if none matches
*/
public static ConfigFormat resolve(String type) {
for (ConfigFormat format: values()) {
if (format.getSuffix().equals(type.toLowerCase(Locale.ENGLISH))) {
return format;
}
}
return null;
}
}
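
resolve matches case-insensitively but returns null for unknown types (and would NPE on a null input), so callers must guard. For example:

import org.apache.hadoop.yarn.service.api.records.ConfigFormat;

final class ConfigFormatExample {
  static ConfigFormat parse(String type) {
    ConfigFormat fmt = ConfigFormat.resolve(type); // e.g. "Hadoop_XML" -> HADOOP_XML
    if (fmt == null) {
      throw new IllegalArgumentException("Unsupported config format: " + type);
    }
    return fmt;
  }
}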

View File

@ -0,0 +1,225 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* Set of configuration properties that can be injected into the service
* components via envs, files and custom pluggable helper docker containers.
* Files of several standard formats like xml, properties, json, yaml and
* templates will be supported.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "Set of configuration properties that can be injected into the service components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Configuration implements Serializable {
private static final long serialVersionUID = -4330788704981074466L;
private Map<String, String> properties = new HashMap<String, String>();
private Map<String, String> env = new HashMap<String, String>();
private List<ConfigFile> files = new ArrayList<ConfigFile>();
/**
* A blob of key-value pairs of common service properties.
**/
public Configuration properties(Map<String, String> properties) {
this.properties = properties;
return this;
}
@ApiModelProperty(example = "null", value = "A blob of key-value pairs of common service properties.")
@JsonProperty("properties")
public Map<String, String> getProperties() {
return properties;
}
public void setProperties(Map<String, String> properties) {
this.properties = properties;
}
/**
* A blob of key-value pairs which will be appended to the default system
* properties and handed off to the service at start time. All placeholder
* references to properties will be substituted before injection.
**/
public Configuration env(Map<String, String> env) {
this.env = env;
return this;
}
@ApiModelProperty(example = "null", value = "A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.")
@JsonProperty("env")
public Map<String, String> getEnv() {
return env;
}
public void setEnv(Map<String, String> env) {
this.env = env;
}
/**
* The list of files that need to be created and made available as
* volumes in the service component containers.
**/
public Configuration files(List<ConfigFile> files) {
this.files = files;
return this;
}
@ApiModelProperty(example = "null", value = "The list of files that need to be created and made available as volumes in the service component containers.")
@JsonProperty("files")
public List<ConfigFile> getFiles() {
return files;
}
public void setFiles(List<ConfigFile> files) {
this.files = files;
}
public long getPropertyLong(String name, long defaultValue) {
String value = getProperty(name);
if (StringUtils.isEmpty(value)) {
return defaultValue;
}
return Long.parseLong(value);
}
public int getPropertyInt(String name, int defaultValue) {
String value = getProperty(name);
if (StringUtils.isEmpty(value)) {
return defaultValue;
}
return Integer.parseInt(value);
}
public boolean getPropertyBool(String name, boolean defaultValue) {
String value = getProperty(name);
if (StringUtils.isEmpty(value)) {
return defaultValue;
}
return Boolean.parseBoolean(value);
}
public String getProperty(String name, String defaultValue) {
String value = getProperty(name);
if (StringUtils.isEmpty(value)) {
return defaultValue;
}
return value;
}
public void setProperty(String name, String value) {
properties.put(name, value);
}
public String getProperty(String name) {
return properties.get(name.trim());
}
public String getEnv(String name) {
return env.get(name.trim());
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Configuration configuration = (Configuration) o;
return Objects.equals(this.properties, configuration.properties)
&& Objects.equals(this.env, configuration.env)
&& Objects.equals(this.files, configuration.files);
}
@Override
public int hashCode() {
return Objects.hash(properties, env, files);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class Configuration {\n");
sb.append(" properties: ").append(toIndentedString(properties))
.append("\n");
sb.append(" env: ").append(toIndentedString(env)).append("\n");
sb.append(" files: ").append(toIndentedString(files)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
/**
* Merge all properties and envs from that configuration into this
* configuration. For ConfigFiles with a matching destination file, the
* properties of that ConfigFile are merged into this ConfigFile.
*/
public synchronized void mergeFrom(Configuration that) {
ServiceUtils.mergeMapsIgnoreDuplicateKeys(this.properties, that
.getProperties());
ServiceUtils.mergeMapsIgnoreDuplicateKeys(this.env, that.getEnv());
Map<String, ConfigFile> thatMap = new HashMap<>();
for (ConfigFile file : that.getFiles()) {
thatMap.put(file.getDestFile(), file.copy());
}
for (ConfigFile thisFile : files) {
if(thatMap.containsKey(thisFile.getDestFile())) {
ConfigFile thatFile = thatMap.get(thisFile.getDestFile());
ServiceUtils.mergeMapsIgnoreDuplicateKeys(thisFile.getProperties(),
thatFile.getProperties());
thatMap.remove(thisFile.getDestFile());
}
}
// add remaining new files from that Configuration
for (ConfigFile thatFile : thatMap.values()) {
files.add(thatFile.copy());
}
}
}
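A minimal sketch of how the fluent mutators and typed getters above compose with mergeFrom. The property keys are illustrative placeholders, and it assumes ServiceUtils.mergeMapsIgnoreDuplicateKeys keeps existing entries when keys collide, as its name suggests.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.service.api.records.Configuration;

public class ConfigurationSketch {
  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    props.put("example.retry.max", "5");                  // hypothetical key
    Configuration base = new Configuration().properties(props);

    Configuration overrides = new Configuration();
    overrides.setProperty("example.retry.max", "9");      // colliding key
    overrides.setProperty("example.timeout.ms", "3000");  // new key

    // Duplicate keys are ignored, so base keeps "5" and only gains the new key.
    base.mergeFrom(overrides);
    System.out.println(base.getPropertyLong("example.retry.max", 0));  // 5
    System.out.println(base.getPropertyInt("example.timeout.ms", 0));  // 3000
  }
}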

View File

@ -0,0 +1,298 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.Date;
import java.util.Objects;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* An instance of a running service container.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "An instance of a running service container")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@XmlRootElement
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Container extends BaseResource {
private static final long serialVersionUID = -8955788064529288L;
private String id = null;
private Date launchTime = null;
private String ip = null;
private String hostname = null;
private String bareHost = null;
private ContainerState state = null;
private String componentInstanceName = null;
private Resource resource = null;
private Artifact artifact = null;
private Boolean privilegedContainer = null;
/**
* Unique container id of a running service, e.g.
* container_e3751_1458061340047_0008_01_000002.
**/
public Container id(String id) {
this.id = id;
return this;
}
@ApiModelProperty(example = "null", value = "Unique container id of a running service, e.g. container_e3751_1458061340047_0008_01_000002.")
@JsonProperty("id")
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
/**
* The time when the container was created, e.g. 2016-03-16T01:01:49.000Z.
* This will most likely be different from cluster launch time.
**/
public Container launchTime(Date launchTime) {
this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
return this;
}
@ApiModelProperty(example = "null", value = "The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time.")
@JsonProperty("launch_time")
public Date getLaunchTime() {
return launchTime == null ? null : (Date) launchTime.clone();
}
@XmlElement(name = "launch_time")
public void setLaunchTime(Date launchTime) {
this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
}
/**
* IP address of a running container, e.g. 172.31.42.141. The IP address and
* hostname attribute values are dependent on the cluster/docker network setup
* as per YARN-4007.
**/
public Container ip(String ip) {
this.ip = ip;
return this;
}
@ApiModelProperty(example = "null", value = "IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.")
@JsonProperty("ip")
public String getIp() {
return ip;
}
public void setIp(String ip) {
this.ip = ip;
}
/**
* Fully qualified hostname of a running container, e.g.
* ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and
* hostname attribute values are dependent on the cluster/docker network setup
* as per YARN-4007.
**/
public Container hostname(String hostname) {
this.hostname = hostname;
return this;
}
@ApiModelProperty(example = "null", value = "Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.")
@JsonProperty("hostname")
public String getHostname() {
return hostname;
}
public void setHostname(String hostname) {
this.hostname = hostname;
}
/**
* The bare node or host in which the container is running, e.g.
* cn008.example.com.
**/
public Container bareHost(String bareHost) {
this.bareHost = bareHost;
return this;
}
@ApiModelProperty(example = "null", value = "The bare node or host in which the container is running, e.g. cn008.example.com.")
@JsonProperty("bare_host")
public String getBareHost() {
return bareHost;
}
@XmlElement(name = "bare_host")
public void setBareHost(String bareHost) {
this.bareHost = bareHost;
}
/**
* State of the container of a service.
**/
public Container state(ContainerState state) {
this.state = state;
return this;
}
@ApiModelProperty(example = "null", value = "State of the container of an service.")
@JsonProperty("state")
public ContainerState getState() {
return state;
}
public void setState(ContainerState state) {
this.state = state;
}
/**
* Name of the component that this container instance belongs to.
**/
public Container componentName(String componentName) {
this.componentInstanceName = componentName;
return this;
}
@ApiModelProperty(example = "null", value = "Name of the component that this container instance belongs to.")
@JsonProperty("component_name")
public String getComponentInstanceName() {
return componentInstanceName;
}
@XmlElement(name = "component_name")
public void setComponentInstanceName(String componentInstanceName) {
this.componentInstanceName = componentInstanceName;
}
/**
* Resource used for this container.
**/
public Container resource(Resource resource) {
this.resource = resource;
return this;
}
@ApiModelProperty(example = "null", value = "Resource used for this container.")
@JsonProperty("resource")
public Resource getResource() {
return resource;
}
public void setResource(Resource resource) {
this.resource = resource;
}
/**
* Artifact used for this container.
**/
public Container artifact(Artifact artifact) {
this.artifact = artifact;
return this;
}
@ApiModelProperty(example = "null", value = "Artifact used for this container.")
@JsonProperty("artifact")
public Artifact getArtifact() {
return artifact;
}
public void setArtifact(Artifact artifact) {
this.artifact = artifact;
}
/**
* Container running in privileged mode or not.
**/
public Container privilegedContainer(Boolean privilegedContainer) {
this.privilegedContainer = privilegedContainer;
return this;
}
@ApiModelProperty(example = "null", value = "Container running in privileged mode or not.")
@JsonProperty("privileged_container")
public Boolean getPrivilegedContainer() {
return privilegedContainer;
}
public void setPrivilegedContainer(Boolean privilegedContainer) {
this.privilegedContainer = privilegedContainer;
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Container container = (Container) o;
return Objects.equals(this.id, container.id);
}
@Override
public int hashCode() {
return Objects.hash(id);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class Container {\n");
sb.append(" id: ").append(toIndentedString(id)).append("\n");
sb.append(" launchTime: ").append(toIndentedString(launchTime))
.append("\n");
sb.append(" ip: ").append(toIndentedString(ip)).append("\n");
sb.append(" hostname: ").append(toIndentedString(hostname)).append("\n");
sb.append(" bareHost: ").append(toIndentedString(bareHost)).append("\n");
sb.append(" state: ").append(toIndentedString(state)).append("\n");
sb.append(" componentInstanceName: ").append(toIndentedString(
componentInstanceName))
.append("\n");
sb.append(" resource: ").append(toIndentedString(resource)).append("\n");
sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n");
sb.append(" privilegedContainer: ")
.append(toIndentedString(privilegedContainer)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
}
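A short sketch of assembling a Container record with the fluent mutators above. The example values mirror those in the javadoc; note that equals() and hashCode() key on id alone, so two snapshots of the same container compare equal even if ip or state has changed.

import java.util.Date;
import org.apache.hadoop.yarn.service.api.records.Container;
import org.apache.hadoop.yarn.service.api.records.ContainerState;

public class ContainerSketch {
  public static void main(String[] args) {
    Container c = new Container()
        .id("container_e3751_1458061340047_0008_01_000002")
        .launchTime(new Date())
        .ip("172.31.42.141")
        .hostname("ctr-e3751-1458061340047-0008-01-000002.examplestg.site")
        .bareHost("cn008.example.com")
        .state(ContainerState.RUNNING_BUT_UNREADY)
        .componentName("worker");   // serialized as component_name
    System.out.println(c);          // uses the indented toString above
  }
}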

View File

@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The current state of the container of an application.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum ContainerState {
RUNNING_BUT_UNREADY, READY, STOPPED
}

View File

@ -0,0 +1,129 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModelProperty;
import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Unstable
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
public class Error {
private Integer code = null;
private String message = null;
private String fields = null;
/**
**/
public Error code(Integer code) {
this.code = code;
return this;
}
@ApiModelProperty(example = "null", value = "")
@JsonProperty("code")
public Integer getCode() {
return code;
}
public void setCode(Integer code) {
this.code = code;
}
/**
**/
public Error message(String message) {
this.message = message;
return this;
}
@ApiModelProperty(example = "null", value = "")
@JsonProperty("message")
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
/**
**/
public Error fields(String fields) {
this.fields = fields;
return this;
}
@ApiModelProperty(example = "null", value = "")
@JsonProperty("fields")
public String getFields() {
return fields;
}
public void setFields(String fields) {
this.fields = fields;
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Error error = (Error) o;
return Objects.equals(this.code, error.code)
&& Objects.equals(this.message, error.message)
&& Objects.equals(this.fields, error.fields);
}
@Override
public int hashCode() {
return Objects.hash(code, message, fields);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class Error {\n");
sb.append(" code: ").append(toIndentedString(code)).append("\n");
sb.append(" message: ").append(toIndentedString(message)).append("\n");
sb.append(" fields: ").append(toIndentedString(fields)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
}

View File

@ -0,0 +1,102 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.io.Serializable;
import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Placement policy of an instance of a service. This feature is in the
* works in YARN-4902.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "Placement policy of an instance of an service. This feature is in the works in YARN-4902.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
public class PlacementPolicy implements Serializable {
private static final long serialVersionUID = 4341110649551172231L;
private String label = null;
/**
* Assigns a service to a named partition of the cluster where the service
* desires to run (optional). If not specified all services are submitted to
* a default label of the service owner. One or more labels can be set up for
* each service owner account with required constraints like no-preemption,
* sla-99999, preemption-ok, etc.
**/
public PlacementPolicy label(String label) {
this.label = label;
return this;
}
@ApiModelProperty(example = "null", value = "Assigns a service to a named partition of the cluster where the service desires to run (optional). If not specified all services are submitted to a default label of the service owner. One or more labels can be setup for each service owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc.")
@JsonProperty("label")
public String getLabel() {
return label;
}
public void setLabel(String label) {
this.label = label;
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
PlacementPolicy placementPolicy = (PlacementPolicy) o;
return Objects.equals(this.label, placementPolicy.label);
}
@Override
public int hashCode() {
return Objects.hash(label);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class PlacementPolicy {\n");
sb.append(" label: ").append(toIndentedString(label)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
}

View File

@ -0,0 +1,183 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import javax.xml.bind.annotation.XmlEnum;
import javax.xml.bind.annotation.XmlType;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonValue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A custom command or a pluggable helper container to determine the readiness
* of a container of a component. Readiness for every service is different.
* Hence the need for a simple interface, with scope to support advanced
* usecases.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. Hence the need for a simple interface, with scope to support advanced usecases.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
public class ReadinessCheck implements Serializable {
private static final long serialVersionUID = -3836839816887186801L;
/**
 * Type of readiness check: HTTP or PORT.
 **/
@XmlType(name = "type")
@XmlEnum
public enum TypeEnum {
HTTP("HTTP"),
PORT("PORT");
private String value;
TypeEnum(String value) {
this.value = value;
}
@Override
@JsonValue
public String toString() {
return value;
}
}
private TypeEnum type = null;
private Map<String, String> properties = new HashMap<String, String>();
private Artifact artifact = null;
/**
* E.g. HTTP (YARN will perform a simple REST call at a regular interval and
* expect a 204 No Content).
**/
public ReadinessCheck type(TypeEnum type) {
this.type = type;
return this;
}
@ApiModelProperty(example = "null", value = "E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).")
@JsonProperty("type")
public TypeEnum getType() {
return type;
}
public void setType(TypeEnum type) {
this.type = type;
}
public ReadinessCheck properties(Map<String, String> properties) {
this.properties = properties;
return this;
}
public ReadinessCheck putPropsItem(String key, String propsItem) {
this.properties.put(key, propsItem);
return this;
}
/**
* A blob of key value pairs that will be used to configure the check.
* @return properties
**/
@ApiModelProperty(example = "null", value = "A blob of key value pairs that will be used to configure the check.")
public Map<String, String> getProperties() {
return properties;
}
public void setProperties(Map<String, String> properties) {
this.properties = properties;
}
/**
* Artifact of the pluggable readiness check helper container (optional). If
* specified, this helper container typically hosts the http uri and
* encapsulates the complex scripts required to perform actual container
* readiness check. At the end it is expected to respond with a 204 No
* Content, just like the simplified use case. This pluggable framework
* benefits service owners who can run services without any packaging
* modifications. Note, only artifacts of type docker are supported for now.
**/
public ReadinessCheck artifact(Artifact artifact) {
this.artifact = artifact;
return this;
}
@ApiModelProperty(example = "null", value = "Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. Note, artifacts of type docker only is supported for now.")
@JsonProperty("artifact")
public Artifact getArtifact() {
return artifact;
}
public void setArtifact(Artifact artifact) {
this.artifact = artifact;
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ReadinessCheck readinessCheck = (ReadinessCheck) o;
return Objects.equals(this.type, readinessCheck.type) &&
Objects.equals(this.properties, readinessCheck.properties) &&
Objects.equals(this.artifact, readinessCheck.artifact);
}
@Override
public int hashCode() {
return Objects.hash(type, properties, artifact);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class ReadinessCheck {\n");
sb.append(" type: ").append(toIndentedString(type)).append("\n");
sb.append(" properties: ").append(toIndentedString(properties)).append("\n");
sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
}
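A sketch of configuring the HTTP readiness type above. The class treats properties as an opaque blob, so the "url" key here is an illustrative assumption rather than a key the framework defines.

import org.apache.hadoop.yarn.service.api.records.ReadinessCheck;

public class ReadinessCheckSketch {
  public static void main(String[] args) {
    // HTTP type: YARN polls a URI at a regular interval and expects 204 No Content.
    ReadinessCheck check = new ReadinessCheck()
        .type(ReadinessCheck.TypeEnum.HTTP)
        .putPropsItem("url", "http://localhost:8080/ready");  // hypothetical key
    System.out.println(check.getType());  // prints HTTP via the enum's toString
  }
}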

View File

@ -0,0 +1,161 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Resource determines the amount of resources (vcores, memory, network, etc.)
* usable by a container. This field determines the resource to be applied for
* all the containers of a component or service. The resource specified at
* the service (or global) level can be overridden at the component level. Only
* one of profile OR cpu &amp; memory is expected. It raises a validation
* exception otherwise.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overriden at the component level. Only one of profile OR cpu & memory are expected. It raises a validation exception otherwise.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
public class Resource extends BaseResource implements Cloneable {
private static final long serialVersionUID = -6431667797380250037L;
private String profile = null;
private Integer cpus = 1;
private String memory = null;
/**
* Each resource profile has a unique id which is associated with a
* cluster-level predefined memory, cpus, etc.
**/
public Resource profile(String profile) {
this.profile = profile;
return this;
}
@ApiModelProperty(example = "null", value = "Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.")
@JsonProperty("profile")
public String getProfile() {
return profile;
}
public void setProfile(String profile) {
this.profile = profile;
}
/**
* Number of vcores allocated to each container (optional but overrides cpus
* in profile if specified).
**/
public Resource cpus(Integer cpus) {
this.cpus = cpus;
return this;
}
@ApiModelProperty(example = "null", value = "Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).")
@JsonProperty("cpus")
public Integer getCpus() {
return cpus;
}
public void setCpus(Integer cpus) {
this.cpus = cpus;
}
/**
* Amount of memory allocated to each container (optional but overrides memory
* in profile if specified). Currently accepts only an integer value, and the
* default unit is MB.
**/
public Resource memory(String memory) {
this.memory = memory;
return this;
}
@ApiModelProperty(example = "null", value = "Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.")
@JsonProperty("memory")
public String getMemory() {
return memory;
}
public void setMemory(String memory) {
this.memory = memory;
}
@JsonIgnore
public long getMemoryMB() {
if (this.memory == null) {
return 0;
}
return Long.parseLong(memory);
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Resource resource = (Resource) o;
return Objects.equals(this.profile, resource.profile)
&& Objects.equals(this.cpus, resource.cpus)
&& Objects.equals(this.memory, resource.memory);
}
@Override
public int hashCode() {
return Objects.hash(profile, cpus, memory);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class Resource {\n");
sb.append(" profile: ").append(toIndentedString(profile)).append("\n");
sb.append(" cpus: ").append(toIndentedString(cpus)).append("\n");
sb.append(" memory: ").append(toIndentedString(memory)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
@Override
public Object clone() throws CloneNotSupportedException {
return super.clone();
}
}
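A sketch of the two mutually exclusive ways to populate Resource per the class javadoc: explicit cpus and memory, or a cluster-defined profile. memory is a plain integer string in MB, which getMemoryMB parses.

import org.apache.hadoop.yarn.service.api.records.Resource;

public class ResourceSketch {
  public static void main(String[] args) {
    Resource explicit = new Resource().cpus(2).memory("2048");
    System.out.println(explicit.getMemoryMB());   // 2048

    // Alternatively, reference a predefined profile; per the javadoc, setting
    // both a profile and cpus/memory raises a validation exception downstream.
    Resource profiled = new Resource().profile("small");  // "small" is illustrative
    System.out.println(profiled.getMemoryMB());   // 0, since memory is unset
  }
}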

View File

@ -0,0 +1,390 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* A Service resource has the following attributes.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "An Service resource has the following attributes.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@XmlRootElement
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({ "name", "state", "resource", "number_of_containers",
"lifetime", "containers" })
public class Service extends BaseResource {
private static final long serialVersionUID = -4491694636566094885L;
private String name = null;
private String id = null;
private Artifact artifact = null;
private Resource resource = null;
private Date launchTime = null;
private Long numberOfRunningContainers = null;
private Long lifetime = null;
private PlacementPolicy placementPolicy = null;
private List<Component> components = new ArrayList<>();
private Configuration configuration = new Configuration();
private ServiceState state = null;
private Map<String, String> quicklinks = new HashMap<>();
private String queue = null;
/**
* A unique service name.
**/
public Service name(String name) {
this.name = name;
return this;
}
@ApiModelProperty(example = "null", required = true, value = "A unique service name.")
@JsonProperty("name")
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
/**
* A unique service id.
**/
public Service id(String id) {
this.id = id;
return this;
}
@ApiModelProperty(example = "null", value = "A unique service id.")
@JsonProperty("id")
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
/**
* Artifact of single-component services. Mandatory if components
* attribute is not specified.
**/
public Service artifact(Artifact artifact) {
this.artifact = artifact;
return this;
}
@ApiModelProperty(example = "null", value = "Artifact of single-component services. Mandatory if components attribute is not specified.")
@JsonProperty("artifact")
public Artifact getArtifact() {
return artifact;
}
public void setArtifact(Artifact artifact) {
this.artifact = artifact;
}
/**
* Resource of single-component services or the global default for
* multi-component services. Mandatory if it is a single-component
* service and if cpus and memory are not specified at the Service
* level.
**/
public Service resource(Resource resource) {
this.resource = resource;
return this;
}
@ApiModelProperty(example = "null", value = "Resource of single-component services or the global default for multi-component services. Mandatory if it is a single-component service and if cpus and memory are not specified at the Service level.")
@JsonProperty("resource")
public Resource getResource() {
return resource;
}
public void setResource(Resource resource) {
this.resource = resource;
}
/**
* The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.
**/
public Service launchTime(Date launchTime) {
this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
return this;
}
@ApiModelProperty(example = "null", value = "The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.")
@JsonProperty("launch_time")
public Date getLaunchTime() {
return launchTime == null ? null : (Date) launchTime.clone();
}
@XmlElement(name = "launch_time")
public void setLaunchTime(Date launchTime) {
this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
}
/**
* In get response this provides the total number of running containers for
* this service (across all components) at the time of request. Note, a
* subsequent request can return a different number as and when more
* containers get allocated until it reaches the total number of containers or
* if a flex request has been made between the two requests.
**/
public Service numberOfRunningContainers(Long numberOfRunningContainers) {
this.numberOfRunningContainers = numberOfRunningContainers;
return this;
}
@ApiModelProperty(example = "null", value = "In get response this provides the total number of running containers for this service (across all components) at the time of request. Note, a subsequent request can return a different number as and when more containers get allocated until it reaches the total number of containers or if a flex request has been made between the two requests.")
@JsonProperty("number_of_running_containers")
public Long getNumberOfRunningContainers() {
return numberOfRunningContainers;
}
@XmlElement(name = "number_of_running_containers")
public void setNumberOfRunningContainers(Long numberOfRunningContainers) {
this.numberOfRunningContainers = numberOfRunningContainers;
}
/**
* Life time (in seconds) of the service from the time it reaches the
* RUNNING_BUT_UNREADY state (after which it is automatically destroyed by YARN). For
* unlimited lifetime do not set a lifetime value.
**/
public Service lifetime(Long lifetime) {
this.lifetime = lifetime;
return this;
}
@ApiModelProperty(example = "null", value = "Life time (in seconds) of the service from the time it reaches the RUNNING_BUT_UNREADY state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.")
@JsonProperty("lifetime")
public Long getLifetime() {
return lifetime;
}
public void setLifetime(Long lifetime) {
this.lifetime = lifetime;
}
/**
* Advanced scheduling and placement policies (optional). If not specified, it
* defaults to the default placement policy of the service owner. The design of
* placement policies is in the works. It is not yet clear how policies in
* conjunction with labels will be exposed to service owners.
* This is a placeholder for now. The advanced structure of this attribute
* will be determined by YARN-4902.
**/
public Service placementPolicy(PlacementPolicy placementPolicy) {
this.placementPolicy = placementPolicy;
return this;
}
@ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies (optional). If not specified, it defaults to the default placement policy of the service owner. The design of placement policies are in the works. It is not very clear at this point, how policies in conjunction with labels be exposed to service owners. This is a placeholder for now. The advanced structure of this attribute will be determined by YARN-4902.")
@JsonProperty("placement_policy")
public PlacementPolicy getPlacementPolicy() {
return placementPolicy;
}
@XmlElement(name = "placement_policy")
public void setPlacementPolicy(PlacementPolicy placementPolicy) {
this.placementPolicy = placementPolicy;
}
/**
* Components of a service.
**/
public Service components(List<Component> components) {
this.components = components;
return this;
}
@ApiModelProperty(example = "null", value = "Components of an service.")
@JsonProperty("components")
public List<Component> getComponents() {
return components;
}
public void setComponents(List<Component> components) {
this.components = components;
}
public void addComponent(Component component) {
components.add(component);
}
public Component getComponent(String name) {
for (Component component : components) {
if (component.getName().equals(name)) {
return component;
}
}
return null;
}
/**
* Config properties of a service. Configurations provided at the
* service/global level are available to all the components. Specific
* properties can be overridden at the component level.
**/
public Service configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
@ApiModelProperty(example = "null", value = "Config properties of an service. Configurations provided at the service/global level are available to all the components. Specific properties can be overridden at the component level.")
@JsonProperty("configuration")
public Configuration getConfiguration() {
return configuration;
}
public void setConfiguration(Configuration configuration) {
this.configuration = configuration;
}
/**
* State of the service. Specifying a value for this attribute for the
* POST payload raises a validation error. This attribute is available only in
* the GET response of a started service.
**/
public Service state(ServiceState state) {
this.state = state;
return this;
}
@ApiModelProperty(example = "null", value = "State of the service. Specifying a value for this attribute for the POST payload raises a validation error. This attribute is available only in the GET response of a started service.")
@JsonProperty("state")
public ServiceState getState() {
return state;
}
public void setState(ServiceState state) {
this.state = state;
}
/**
* A blob of key-value pairs of quicklinks to be exported for a service.
**/
public Service quicklinks(Map<String, String> quicklinks) {
this.quicklinks = quicklinks;
return this;
}
@ApiModelProperty(example = "null", value = "A blob of key-value pairs of quicklinks to be exported for an service.")
@JsonProperty("quicklinks")
public Map<String, String> getQuicklinks() {
return quicklinks;
}
public void setQuicklinks(Map<String, String> quicklinks) {
this.quicklinks = quicklinks;
}
/**
* The YARN queue that this service should be submitted to.
**/
public Service queue(String queue) {
this.queue = queue;
return this;
}
@ApiModelProperty(example = "null", value = "The YARN queue that this service should be submitted to.")
@JsonProperty("queue")
public String getQueue() {
return queue;
}
public void setQueue(String queue) {
this.queue = queue;
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Service service = (Service) o;
return Objects.equals(this.name, service.name);
}
@Override
public int hashCode() {
return Objects.hash(name);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class Service {\n");
sb.append(" name: ").append(toIndentedString(name)).append("\n");
sb.append(" id: ").append(toIndentedString(id)).append("\n");
sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n");
sb.append(" resource: ").append(toIndentedString(resource)).append("\n");
sb.append(" launchTime: ").append(toIndentedString(launchTime))
.append("\n");
sb.append(" numberOfRunningContainers: ")
.append(toIndentedString(numberOfRunningContainers)).append("\n");
sb.append(" lifetime: ").append(toIndentedString(lifetime)).append("\n");
sb.append(" placementPolicy: ").append(toIndentedString(placementPolicy))
.append("\n");
sb.append(" components: ").append(toIndentedString(components))
.append("\n");
sb.append(" configuration: ").append(toIndentedString(configuration))
.append("\n");
sb.append(" state: ").append(toIndentedString(state)).append("\n");
sb.append(" quicklinks: ").append(toIndentedString(quicklinks))
.append("\n");
sb.append(" queue: ").append(toIndentedString(queue)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
}
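A sketch tying the records above into a single-component service spec; the names and values are illustrative, and validation of the assembled spec is performed later by the client (see ServiceClient below).

import org.apache.hadoop.yarn.service.api.records.Resource;
import org.apache.hadoop.yarn.service.api.records.Service;

public class ServiceSketch {
  public static void main(String[] args) {
    Service spec = new Service()
        .name("sleeper-service")    // required, must be unique
        .resource(new Resource().cpus(1).memory("256"))
        .lifetime(3600L)            // destroyed ~1h after reaching RUNNING_BUT_UNREADY
        .queue("default");
    // A multi-component service would instead call addComponent() per component
    // and rely on the global resource/configuration as component defaults.
    System.out.println(spec);
  }
}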

View File

@ -0,0 +1,33 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModel;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The current state of a service.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "The current state of an service.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
public enum ServiceState {
ACCEPTED, STARTED, STABLE, STOPPED, FAILED;
}

View File

@ -0,0 +1,148 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.api.records;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.Objects;
import javax.xml.bind.annotation.XmlRootElement;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The current status of a submitted service, returned as a response to the
* GET API.
**/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "The current status of a submitted service, returned as a response to the GET API.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@XmlRootElement
@JsonInclude(JsonInclude.Include.NON_NULL)
public class ServiceStatus extends BaseResource {
private static final long serialVersionUID = -3469885905347851034L;
private String diagnostics = null;
private ServiceState state = null;
private Integer code = null;
/**
* Diagnostic information (if any) about the reason for the current state of
* the service. It typically has a non-null value if the service is in a
* non-running state.
**/
public ServiceStatus diagnostics(String diagnostics) {
this.diagnostics = diagnostics;
return this;
}
@ApiModelProperty(example = "null", value = "Diagnostic information (if any) for the reason of the current state of the service. It typically has a non-null value, if the service is in a non-running state.")
@JsonProperty("diagnostics")
public String getDiagnostics() {
return diagnostics;
}
public void setDiagnostics(String diagnostics) {
this.diagnostics = diagnostics;
}
/**
* Service state.
**/
public ServiceStatus state(ServiceState state) {
this.state = state;
return this;
}
@ApiModelProperty(example = "null", value = "Service state.")
@JsonProperty("state")
public ServiceState getState() {
return state;
}
public void setState(ServiceState state) {
this.state = state;
}
/**
* An error code specific to a scenario which service owners should be able to use
* to understand the failure in addition to the diagnostic information.
**/
public ServiceStatus code(Integer code) {
this.code = code;
return this;
}
@ApiModelProperty(example = "null", value = "An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information.")
@JsonProperty("code")
public Integer getCode() {
return code;
}
public void setCode(Integer code) {
this.code = code;
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ServiceStatus serviceStatus = (ServiceStatus) o;
return Objects.equals(this.diagnostics, serviceStatus.diagnostics)
&& Objects.equals(this.state, serviceStatus.state)
&& Objects.equals(this.code, serviceStatus.code);
}
@Override
public int hashCode() {
return Objects.hash(diagnostics, state, code);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class ServiceStatus {\n");
sb.append(" diagnostics: ").append(toIndentedString(diagnostics))
.append("\n");
sb.append(" state: ").append(toIndentedString(state)).append("\n");
sb.append(" code: ").append(toIndentedString(code)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
}
}

View File

@ -0,0 +1,57 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.client.ServerProxy;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
import java.net.InetSocketAddress;
import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_THEN_FAIL;
public class ClientAMProxy extends ServerProxy {
public static <T> T createProxy(final Configuration conf,
final Class<T> protocol, final UserGroupInformation ugi,
final YarnRPC rpc, final InetSocketAddress serverAddress) {
Configuration confClone = new Configuration(conf);
confClone.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
confClone.setInt(CommonConfigurationKeysPublic.
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 0);
RetryPolicy retryPolicy;
if (conf.getLong(YarnServiceConf.CLIENT_AM_RETRY_MAX_WAIT_MS, 0) == 0) {
// by default no retry
retryPolicy = TRY_ONCE_THEN_FAIL;
} else {
retryPolicy =
createRetryPolicy(conf, YarnServiceConf.CLIENT_AM_RETRY_MAX_WAIT_MS,
15 * 60 * 1000, YarnServiceConf.CLIENT_AM_RETRY_MAX_INTERVAL_MS,
2 * 1000);
}
return createRetriableProxy(confClone, protocol, ugi, rpc, serverAddress,
retryPolicy);
}
}
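By default the proxy fails fast with TRY_ONCE_THEN_FAIL; setting a positive value for the max-wait key switches to the bounded retry policy, with the 15-minute and 2-second literals above serving as defaults for the wait and interval keys. A sketch of flipping that switch, assuming only the key constants shown in the code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.service.conf.YarnServiceConf;

public class ClientAMProxySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // 0 (the default) keeps TRY_ONCE_THEN_FAIL; any positive wait enables retries.
    conf.setLong(YarnServiceConf.CLIENT_AM_RETRY_MAX_WAIT_MS, 15 * 60 * 1000L);
    // A proxy created with this conf would retry the AM connection up to that wait time.
  }
}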

View File

@ -0,0 +1,960 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.client;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryNTimes;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest;
import org.apache.hadoop.yarn.api.records.*;
import org.apache.hadoop.yarn.client.api.AppAdminClient;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto;
import org.apache.hadoop.yarn.service.ClientAMProtocol;
import org.apache.hadoop.yarn.service.ServiceMaster;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.api.records.ServiceState;
import org.apache.hadoop.yarn.service.conf.SliderExitCodes;
import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
import org.apache.hadoop.yarn.service.containerlaunch.ClasspathConstructor;
import org.apache.hadoop.yarn.service.containerlaunch.JavaCommandLineBuilder;
import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
import org.apache.hadoop.yarn.service.exceptions.BadConfigException;
import org.apache.hadoop.yarn.service.exceptions.SliderException;
import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
import org.apache.hadoop.yarn.service.provider.ProviderUtils;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.apache.hadoop.yarn.service.utils.ZookeeperUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.Times;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.text.MessageFormat;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import static org.apache.hadoop.yarn.api.records.YarnApplicationState.*;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*;
import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.jsonSerDeser;
import static org.apache.hadoop.yarn.service.utils.ServiceUtils.*;
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class ServiceClient extends AppAdminClient implements SliderExitCodes,
YarnServiceConstants {
private static final Logger LOG =
LoggerFactory.getLogger(ServiceClient.class);
private SliderFileSystem fs;
//TODO disable retry so that client / rest API doesn't block?
protected YarnClient yarnClient;
// Avoid looking up applicationId from fs all the time.
private Map<String, ApplicationId> cachedAppIds = new ConcurrentHashMap<>();
private RegistryOperations registryClient;
private CuratorFramework curatorClient;
private YarnRPC rpc;
private static EnumSet<YarnApplicationState> terminatedStates =
EnumSet.of(FINISHED, FAILED, KILLED);
private static EnumSet<YarnApplicationState> liveStates =
EnumSet.of(NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING);
private static EnumSet<YarnApplicationState> preRunningStates =
EnumSet.of(NEW, NEW_SAVING, SUBMITTED, ACCEPTED);
@Override protected void serviceInit(Configuration configuration)
throws Exception {
fs = new SliderFileSystem(configuration);
yarnClient = YarnClient.createYarnClient();
rpc = YarnRPC.create(configuration);
addService(yarnClient);
super.serviceInit(configuration);
}
@Override
protected void serviceStop() throws Exception {
if (registryClient != null) {
registryClient.stop();
}
super.serviceStop();
}
public Service loadAppJsonFromLocalFS(String fileName, String serviceName,
Long lifetime, String queue) throws IOException, YarnException {
File file = new File(fileName);
if (!file.exists() && fileName.equals(file.getName())) {
String examplesDirStr = System.getenv("YARN_SERVICE_EXAMPLES_DIR");
String[] examplesDirs;
if (examplesDirStr == null) {
String yarnHome = System
.getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key());
examplesDirs = new String[]{
yarnHome + "/share/hadoop/yarn/yarn-service-examples",
yarnHome + "/yarn-service-examples"
};
} else {
examplesDirs = StringUtils.split(examplesDirStr, ":");
}
for (String dir : examplesDirs) {
file = new File(MessageFormat.format("{0}/{1}/{2}.json",
dir, fileName, fileName));
if (file.exists()) {
break;
}
// Then look for secondary location.
file = new File(MessageFormat.format("{0}/{1}.json",
dir, fileName));
if (file.exists()) {
break;
}
}
}
if (!file.exists()) {
throw new YarnException("File or example could not be found: " +
fileName);
}
Path filePath = new Path(file.getAbsolutePath());
LOG.info("Loading service definition from local FS: " + filePath);
Service service = jsonSerDeser
.load(FileSystem.getLocal(getConfig()), filePath);
if (!StringUtils.isEmpty(serviceName)) {
service.setName(serviceName);
}
if (lifetime != null && lifetime > 0) {
service.setLifetime(lifetime);
}
if (!StringUtils.isEmpty(queue)) {
service.setQueue(queue);
}
return service;
}
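// Resolution order for loadAppJsonFromLocalFS("sleeper", ...) when the name is
// not an existing local path (example name "sleeper" is an assumption):
//   1. <dir>/sleeper/sleeper.json   for each examples dir
//   2. <dir>/sleeper.json           fallback within the same dir
// where the dirs come from $YARN_SERVICE_EXAMPLES_DIR (colon-separated) or
// default to $HADOOP_YARN_HOME/share/hadoop/yarn/yarn-service-examples and
// $HADOOP_YARN_HOME/yarn-service-examples.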
public int actionSave(String fileName, String serviceName, Long lifetime,
String queue) throws IOException, YarnException {
return actionBuild(loadAppJsonFromLocalFS(fileName, serviceName,
lifetime, queue));
}
public int actionBuild(Service service)
throws YarnException, IOException {
Path appDir = checkAppNotExistOnHdfs(service);
ServiceApiUtil.validateAndResolveService(service, fs, getConfig());
createDirAndPersistApp(appDir, service);
return EXIT_SUCCESS;
}
public int actionLaunch(String fileName, String serviceName, Long lifetime,
String queue) throws IOException, YarnException {
actionCreate(loadAppJsonFromLocalFS(fileName, serviceName, lifetime,
queue));
return EXIT_SUCCESS;
}
public ApplicationId actionCreate(Service service)
throws IOException, YarnException {
String serviceName = service.getName();
ServiceApiUtil.validateNameFormat(serviceName, getConfig());
ServiceApiUtil.validateAndResolveService(service, fs, getConfig());
verifyNoLiveAppInRM(serviceName, "create");
Path appDir = checkAppNotExistOnHdfs(service);
// Write the definition first and then submit - AM will read the definition
createDirAndPersistApp(appDir, service);
ApplicationId appId = submitApp(service);
cachedAppIds.put(serviceName, appId);
service.setId(appId.toString());
// update app definition with appId
persistAppDef(appDir, service);
return appId;
}
public int actionFlex(String serviceName, Map<String, String>
componentCountStrings) throws YarnException, IOException {
Map<String, Long> componentCounts =
new HashMap<>(componentCountStrings.size());
Service persistedService =
ServiceApiUtil.loadService(fs, serviceName);
if (!StringUtils.isEmpty(persistedService.getId())) {
cachedAppIds.put(persistedService.getName(),
ApplicationId.fromString(persistedService.getId()));
} else {
throw new YarnException(persistedService.getName()
+ " appId is null; the service may not have been submitted to YARN yet");
}
for (Map.Entry<String, String> entry : componentCountStrings.entrySet()) {
String compName = entry.getKey();
ServiceApiUtil.validateNameFormat(compName, getConfig());
Component component = persistedService.getComponent(compName);
if (component == null) {
throw new IllegalArgumentException(entry.getKey() + " does not exist!");
}
long numberOfContainers =
parseNumberOfContainers(component, entry.getValue());
componentCounts.put(compName, numberOfContainers);
}
flexComponents(serviceName, componentCounts, persistedService);
return EXIT_SUCCESS;
}
// Parse the number of containers requested by the user, e.g.
//   +5 means add 5 more containers
//   -5 means remove 5 containers (if the count would go negative, it is reset to 0)
//    5 means set the count to exactly 5
private long parseNumberOfContainers(Component component, String newNumber) {
long orig = component.getNumberOfContainers();
if (newNumber.startsWith("+")) {
return orig + Long.parseLong(newNumber.substring(1));
} else if (newNumber.startsWith("-")) {
long ret = orig - Long.parseLong(newNumber.substring(1));
if (ret < 0) {
LOG.warn(
"[COMPONENT {}]: component count went negative ({}{} = {}), resetting it to 0.",
component.getName(), orig, newNumber, ret);
ret = 0;
}
return ret;
} else {
return Long.parseLong(newNumber);
}
}
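// Worked examples (assuming the component currently has 3 containers):
//   parseNumberOfContainers(comp, "+2") -> 5
//   parseNumberOfContainers(comp, "-5") -> 0   (clamped to zero, with a warning)
//   parseNumberOfContainers(comp, "2")  -> 2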
// Called by Rest Service
public Map<String, Long> flexByRestService(String serviceName,
Map<String, Long> componentCounts) throws YarnException, IOException {
// load app definition
Service persistedService = ServiceApiUtil.loadService(fs, serviceName);
if (StringUtils.isEmpty(persistedService.getId())) {
throw new YarnException(
serviceName + " appId is null; the service may not have been submitted to YARN yet");
}
cachedAppIds.put(persistedService.getName(),
ApplicationId.fromString(persistedService.getId()));
return flexComponents(serviceName, componentCounts, persistedService);
}
private Map<String, Long> flexComponents(String serviceName,
Map<String, Long> componentCounts, Service persistedService)
throws YarnException, IOException {
ServiceApiUtil.validateNameFormat(serviceName, getConfig());
Map<String, Long> original = new HashMap<>(componentCounts.size());
ComponentCountProto.Builder countBuilder = ComponentCountProto.newBuilder();
FlexComponentsRequestProto.Builder requestBuilder =
FlexComponentsRequestProto.newBuilder();
for (Component persistedComp : persistedService.getComponents()) {
String name = persistedComp.getName();
if (componentCounts.containsKey(persistedComp.getName())) {
original.put(name, persistedComp.getNumberOfContainers());
persistedComp.setNumberOfContainers(componentCounts.get(name));
// build the request
countBuilder.setName(persistedComp.getName())
.setNumberOfContainers(persistedComp.getNumberOfContainers());
requestBuilder.addComponents(countBuilder.build());
}
}
if (original.size() < componentCounts.size()) {
componentCounts.keySet().removeAll(original.keySet());
throw new YarnException("Components " + componentCounts.keySet()
+ " do not exist in app definition.");
}
jsonSerDeser
.save(fs.getFileSystem(), ServiceApiUtil.getServiceJsonPath(fs, serviceName),
persistedService, true);
ApplicationReport appReport =
yarnClient.getApplicationReport(getAppId(serviceName));
if (appReport.getYarnApplicationState() != RUNNING) {
String message =
serviceName + " is at " + appReport.getYarnApplicationState()
+ " state, flex can only be invoked when service is running";
LOG.error(message);
throw new YarnException(message);
}
if (StringUtils.isEmpty(appReport.getHost())) {
throw new YarnException(serviceName + " AM hostname is empty");
}
ClientAMProtocol proxy =
createAMProxy(appReport.getHost(), appReport.getRpcPort());
proxy.flexComponents(requestBuilder.build());
for (Map.Entry<String, Long> entry : original.entrySet()) {
LOG.info("[COMPONENT {}]: number of containers changed from {} to {}",
entry.getKey(), entry.getValue(),
componentCounts.get(entry.getKey()));
}
return original;
}
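// A minimal flex sketch through the CLI-facing entry point (illustrative;
// the service and component names are assumptions):
//   client.actionFlex("my-service",
//       Collections.singletonMap("worker", "+2"));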
public int actionStop(String serviceName)
throws YarnException, IOException {
return actionStop(serviceName, true);
}
public int actionStop(String serviceName, boolean waitForAppStopped)
throws YarnException, IOException {
ServiceApiUtil.validateNameFormat(serviceName, getConfig());
ApplicationId currentAppId = getAppId(serviceName);
ApplicationReport report = yarnClient.getApplicationReport(currentAppId);
if (terminatedStates.contains(report.getYarnApplicationState())) {
LOG.info("Service {} is already in a terminated state {}", serviceName,
report.getYarnApplicationState());
return EXIT_SUCCESS;
}
if (preRunningStates.contains(report.getYarnApplicationState())) {
String msg = serviceName + " is at " + report.getYarnApplicationState()
+ ", forcefully killed by user!";
yarnClient.killApplication(currentAppId, msg);
LOG.info(msg);
return EXIT_SUCCESS;
}
if (StringUtils.isEmpty(report.getHost())) {
throw new YarnException(serviceName + " AM hostname is empty");
}
LOG.info("Stopping service {}, with appId = {}", serviceName, currentAppId);
try {
ClientAMProtocol proxy =
createAMProxy(report.getHost(), report.getRpcPort());
cachedAppIds.remove(serviceName);
if (proxy != null) {
// try to stop the app gracefully.
StopRequestProto request = StopRequestProto.newBuilder().build();
proxy.stop(request);
LOG.info("Service " + serviceName + " is being gracefully stopped...");
} else {
yarnClient.killApplication(currentAppId,
serviceName + " is forcefully killed by user!");
LOG.info("Forcefully kill the service: " + serviceName);
return EXIT_SUCCESS;
}
if (!waitForAppStopped) {
return EXIT_SUCCESS;
}
// Wait until the app is killed.
long startTime = System.currentTimeMillis();
int pollCount = 0;
while (true) {
Thread.sleep(2000);
report = yarnClient.getApplicationReport(currentAppId);
if (terminatedStates.contains(report.getYarnApplicationState())) {
LOG.info("Service " + serviceName + " is stopped.");
break;
}
// Forcefully kill after 10 seconds.
if ((System.currentTimeMillis() - startTime) > 10000) {
LOG.info("Stop operation timeout stopping, forcefully kill the app "
+ serviceName);
yarnClient.killApplication(currentAppId,
"Forcefully kill the app by user");
break;
}
if (++pollCount % 10 == 0) {
LOG.info("Waiting for service " + serviceName + " to be stopped.");
}
}
} catch (IOException | YarnException | InterruptedException e) {
LOG.info("Failed to stop " + serviceName
+ " gracefully, forcefully kill the app.");
yarnClient.killApplication(currentAppId, "Forcefully kill the app");
}
return EXIT_SUCCESS;
}
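// Typical teardown sequence (illustrative):
//   client.actionStop("my-service", true);  // graceful stop; falls back to kill
//   client.actionDestroy("my-service");     // delete HDFS dir, ZK node, registry entry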
public int actionDestroy(String serviceName) throws YarnException,
IOException {
ServiceApiUtil.validateNameFormat(serviceName, getConfig());
verifyNoLiveAppInRM(serviceName, "destroy");
Path appDir = fs.buildClusterDirPath(serviceName);
FileSystem fileSystem = fs.getFileSystem();
// remove from the appId cache
cachedAppIds.remove(serviceName);
if (fileSystem.exists(appDir)) {
if (fileSystem.delete(appDir, true)) {
LOG.info("Successfully deleted service dir for " + serviceName + ": "
+ appDir);
} else {
String message =
"Failed to delete service + " + serviceName + " at: " + appDir;
LOG.info(message);
throw new YarnException(message);
}
}
try {
deleteZKNode(serviceName);
} catch (Exception e) {
throw new IOException("Could not delete zk node for " + serviceName, e);
}
String registryPath = ServiceRegistryUtils.registryPathForInstance(serviceName);
try {
getRegistryClient().delete(registryPath, true);
} catch (IOException e) {
LOG.warn("Error deleting registry entry {}", registryPath, e);
}
LOG.info("Destroyed cluster {}", serviceName);
return EXIT_SUCCESS;
}
private synchronized RegistryOperations getRegistryClient()
throws SliderException, IOException {
if (registryClient == null) {
registryClient =
RegistryOperationsFactory.createInstance("ServiceClient", getConfig());
registryClient.init(getConfig());
registryClient.start();
}
return registryClient;
}
private void deleteZKNode(String clusterName) throws Exception {
CuratorFramework curatorFramework = getCuratorClient();
String user = RegistryUtils.currentUser();
String zkPath = ServiceRegistryUtils.mkClusterPath(user, clusterName);
if (curatorFramework.checkExists().forPath(zkPath) != null) {
curatorFramework.delete().deletingChildrenIfNeeded().forPath(zkPath);
LOG.info("Deleted zookeeper path: " + zkPath);
}
}
private synchronized CuratorFramework getCuratorClient()
throws BadConfigException {
String registryQuorum =
getConfig().get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM);
// fail fast if no ZooKeeper quorum is configured
if (ServiceUtils.isUnset(registryQuorum)) {
throw new BadConfigException(
"No Zookeeper quorum provided in the" + " configuration property "
+ RegistryConstants.KEY_REGISTRY_ZK_QUORUM);
}
ZookeeperUtils.splitToHostsAndPortsStrictly(registryQuorum);
if (curatorClient == null) {
curatorClient =
CuratorFrameworkFactory.builder().connectString(registryQuorum)
.sessionTimeoutMs(10000).retryPolicy(new RetryNTimes(5, 2000))
.build();
curatorClient.start();
}
return curatorClient;
}
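// The quorum is read from the standard registry configuration key (value is
// illustrative):
//   <property>
//     <name>hadoop.registry.zk.quorum</name>
//     <value>zk1.example.com:2181,zk2.example.com:2181</value>
//   </property>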
private void verifyNoLiveAppInRM(String serviceName, String action)
throws IOException, YarnException {
Set<String> types = new HashSet<>(1);
types.add(YarnServiceConstants.APP_TYPE);
Set<String> tags = null;
if (serviceName != null) {
tags = Collections.singleton(ServiceUtils.createNameTag(serviceName));
}
GetApplicationsRequest request = GetApplicationsRequest.newInstance();
request.setApplicationTypes(types);
request.setApplicationTags(tags);
request.setApplicationStates(liveStates);
List<ApplicationReport> reports = yarnClient.getApplications(request);
if (!reports.isEmpty()) {
String message = "";
if (action.equals("destroy")) {
message = "Failed to destroy service " + serviceName
+ ", because it is still running.";
} else {
message = "Failed to " + action + " service " + serviceName
+ ", because it already exists.";
}
throw new YarnException(message);
}
}
private ApplicationId submitApp(Service app)
throws IOException, YarnException {
String serviceName = app.getName();
Configuration conf = getConfig();
Path appRootDir = fs.buildClusterDirPath(app.getName());
YarnClientApplication yarnApp = yarnClient.createApplication();
ApplicationSubmissionContext submissionContext =
yarnApp.getApplicationSubmissionContext();
ServiceApiUtil.validateCompResourceSize(
yarnApp.getNewApplicationResponse().getMaximumResourceCapability(),
app);
submissionContext.setKeepContainersAcrossApplicationAttempts(true);
if (app.getLifetime() > 0) {
Map<ApplicationTimeoutType, Long> appTimeout = new HashMap<>();
appTimeout.put(ApplicationTimeoutType.LIFETIME, app.getLifetime());
submissionContext.setApplicationTimeouts(appTimeout);
}
submissionContext.setMaxAppAttempts(YarnServiceConf
.getInt(YarnServiceConf.AM_RESTART_MAX, 20, app.getConfiguration(),
conf));
setLogAggregationContext(app, conf, submissionContext);
Map<String, LocalResource> localResources = new HashMap<>();
// copy local slideram-log4j.properties to hdfs and add to localResources
boolean hasAMLog4j =
addAMLog4jResource(serviceName, conf, localResources);
// copy jars to hdfs and add to localResources
addJarResource(serviceName, localResources);
// add keytab if in secure env
addKeytabResourceIfSecure(fs, localResources, conf, serviceName);
if (LOG.isDebugEnabled()) {
printLocalResources(localResources);
}
Map<String, String> env = addAMEnv();
// create AM CLI
String cmdStr = buildCommandLine(serviceName, conf, appRootDir, hasAMLog4j);
submissionContext.setResource(Resource.newInstance(YarnServiceConf
.getLong(YarnServiceConf.AM_RESOURCE_MEM,
YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM, app.getConfiguration(),
conf), 1));
String queue = app.getQueue();
if (StringUtils.isEmpty(queue)) {
queue = conf.get(YARN_QUEUE, "default");
}
submissionContext.setQueue(queue);
submissionContext.setApplicationName(serviceName);
submissionContext.setApplicationType(YarnServiceConstants.APP_TYPE);
Set<String> appTags =
AbstractClientProvider.createApplicationTags(serviceName, null, null);
if (!appTags.isEmpty()) {
submissionContext.setApplicationTags(appTags);
}
ContainerLaunchContext amLaunchContext =
Records.newRecord(ContainerLaunchContext.class);
amLaunchContext.setCommands(Collections.singletonList(cmdStr));
amLaunchContext.setEnvironment(env);
amLaunchContext.setLocalResources(localResources);
submissionContext.setAMContainerSpec(amLaunchContext);
yarnClient.submitApplication(submissionContext);
return submissionContext.getApplicationId();
}
private void setLogAggregationContext(Service app, Configuration conf,
ApplicationSubmissionContext submissionContext) {
LogAggregationContext context = Records.newRecord(LogAggregationContext
.class);
String finalLogInclude = YarnServiceConf.get
(FINAL_LOG_INCLUSION_PATTERN, null, app.getConfiguration(), conf);
if (!StringUtils.isEmpty(finalLogInclude)) {
context.setIncludePattern(finalLogInclude);
}
String finalLogExclude = YarnServiceConf.get
(FINAL_LOG_EXCLUSION_PATTERN, null, app.getConfiguration(), conf);
if (!StringUtils.isEmpty(finalLogExclude)) {
context.setExcludePattern(finalLogExclude);
}
String rollingLogInclude = YarnServiceConf.get
(ROLLING_LOG_INCLUSION_PATTERN, null, app.getConfiguration(), conf);
if (!StringUtils.isEmpty(rollingLogInclude)) {
context.setRolledLogsIncludePattern(rollingLogInclude);
}
String rollingLogExclude = YarnServiceConf.get
(ROLLING_LOG_EXCLUSION_PATTERN, null, app.getConfiguration(), conf);
if (!StringUtils.isEmpty(rollingLogExclude)) {
context.setRolledLogsExcludePattern(rollingLogExclude);
}
submissionContext.setLogAggregationContext(context);
}
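// Illustrative spec fragment enabling rolled-log aggregation. The property key
// shown is an assumption; the authoritative names are the YarnServiceConf
// *_PATTERN constants used above:
//   "configuration": {
//     "properties": {
//       "yarn.service.rolling-log.include-pattern": "std*"
//     }
//   }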
private void printLocalResources(Map<String, LocalResource> map) {
LOG.debug("Added LocalResource for localization: ");
StringBuilder builder = new StringBuilder();
for (Map.Entry<String, LocalResource> entry : map.entrySet()) {
builder.append(entry.getKey()).append(" -> ")
.append(entry.getValue().getResource().getFile())
.append(System.lineSeparator());
}
LOG.debug(builder.toString());
}
private String buildCommandLine(String serviceName, Configuration conf,
Path appRootDir, boolean hasSliderAMLog4j) throws BadConfigException {
JavaCommandLineBuilder CLI = new JavaCommandLineBuilder();
CLI.forceIPv4().headless();
//TODO CLI.setJVMHeap
//TODO CLI.addJVMOPTS
if (hasSliderAMLog4j) {
CLI.sysprop(SYSPROP_LOG4J_CONFIGURATION, YARN_SERVICE_LOG4J_FILENAME);
CLI.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
}
CLI.add(ServiceMaster.class.getCanonicalName());
//TODO debugAM CLI.add(Arguments.ARG_DEBUG)
CLI.add("-" + ServiceMaster.YARNFILE_OPTION, new Path(appRootDir,
serviceName + ".json"));
// pass the registry binding
CLI.addConfOptionToCLI(conf, RegistryConstants.KEY_REGISTRY_ZK_ROOT,
RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
CLI.addMandatoryConfOption(conf, RegistryConstants.KEY_REGISTRY_ZK_QUORUM);
// write out the path output
CLI.addOutAndErrFiles(STDOUT_AM, STDERR_AM);
String cmdStr = CLI.build();
LOG.debug("AM launch command: {}", cmdStr);
return cmdStr;
}
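// For orientation, the resulting AM command has roughly this shape
// (illustrative only; the exact JVM flags, yarnfile option name, and the
// stdout/stderr file names come from the builder and constants above):
//   $JAVA_HOME/bin/java -Djava.net.preferIPv4Stack=true -Djava.awt.headless=true \
//     org.apache.hadoop.yarn.service.ServiceMaster \
//     -yarnfile hdfs://<appRootDir>/<serviceName>.json \
//     1><LOG_DIR>/<stdout file> 2><LOG_DIR>/<stderr file>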
private Map<String, String> addAMEnv() throws IOException {
Map<String, String> env = new HashMap<>();
ClasspathConstructor classpath =
buildClasspath(YarnServiceConstants.SUBMITTED_CONF_DIR, "lib", fs, getConfig()
.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false));
env.put("CLASSPATH", classpath.buildClasspath());
env.put("LANG", "en_US.UTF-8");
env.put("LC_ALL", "en_US.UTF-8");
env.put("LANGUAGE", "en_US.UTF-8");
String jaas = System.getenv("HADOOP_JAAS_DEBUG");
if (jaas != null) {
env.put("HADOOP_JAAS_DEBUG", jaas);
}
if (!UserGroupInformation.isSecurityEnabled()) {
String userName = UserGroupInformation.getCurrentUser().getUserName();
LOG.debug("Run as user " + userName);
// HADOOP_USER_NAME env is used by UserGroupInformation when log in
// This env makes AM run as this user
env.put("HADOOP_USER_NAME", userName);
}
LOG.debug("AM env: \n{}", stringifyMap(env));
return env;
}
protected Path addJarResource(String serviceName,
Map<String, LocalResource> localResources)
throws IOException, SliderException {
Path libPath = fs.buildClusterDirPath(serviceName);
ProviderUtils
.addProviderJar(localResources, ServiceMaster.class, SERVICE_CORE_JAR, fs,
libPath, "lib", false);
Path dependencyLibTarGzip = fs.getDependencyTarGzip();
if (fs.isFile(dependencyLibTarGzip)) {
LOG.debug("Loading lib tar from " + fs.getFileSystem().getScheme() + ":/"
+ dependencyLibTarGzip);
fs.submitTarGzipAndUpdate(localResources);
} else {
String[] libs = ServiceUtils.getLibDirs();
LOG.info("Uploading all dependency jars to HDFS. For faster submission of" +
" apps, pre-upload dependency jars to HDFS "
+ "using command: yarn app -enableFastLaunch");
for (String libDirProp : libs) {
ProviderUtils.addAllDependencyJars(localResources, fs, libPath, "lib",
libDirProp);
}
}
return libPath;
}
private boolean addAMLog4jResource(String serviceName, Configuration conf,
Map<String, LocalResource> localResources)
throws IOException, BadClusterStateException {
boolean hasAMLog4j = false;
String hadoopConfDir =
System.getenv(ApplicationConstants.Environment.HADOOP_CONF_DIR.name());
if (hadoopConfDir != null) {
File localFile =
new File(hadoopConfDir, YarnServiceConstants.YARN_SERVICE_LOG4J_FILENAME);
if (localFile.exists()) {
Path localFilePath = createLocalPath(localFile);
Path appDirPath = fs.buildClusterDirPath(serviceName);
Path remoteConfPath =
new Path(appDirPath, YarnServiceConstants.SUBMITTED_CONF_DIR);
Path remoteFilePath =
new Path(remoteConfPath, YarnServiceConstants.YARN_SERVICE_LOG4J_FILENAME);
copy(conf, localFilePath, remoteFilePath);
LocalResource localResource =
fs.createAmResource(remoteConfPath, LocalResourceType.FILE);
localResources.put(localFilePath.getName(), localResource);
hasAMLog4j = true;
} else {
LOG.warn("AM log4j property file doesn't exist: " + localFile);
}
}
return hasAMLog4j;
}
public int actionStart(String serviceName) throws YarnException, IOException {
ServiceApiUtil.validateNameFormat(serviceName, getConfig());
Path appDir = checkAppExistOnHdfs(serviceName);
Service service = ServiceApiUtil.loadService(fs, serviceName);
ServiceApiUtil.validateAndResolveService(service, fs, getConfig());
// bail out if the service is already live in the RM
verifyNoLiveAppInRM(serviceName, "thaw");
ApplicationId appId = submitApp(service);
service.setId(appId.toString());
// write the app definition onto HDFS
Path appJson = persistAppDef(appDir, service);
LOG.info("Persisted service " + service.getName() + " at " + appJson);
return 0;
}
private Path checkAppNotExistOnHdfs(Service service)
throws IOException, SliderException {
Path appDir = fs.buildClusterDirPath(service.getName());
fs.verifyDirectoryNonexistent(
new Path(appDir, service.getName() + ".json"));
return appDir;
}
private Path checkAppExistOnHdfs(String serviceName)
throws IOException, SliderException {
Path appDir = fs.buildClusterDirPath(serviceName);
fs.verifyPathExists(new Path(appDir, serviceName + ".json"));
return appDir;
}
private void createDirAndPersistApp(Path appDir, Service service)
throws IOException, SliderException {
FsPermission appDirPermission = new FsPermission("750");
fs.createWithPermissions(appDir, appDirPermission);
Path appJson = persistAppDef(appDir, service);
LOG.info("Persisted service " + service.getName() + " at " + appJson);
}
private Path persistAppDef(Path appDir, Service service) throws IOException {
Path appJson = new Path(appDir, service.getName() + ".json");
jsonSerDeser.save(fs.getFileSystem(), appJson, service, true);
return appJson;
}
private void addKeytabResourceIfSecure(SliderFileSystem fileSystem,
Map<String, LocalResource> localResource, Configuration conf,
String serviceName) throws IOException, BadConfigException {
if (!UserGroupInformation.isSecurityEnabled()) {
return;
}
String keytabPreInstalledOnHost =
conf.get(YarnServiceConf.KEY_AM_KEYTAB_LOCAL_PATH);
if (StringUtils.isEmpty(keytabPreInstalledOnHost)) {
String amKeytabName =
conf.get(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_NAME);
String keytabDir = conf.get(YarnServiceConf.KEY_HDFS_KEYTAB_DIR);
Path keytabPath =
fileSystem.buildKeytabPath(keytabDir, amKeytabName, serviceName);
if (fileSystem.getFileSystem().exists(keytabPath)) {
LocalResource keytabRes =
fileSystem.createAmResource(keytabPath, LocalResourceType.FILE);
localResource
.put(YarnServiceConstants.KEYTAB_DIR + "/" + amKeytabName, keytabRes);
LOG.info("Adding AM keytab on hdfs: " + keytabPath);
} else {
LOG.warn("No keytab file was found at {}.", keytabPath);
if (conf.getBoolean(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_REQUIRED, false)) {
throw new BadConfigException("No keytab file was found at %s.",
keytabPath);
} else {
LOG.warn("The AM will be "
+ "started without a kerberos authenticated identity. "
+ "The service is therefore not guaranteed to remain "
+ "operational beyond 24 hours.");
}
}
}
}
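// Configuration summary (keys are referenced via the YarnServiceConf constants
// above): either point KEY_AM_KEYTAB_LOCAL_PATH at a keytab pre-installed on
// every host, or upload one to HDFS under KEY_HDFS_KEYTAB_DIR with the name
// KEY_AM_LOGIN_KEYTAB_NAME. With KEY_AM_LOGIN_KEYTAB_REQUIRED=true a missing
// keytab fails submission instead of merely warning.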
public String updateLifetime(String serviceName, long lifetime)
throws YarnException, IOException {
ApplicationId currentAppId = getAppId(serviceName);
ApplicationReport report = yarnClient.getApplicationReport(currentAppId);
if (report == null) {
throw new YarnException("Service not found for " + serviceName);
}
ApplicationId appId = report.getApplicationId();
LOG.info("Updating lifetime of an service: serviceName = " + serviceName
+ ", appId = " + appId + ", lifetime = " + lifetime);
Map<ApplicationTimeoutType, String> map = new HashMap<>();
String newTimeout =
Times.formatISO8601(System.currentTimeMillis() + lifetime * 1000);
map.put(ApplicationTimeoutType.LIFETIME, newTimeout);
UpdateApplicationTimeoutsRequest request =
UpdateApplicationTimeoutsRequest.newInstance(appId, map);
yarnClient.updateApplicationTimeouts(request);
LOG.info(
"Successfully updated lifetime for service: serviceName = " + serviceName
+ ", appId = " + appId + ". New expiry time in ISO8601 format is "
+ newTimeout);
return newTimeout;
}
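// Illustrative: updateLifetime("my-service", 3600) moves the LIFETIME timeout
// to one hour from now and returns the new expiry as an ISO8601 string, e.g.
// "2017-11-07T22:02:19.000+0000" (format per Times.formatISO8601; the example
// value is invented).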
public ServiceState convertState(FinalApplicationStatus status) {
switch (status) {
case UNDEFINED:
return ServiceState.ACCEPTED;
case FAILED:
case KILLED:
return ServiceState.FAILED;
case ENDED:
case SUCCEEDED:
return ServiceState.STOPPED;
}
return ServiceState.ACCEPTED;
}
public String getStatusString(String appId)
throws IOException, YarnException {
ApplicationReport appReport =
yarnClient.getApplicationReport(ApplicationId.fromString(appId));
if (appReport.getYarnApplicationState() != RUNNING) {
return "";
}
if (StringUtils.isEmpty(appReport.getHost())) {
return "";
}
ClientAMProtocol amProxy =
createAMProxy(appReport.getHost(), appReport.getRpcPort());
GetStatusResponseProto response =
amProxy.getStatus(GetStatusRequestProto.newBuilder().build());
return response.getStatus();
}
public Service getStatus(String serviceName)
throws IOException, YarnException {
ServiceApiUtil.validateNameFormat(serviceName, getConfig());
ApplicationId currentAppId = getAppId(serviceName);
ApplicationReport appReport = yarnClient.getApplicationReport(currentAppId);
Service appSpec = new Service();
appSpec.setName(serviceName);
appSpec.setState(convertState(appReport.getFinalApplicationStatus()));
ApplicationTimeout lifetime =
appReport.getApplicationTimeouts().get(ApplicationTimeoutType.LIFETIME);
if (lifetime != null) {
appSpec.setLifetime(lifetime.getRemainingTime());
}
if (appReport.getYarnApplicationState() != RUNNING) {
LOG.info("Service {} is at {} state", serviceName,
appReport.getYarnApplicationState());
return appSpec;
}
if (StringUtils.isEmpty(appReport.getHost())) {
LOG.warn(serviceName + " AM hostname is empty");
return appSpec;
}
ClientAMProtocol amProxy =
createAMProxy(appReport.getHost(), appReport.getRpcPort());
GetStatusResponseProto response =
amProxy.getStatus(GetStatusRequestProto.newBuilder().build());
appSpec = jsonSerDeser.fromJson(response.getStatus());
return appSpec;
}
public YarnClient getYarnClient() {
return this.yarnClient;
}
public int enableFastLaunch() throws IOException, YarnException {
return actionDependency(true);
}
public int actionDependency(boolean overwrite)
throws IOException, YarnException {
String currentUser = RegistryUtils.currentUser();
LOG.info("Running command as user {}", currentUser);
Path dependencyLibTarGzip = fs.getDependencyTarGzip();
// Check if dependency has already been uploaded, in which case log
// appropriately and exit success (unless overwrite has been requested)
if (fs.isFile(dependencyLibTarGzip) && !overwrite) {
System.out.println(String.format(
"Dependency libs are already uploaded to %s.", dependencyLibTarGzip
.toUri()));
return EXIT_SUCCESS;
}
String[] libDirs = ServiceUtils.getLibDirs();
if (libDirs.length > 0) {
File tempLibTarGzipFile = File.createTempFile(
YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME + "_",
YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT);
// copy all jars
tarGzipFolder(libDirs, tempLibTarGzipFile, createJarFilter());
LOG.info("Version Info: " + VersionInfo.getBuildVersion());
fs.copyLocalFileToHdfs(tempLibTarGzipFile, dependencyLibTarGzip,
new FsPermission(YarnServiceConstants.DEPENDENCY_DIR_PERMISSIONS));
return EXIT_SUCCESS;
} else {
return EXIT_FALSE;
}
}
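// Illustrative CLI usage, matching the hint logged by addJarResource above:
//   yarn app -enableFastLaunch
// This tars up the local lib dirs and uploads them once to the dependency
// path on HDFS, so later submissions skip the per-app jar upload.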
protected ClientAMProtocol createAMProxy(String host, int port)
throws IOException {
InetSocketAddress address =
NetUtils.createSocketAddrForHost(host, port);
return ClientAMProxy.createProxy(getConfig(), ClientAMProtocol.class,
UserGroupInformation.getCurrentUser(), rpc, address);
}
private synchronized ApplicationId getAppId(String serviceName)
throws IOException, YarnException {
if (cachedAppIds.containsKey(serviceName)) {
return cachedAppIds.get(serviceName);
}
Service persistedService = ServiceApiUtil.loadService(fs, serviceName);
if (persistedService == null) {
throw new YarnException("Service " + serviceName
+ " doesn't exist on hdfs. Please check if the app exists in RM");
}
ApplicationId currentAppId = ApplicationId.fromString(persistedService.getId());
cachedAppIds.put(serviceName, currentAppId);
return currentAppId;
}
}
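// End-to-end usage sketch (illustrative only; it assumes default construction
// plus the usual service init/start lifecycle, and that the "sleeper" example
// resolves as described in loadAppJsonFromLocalFS):
//
//   ServiceClient client = new ServiceClient();
//   client.init(new YarnConfiguration());
//   client.start();
//   try {
//     client.actionLaunch("sleeper", "my-sleeper", 3600L, null);
//     Service status = client.getStatus("my-sleeper");
//     client.actionStop("my-sleeper", true);
//     client.actionDestroy("my-sleeper");
//   } finally {
//     client.stop();
//   }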


@ -0,0 +1,584 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.component;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId;
import org.apache.hadoop.yarn.service.ContainerFailureTracker;
import org.apache.hadoop.yarn.service.ServiceContext;
import org.apache.hadoop.yarn.service.ServiceScheduler;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
import org.apache.hadoop.yarn.service.ServiceMetrics;
import org.apache.hadoop.yarn.service.provider.ProviderUtils;
import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils;
import org.apache.hadoop.yarn.service.monitor.probe.Probe;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import static org.apache.hadoop.yarn.api.records.ContainerExitStatus.*;
import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*;
import static org.apache.hadoop.yarn.service.component.ComponentEventType.*;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.START;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.STOP;
import static org.apache.hadoop.yarn.service.component.ComponentState.*;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_THRESHOLD;
public class Component implements EventHandler<ComponentEvent> {
private static final Logger LOG = LoggerFactory.getLogger(Component.class);
private org.apache.hadoop.yarn.service.api.records.Component componentSpec;
private long allocateId;
private Priority priority;
private ServiceMetrics componentMetrics;
private ServiceScheduler scheduler;
private ServiceContext context;
private AMRMClientAsync<ContainerRequest> amrmClient;
private AtomicLong instanceIdCounter = new AtomicLong();
private Map<String, ComponentInstance> compInstances =
new ConcurrentHashMap<>();
// component instances to be assigned with a container
private List<ComponentInstance> pendingInstances = new LinkedList<>();
private ContainerFailureTracker failureTracker;
private Probe probe;
private final ReentrantReadWriteLock.ReadLock readLock;
private final ReentrantReadWriteLock.WriteLock writeLock;
public int maxContainerFailurePerComp;
// The number of containers failed since last reset. This excludes preempted,
// disk_failed containers etc. This will be reset to 0 periodically.
public AtomicInteger currentContainerFailure = new AtomicInteger(0);
private StateMachine<ComponentState, ComponentEventType, ComponentEvent>
stateMachine;
private AsyncDispatcher compInstanceDispatcher;
private static final StateMachineFactory<Component, ComponentState, ComponentEventType, ComponentEvent>
stateMachineFactory =
new StateMachineFactory<Component, ComponentState, ComponentEventType, ComponentEvent>(
INIT)
// From INIT, a FLEX moves the component to STABLE or FLEXING
.addTransition(INIT, EnumSet.of(STABLE, FLEXING),
FLEX, new FlexComponentTransition())
// container recovered on AM restart
.addTransition(INIT, INIT, CONTAINER_RECOVERED,
new ContainerRecoveredTransition())
// container allocated by RM
.addTransition(FLEXING, FLEXING, CONTAINER_ALLOCATED,
new ContainerAllocatedTransition())
// container launched on NM
.addTransition(FLEXING, EnumSet.of(STABLE, FLEXING),
CONTAINER_STARTED, new ContainerStartedTransition())
// container failed while flexing
.addTransition(FLEXING, FLEXING, CONTAINER_COMPLETED,
new ContainerCompletedTransition())
// Flex while previous flex is still in progress
.addTransition(FLEXING, EnumSet.of(FLEXING, STABLE), FLEX,
new FlexComponentTransition())
// container failed while stable
.addTransition(STABLE, FLEXING, CONTAINER_COMPLETED,
new ContainerCompletedTransition())
// Ignore surplus container
.addTransition(STABLE, STABLE, CONTAINER_ALLOCATED,
new ContainerAllocatedTransition())
// Flex by user
// For flex up, go to FLEXING state
// For flex down, go to STABLE state
.addTransition(STABLE, EnumSet.of(STABLE, FLEXING),
FLEX, new FlexComponentTransition())
.installTopology();
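// Lifecycle summary of the topology above:
//   INIT    --FLEX-->                STABLE or FLEXING (initial container requests)
//   FLEXING --CONTAINER_STARTED-->   STABLE once running == desired
//   STABLE  --CONTAINER_COMPLETED--> FLEXING (the instance is re-queued as pending)
//   CONTAINER_RECOVERED is handled in INIT during AM restart.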
public Component(
org.apache.hadoop.yarn.service.api.records.Component component,
long allocateId, ServiceContext context) {
this.allocateId = allocateId;
this.priority = Priority.newInstance((int) allocateId);
this.componentSpec = component;
componentMetrics = ServiceMetrics.register(component.getName(),
"Metrics for component " + component.getName());
componentMetrics
.tag("type", "Metrics type [component or service]", "component");
this.scheduler = context.scheduler;
this.context = context;
amrmClient = scheduler.getAmRMClient();
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
this.readLock = lock.readLock();
this.writeLock = lock.writeLock();
this.stateMachine = stateMachineFactory.make(this);
compInstanceDispatcher = scheduler.getCompInstanceDispatcher();
failureTracker =
new ContainerFailureTracker(context, this);
probe = MonitorUtils.getProbe(componentSpec.getReadinessCheck());
maxContainerFailurePerComp = componentSpec.getConfiguration()
.getPropertyInt(CONTAINER_FAILURE_THRESHOLD, 10);
createNumCompInstances(component.getNumberOfContainers());
}
private void createNumCompInstances(long count) {
for (int i = 0; i < count; i++) {
createOneCompInstance();
}
}
private void createOneCompInstance() {
ComponentInstanceId id =
new ComponentInstanceId(instanceIdCounter.getAndIncrement(),
componentSpec.getName());
ComponentInstance instance = new ComponentInstance(this, id);
compInstances.put(instance.getCompInstanceName(), instance);
pendingInstances.add(instance);
}
private static class FlexComponentTransition implements
MultipleArcTransition<Component, ComponentEvent, ComponentState> {
// For flex up, go to FLEXING state
// For flex down, go to STABLE state
@Override
public ComponentState transition(Component component,
ComponentEvent event) {
component.setDesiredContainers((int)event.getDesired());
if (!component.areDependenciesReady()) {
LOG.info("[FLEX COMPONENT {}]: Flex deferred because dependencies not"
+ " satisfied.", component.getName());
return component.getState();
}
if (component.getState() == INIT) {
// This happens on init
LOG.info("[INIT COMPONENT " + component.getName() + "]: " + event
.getDesired() + " instances.");
component.requestContainers(component.pendingInstances.size());
return checkIfStable(component);
}
long before = component.getComponentSpec().getNumberOfContainers();
long delta = event.getDesired() - before;
component.getComponentSpec().setNumberOfContainers(event.getDesired());
if (delta > 0) {
// Scale up
LOG.info("[FLEX UP COMPONENT " + component.getName() + "]: scaling up from "
+ before + " to " + event.getDesired());
component.requestContainers(delta);
component.createNumCompInstances(delta);
component.componentSpec.setState(
org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
return FLEXING;
} else if (delta < 0) {
// scale down
delta = -delta;
LOG.info("[FLEX DOWN COMPONENT " + component.getName()
+ "]: scaling down from " + before + " to " + event.getDesired());
List<ComponentInstance> list =
new ArrayList<>(component.getAllComponentInstances());
// Sort most recent -> oldest and destroy the most recent instances.
list.sort(Collections.reverseOrder());
for (int i = 0; i < delta; i++) {
ComponentInstance instance = list.get(i);
// remove the instance
component.compInstances.remove(instance.getCompInstanceName());
component.pendingInstances.remove(instance);
// decrement id counter
component.instanceIdCounter.decrementAndGet();
instance.destroy();
}
component.componentSpec.setState(
org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
return STABLE;
} else {
LOG.info("[FLEX COMPONENT " + component.getName() + "]: already has " +
event.getDesired() + " instances, ignoring");
component.componentSpec.setState(
org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
return STABLE;
}
}
}
private static class ContainerAllocatedTransition extends BaseTransition {
@Override
public void transition(Component component, ComponentEvent event) {
component.assignContainerToCompInstance(event.getContainer());
}
}
private static class ContainerRecoveredTransition extends BaseTransition {
@Override
public void transition(Component component, ComponentEvent event) {
ComponentInstance instance = event.getInstance();
Container container = event.getContainer();
if (instance == null) {
LOG.info("[COMPONENT {}]: Trying to recover {} but event did not " +
"specify component instance",
component.getName(), container.getId());
component.releaseContainer(container);
return;
}
if (instance.hasContainer()) {
LOG.info(
"[COMPONENT {}]: Instance {} already has container, release " +
"surplus container {}",
instance.getCompName(), instance.getCompInstanceId(), container
.getId());
component.releaseContainer(container);
return;
}
component.pendingInstances.remove(instance);
LOG.info("[COMPONENT {}]: Recovered {} for component instance {} on " +
"host {}, num pending component instances reduced to {} ",
component.getName(), container.getId(), instance
.getCompInstanceName(), container.getNodeId(), component
.pendingInstances.size());
instance.setContainer(container);
ProviderUtils.initCompInstanceDir(component.getContext().fs, instance);
component.getScheduler().addLiveCompInstance(container.getId(), instance);
LOG.info("[COMPONENT {}]: Marking {} as started for component " +
"instance {}", component.getName(), event.getContainer().getId(),
instance.getCompInstanceId());
component.compInstanceDispatcher.getEventHandler().handle(
new ComponentInstanceEvent(instance.getContainerId(),
START));
}
}
private static class ContainerStartedTransition implements
MultipleArcTransition<Component,ComponentEvent,ComponentState> {
@Override public ComponentState transition(Component component,
ComponentEvent event) {
component.compInstanceDispatcher.getEventHandler().handle(
new ComponentInstanceEvent(event.getInstance().getContainerId(),
START));
return checkIfStable(component);
}
}
private static ComponentState checkIfStable(Component component) {
// if desired == running
if (component.componentMetrics.containersRunning.value() == component
.getComponentSpec().getNumberOfContainers()) {
component.componentSpec.setState(
org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
return STABLE;
} else {
component.componentSpec.setState(
org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
return FLEXING;
}
}
private static class ContainerCompletedTransition extends BaseTransition {
@Override
public void transition(Component component, ComponentEvent event) {
component.updateMetrics(event.getStatus());
// add back to pending list
component.pendingInstances.add(event.getInstance());
LOG.info(
"[COMPONENT {}]: {} completed, num pending comp instances increased to {}.",
component.getName(), event.getStatus().getContainerId(),
component.pendingInstances.size());
component.compInstanceDispatcher.getEventHandler().handle(
new ComponentInstanceEvent(event.getStatus().getContainerId(),
STOP).setStatus(event.getStatus()));
component.componentSpec.setState(
org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
}
}
public ServiceMetrics getCompMetrics () {
return componentMetrics;
}
private void releaseContainer(Container container) {
scheduler.getAmRMClient().releaseAssignedContainer(container.getId());
componentMetrics.surplusContainers.incr();
scheduler.getServiceMetrics().surplusContainers.incr();
}
private void assignContainerToCompInstance(Container container) {
if (pendingInstances.size() == 0) {
LOG.info(
"[COMPONENT {}]: No pending component instance left, release surplus container {}",
getName(), container.getId());
releaseContainer(container);
return;
}
ComponentInstance instance = pendingInstances.remove(0);
LOG.info(
"[COMPONENT {}]: {} allocated, num pending component instances reduced to {}",
getName(), container.getId(), pendingInstances.size());
instance.setContainer(container);
scheduler.addLiveCompInstance(container.getId(), instance);
LOG.info(
"[COMPONENT {}]: Assigned {} to component instance {} and launch on host {} ",
getName(), container.getId(), instance.getCompInstanceName(),
container.getNodeId());
scheduler.getContainerLaunchService()
.launchCompInstance(scheduler.getApp(), instance, container);
}
@SuppressWarnings({ "unchecked" })
public void requestContainers(long count) {
Resource resource = Resource
.newInstance(componentSpec.getResource().getMemoryMB(),
componentSpec.getResource().getCpus());
for (int i = 0; i < count; i++) {
//TODO Once YARN-5468 is done, use that for anti-affinity
ContainerRequest request =
ContainerRequest.newBuilder().capability(resource).priority(priority)
.allocationRequestId(allocateId).relaxLocality(true).build();
amrmClient.addContainerRequest(request);
}
}
private void setDesiredContainers(int n) {
int delta = n - scheduler.getServiceMetrics().containersDesired.value();
if (delta > 0) {
scheduler.getServiceMetrics().containersDesired.incr(delta);
} else if (delta < 0) {
// decr() subtracts its argument, so pass the magnitude of the decrease
scheduler.getServiceMetrics().containersDesired.decr(-delta);
}
componentMetrics.containersDesired.set(n);
}
private void updateMetrics(ContainerStatus status) {
switch (status.getExitStatus()) {
case SUCCESS:
componentMetrics.containersSucceeded.incr();
scheduler.getServiceMetrics().containersSucceeded.incr();
return;
case PREEMPTED:
componentMetrics.containersPreempted.incr();
scheduler.getServiceMetrics().containersPreempted.incr();
break;
case DISKS_FAILED:
componentMetrics.containersDiskFailure.incr();
scheduler.getServiceMetrics().containersDiskFailure.incr();
break;
default:
break;
}
// containersFailed include preempted, disks_failed etc.
componentMetrics.containersFailed.incr();
scheduler.getServiceMetrics().containersFailed.incr();
if (Apps.shouldCountTowardsNodeBlacklisting(status.getExitStatus())) {
String host = scheduler.getLiveInstances().get(status.getContainerId())
.getNodeId().getHost();
failureTracker.incNodeFailure(host);
currentContainerFailure.getAndIncrement();
}
}
public boolean areDependenciesReady() {
List<String> dependencies = componentSpec.getDependencies();
if (ServiceUtils.isEmpty(dependencies)) {
return true;
}
for (String dependency : dependencies) {
Component dependentComponent =
scheduler.getAllComponents().get(dependency);
if (dependentComponent == null) {
LOG.error("Couldn't find dependency {} for {} (should never happen)",
dependency, getName());
continue;
}
if (dependentComponent.getNumReadyInstances() < dependentComponent
.getNumDesiredInstances()) {
LOG.info("[COMPONENT {}]: Dependency {} not satisfied, only {} of {}"
+ " instances are ready.", getName(), dependency,
dependentComponent.getNumReadyInstances(),
dependentComponent.getNumDesiredInstances());
return false;
}
}
return true;
}
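// Illustrative spec fragment: "worker" waits until all "master" instances are
// ready before its own flex proceeds (field names follow the service REST API
// records; the values are assumptions):
//   "components": [
//     { "name": "master", "number_of_containers": 1, ... },
//     { "name": "worker", "number_of_containers": 3, "dependencies": ["master"], ... }
//   ]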
public Map<String, String> getDependencyHostIpTokens() {
Map<String, String> tokens = new HashMap<>();
List<String> dependencies = componentSpec.getDependencies();
if (ServiceUtils.isEmpty(dependencies)) {
return tokens;
}
for (String dependency : dependencies) {
Collection<ComponentInstance> instances = scheduler.getAllComponents()
.get(dependency).getAllComponentInstances();
for (ComponentInstance instance : instances) {
if (instance.getContainerStatus() == null) {
continue;
}
if (ServiceUtils.isEmpty(instance.getContainerStatus().getIPs()) ||
ServiceUtils.isUnset(instance.getContainerStatus().getHost())) {
continue;
}
String ip = instance.getContainerStatus().getIPs().get(0);
String host = instance.getContainerStatus().getHost();
tokens.put(String.format(COMPONENT_INSTANCE_IP,
instance.getCompInstanceName().toUpperCase()), ip);
tokens.put(String.format(COMPONENT_INSTANCE_HOST,
instance.getCompInstanceName().toUpperCase()), host);
}
}
return tokens;
}
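// Illustrative: for a ready dependency instance "master-0", this publishes
// substitution tokens keyed by the uppercased instance name, i.e. a MASTER-0
// IP token and a MASTER-0 host token (the exact key format is defined by
// COMPONENT_INSTANCE_IP / COMPONENT_INSTANCE_HOST in ServiceApiConstants).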
public void incRunningContainers() {
componentMetrics.containersRunning.incr();
scheduler.getServiceMetrics().containersRunning.incr();
}
public void decRunningContainers() {
componentMetrics.containersRunning.decr();
scheduler.getServiceMetrics().containersRunning.decr();
}
public void incContainersReady() {
componentMetrics.containersReady.incr();
scheduler.getServiceMetrics().containersReady.incr();
}
public void decContainersReady() {
componentMetrics.containersReady.decr();
scheduler.getServiceMetrics().containersReady.decr();
}
public int getNumReadyInstances() {
return componentMetrics.containersReady.value();
}
public int getNumRunningInstances() {
return componentMetrics.containersRunning.value();
}
public int getNumDesiredInstances() {
return componentMetrics.containersDesired.value();
}
public ComponentInstance getComponentInstance(String componentInstanceName) {
return compInstances.get(componentInstanceName);
}
public Collection<ComponentInstance> getAllComponentInstances() {
return compInstances.values();
}
public org.apache.hadoop.yarn.service.api.records.Component getComponentSpec() {
return this.componentSpec;
}
public void resetCompFailureCount() {
LOG.info("[COMPONENT {}]: Reset container failure count from {} to 0.",
getName(), currentContainerFailure.get());
currentContainerFailure.set(0);
failureTracker.resetContainerFailures();
}
public Probe getProbe() {
return probe;
}
public Priority getPriority() {
return priority;
}
public long getAllocateId() {
return allocateId;
}
public String getName () {
return componentSpec.getName();
}
public ComponentState getState() {
this.readLock.lock();
try {
return this.stateMachine.getCurrentState();
} finally {
this.readLock.unlock();
}
}
public ServiceScheduler getScheduler() {
return scheduler;
}
@Override
public void handle(ComponentEvent event) {
writeLock.lock();
try {
ComponentState oldState = getState();
try {
stateMachine.doTransition(event.getType(), event);
} catch (InvalidStateTransitionException e) {
LOG.error(MessageFormat.format("[COMPONENT {0}]: Invalid event {1} at {2}",
componentSpec.getName(), event.getType(), oldState), e);
}
if (oldState != getState()) {
LOG.info("[COMPONENT {}] Transitioned from {} to {} on {} event.",
componentSpec.getName(), oldState, getState(), event.getType());
}
} finally {
writeLock.unlock();
}
}
private static class BaseTransition implements
SingleArcTransition<Component, ComponentEvent> {
@Override public void transition(Component component,
ComponentEvent event) {
}
}
public ServiceContext getContext() {
return context;
}
}
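// Illustrative event flow (in the AM these events arrive via the scheduler's
// AsyncDispatcher rather than direct calls):
//   Component comp = new Component(spec, allocateId, context);
//   comp.handle(new ComponentEvent(comp.getName(), ComponentEventType.FLEX)
//       .setDesired(3));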


@ -0,0 +1,83 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.component;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
public class ComponentEvent extends AbstractEvent<ComponentEventType> {
private long desired;
private final String name;
private final ComponentEventType type;
private Container container;
private ComponentInstance instance;
private ContainerStatus status;
public ComponentEvent(String name, ComponentEventType type) {
super(type);
this.name = name;
this.type = type;
}
public String getName() {
return name;
}
public ComponentEventType getType() {
return type;
}
public long getDesired() {
return desired;
}
public ComponentEvent setDesired(long desired) {
this.desired = desired;
return this;
}
public Container getContainer() {
return container;
}
public ComponentEvent setContainer(Container container) {
this.container = container;
return this;
}
public ComponentInstance getInstance() {
return instance;
}
public ComponentEvent setInstance(ComponentInstance instance) {
this.instance = instance;
return this;
}
public ContainerStatus getStatus() {
return status;
}
public ComponentEvent setStatus(ContainerStatus status) {
this.status = status;
return this;
}
}


@ -0,0 +1,27 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.component;
public enum ComponentEventType {
FLEX,
CONTAINER_ALLOCATED,
CONTAINER_RECOVERED,
CONTAINER_STARTED,
CONTAINER_COMPLETED
}


@ -0,0 +1,25 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.component;
public enum ComponentState {
INIT,
FLEXING,
STABLE
}


@ -0,0 +1,549 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.component.instance;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.service.ServiceScheduler;
import org.apache.hadoop.yarn.service.api.records.ContainerState;
import org.apache.hadoop.yarn.service.component.Component;
import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.BoundedAppender;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher;
import org.apache.hadoop.yarn.service.monitor.probe.ProbeStatus;
import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.Date;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import static org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes.*;
import static org.apache.hadoop.yarn.api.records.ContainerExitStatus.KILLED_BY_APPMASTER;
import static org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.*;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.*;
public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
Comparable<ComponentInstance> {
private static final Logger LOG =
LoggerFactory.getLogger(ComponentInstance.class);
private StateMachine<ComponentInstanceState, ComponentInstanceEventType,
ComponentInstanceEvent> stateMachine;
private Component component;
private final ReadLock readLock;
private final WriteLock writeLock;
private ComponentInstanceId compInstanceId = null;
private Path compInstanceDir;
private Container container;
private YarnRegistryViewForProviders yarnRegistryOperations;
private FileSystem fs;
private boolean timelineServiceEnabled = false;
private ServiceTimelinePublisher serviceTimelinePublisher;
private ServiceScheduler scheduler;
private BoundedAppender diagnostics = new BoundedAppender(64 * 1024);
private volatile ScheduledFuture containerStatusFuture;
private volatile ContainerStatus status;
private long containerStartedTime = 0;
  // This container object is used for REST API queries.
private org.apache.hadoop.yarn.service.api.records.Container containerSpec;
private static final StateMachineFactory<ComponentInstance,
ComponentInstanceState, ComponentInstanceEventType, ComponentInstanceEvent>
stateMachineFactory =
new StateMachineFactory<ComponentInstance, ComponentInstanceState,
ComponentInstanceEventType, ComponentInstanceEvent>(INIT)
.addTransition(INIT, STARTED, START,
new ContainerStartedTransition())
.addTransition(INIT, INIT, STOP,
          // Container failed before launching; nothing to clean up in the
          // registry. This can happen if NMClient#startContainerAsync failed:
          // the container completes, but COMP_INSTANCE is still at INIT.
new ContainerStoppedTransition(true))
//From Running
.addTransition(STARTED, INIT, STOP,
new ContainerStoppedTransition())
.addTransition(STARTED, READY, BECOME_READY,
new ContainerBecomeReadyTransition())
// FROM READY
.addTransition(READY, STARTED, BECOME_NOT_READY,
new ContainerBecomeNotReadyTransition())
.addTransition(READY, INIT, STOP, new ContainerStoppedTransition())
.installTopology();
public ComponentInstance(Component component,
ComponentInstanceId compInstanceId) {
this.stateMachine = stateMachineFactory.make(this);
this.component = component;
this.compInstanceId = compInstanceId;
this.scheduler = component.getScheduler();
this.yarnRegistryOperations =
component.getScheduler().getYarnRegistryOperations();
this.serviceTimelinePublisher =
component.getScheduler().getServiceTimelinePublisher();
if (YarnConfiguration
.timelineServiceV2Enabled(component.getScheduler().getConfig())) {
this.timelineServiceEnabled = true;
}
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
this.readLock = lock.readLock();
this.writeLock = lock.writeLock();
this.fs = scheduler.getContext().fs.getFileSystem();
}
private static class ContainerStartedTransition extends BaseTransition {
@Override public void transition(ComponentInstance compInstance,
ComponentInstanceEvent event) {
// Query container status for ip and host
compInstance.containerStatusFuture =
compInstance.scheduler.executorService.scheduleAtFixedRate(
new ContainerStatusRetriever(compInstance.scheduler,
compInstance.getContainerId(), compInstance), 0, 1,
TimeUnit.SECONDS);
compInstance.component.incRunningContainers();
long containerStartTime = System.currentTimeMillis();
try {
ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
.newContainerTokenIdentifier(compInstance.getContainer()
.getContainerToken());
containerStartTime = containerTokenIdentifier.getCreationTime();
} catch (Exception e) {
LOG.info("Could not get container creation time, using current time");
}
org.apache.hadoop.yarn.service.api.records.Container container =
new org.apache.hadoop.yarn.service.api.records.Container();
container.setId(compInstance.getContainerId().toString());
container.setLaunchTime(new Date(containerStartTime));
container.setState(ContainerState.RUNNING_BUT_UNREADY);
container.setBareHost(compInstance.container.getNodeId().getHost());
container.setComponentInstanceName(compInstance.getCompInstanceName());
if (compInstance.containerSpec != null) {
// remove the previous container.
compInstance.getCompSpec().removeContainer(compInstance.containerSpec);
}
compInstance.containerSpec = container;
compInstance.getCompSpec().addContainer(container);
compInstance.containerStartedTime = containerStartTime;
if (compInstance.timelineServiceEnabled) {
compInstance.serviceTimelinePublisher
.componentInstanceStarted(container, compInstance);
}
}
}
private static class ContainerBecomeReadyTransition extends BaseTransition {
@Override
public void transition(ComponentInstance compInstance,
ComponentInstanceEvent event) {
compInstance.component.incContainersReady();
compInstance.containerSpec.setState(ContainerState.READY);
if (compInstance.timelineServiceEnabled) {
compInstance.serviceTimelinePublisher
.componentInstanceBecomeReady(compInstance.containerSpec);
}
}
}
private static class ContainerBecomeNotReadyTransition extends BaseTransition {
@Override
public void transition(ComponentInstance compInstance,
ComponentInstanceEvent event) {
compInstance.component.decContainersReady();
compInstance.containerSpec.setState(ContainerState.RUNNING_BUT_UNREADY);
}
}
private static class ContainerStoppedTransition extends BaseTransition {
    // Whether the container failed before being launched by the AM.
boolean failedBeforeLaunching = false;
public ContainerStoppedTransition(boolean failedBeforeLaunching) {
this.failedBeforeLaunching = failedBeforeLaunching;
}
public ContainerStoppedTransition() {
this(false);
}
@Override
public void transition(ComponentInstance compInstance,
ComponentInstanceEvent event) {
// re-ask the failed container.
Component comp = compInstance.component;
comp.requestContainers(1);
LOG.info(compInstance.getCompInstanceId()
+ ": Container completed. Requested a new container." + System
.lineSeparator() + " exitStatus={}, diagnostics={}.",
event.getStatus().getExitStatus(),
event.getStatus().getDiagnostics());
String containerDiag =
compInstance.getCompInstanceId() + ": " + event.getStatus()
.getDiagnostics();
compInstance.diagnostics.append(containerDiag + System.lineSeparator());
if (compInstance.getState().equals(READY)) {
compInstance.component.decContainersReady();
}
compInstance.component.decRunningContainers();
boolean shouldExit = false;
// check if it exceeds the failure threshold
if (comp.currentContainerFailure.get() > comp.maxContainerFailurePerComp) {
String exitDiag = MessageFormat.format(
"[COMPONENT {0}]: Failed {1} times, exceeded the limit - {2}. Shutting down now... "
+ System.lineSeparator(),
comp.getName(), comp.currentContainerFailure.get(), comp.maxContainerFailurePerComp);
compInstance.diagnostics.append(exitDiag);
// append to global diagnostics that will be reported to RM.
comp.getScheduler().getDiagnostics().append(containerDiag);
comp.getScheduler().getDiagnostics().append(exitDiag);
LOG.warn(exitDiag);
shouldExit = true;
}
if (!failedBeforeLaunching) {
// clean up registry
        // If the container failed before launching, there is no need to clean
        // up the registry because it was never registered.
        // The HDFS dir content will be overwritten when a new container
        // starts, so there is no need to remove it.
compInstance.scheduler.executorService
.submit(compInstance::cleanupRegistry);
if (compInstance.timelineServiceEnabled) {
// record in ATS
compInstance.serviceTimelinePublisher.componentInstanceFinished
(compInstance, event.getStatus().getExitStatus(), containerDiag);
}
compInstance.containerSpec.setState(ContainerState.STOPPED);
}
// remove the failed ContainerId -> CompInstance mapping
comp.getScheduler().removeLiveCompInstance(event.getContainerId());
if (shouldExit) {
        // Sleep for 5 seconds in the hope that the state can be recorded in
        // ATS, so that a client polling the comp state can be notified.
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
LOG.error("Interrupted on sleep while exiting.", e);
}
ExitUtil.terminate(-1);
}
compInstance.removeContainer();
}
}
public ComponentInstanceState getState() {
this.readLock.lock();
try {
return this.stateMachine.getCurrentState();
} finally {
this.readLock.unlock();
}
}
@Override
public void handle(ComponentInstanceEvent event) {
try {
writeLock.lock();
ComponentInstanceState oldState = getState();
try {
stateMachine.doTransition(event.getType(), event);
} catch (InvalidStateTransitionException e) {
LOG.error(getCompInstanceId() + ": Invalid event " + event.getType() +
" at " + oldState, e);
}
if (oldState != getState()) {
LOG.info(getCompInstanceId() + " Transitioned from " + oldState + " to "
+ getState() + " on " + event.getType() + " event");
}
} finally {
writeLock.unlock();
}
}
public boolean hasContainer() {
return this.container != null;
}
public void removeContainer() {
this.container = null;
this.compInstanceId.setContainerId(null);
}
public void setContainer(Container container) {
this.container = container;
this.compInstanceId.setContainerId(container.getId());
}
public String getCompInstanceName() {
return compInstanceId.getCompInstanceName();
}
public ContainerStatus getContainerStatus() {
return status;
}
public void updateContainerStatus(ContainerStatus status) {
this.status = status;
org.apache.hadoop.yarn.service.api.records.Container container =
getCompSpec().getContainer(getContainerId().toString());
if (container != null) {
container.setIp(StringUtils.join(",", status.getIPs()));
container.setHostname(status.getHost());
if (timelineServiceEnabled) {
serviceTimelinePublisher.componentInstanceIPHostUpdated(container);
}
}
updateServiceRecord(yarnRegistryOperations, status);
}
public ContainerId getContainerId() {
return container.getId();
}
public String getCompName() {
return compInstanceId.getCompName();
}
public void setCompInstanceDir(Path dir) {
this.compInstanceDir = dir;
}
public Component getComponent() {
return component;
}
public Container getContainer() {
return container;
}
public ComponentInstanceId getCompInstanceId() {
return compInstanceId;
}
public NodeId getNodeId() {
return this.container.getNodeId();
}
public org.apache.hadoop.yarn.service.api.records.Component getCompSpec() {
return component.getComponentSpec();
}
private static class BaseTransition implements
SingleArcTransition<ComponentInstance, ComponentInstanceEvent> {
@Override public void transition(ComponentInstance compInstance,
ComponentInstanceEvent event) {
}
}
public ProbeStatus ping() {
if (component.getProbe() == null) {
ProbeStatus status = new ProbeStatus();
status.setSuccess(true);
return status;
}
return component.getProbe().ping(this);
}
// Write service record into registry
private void updateServiceRecord(
YarnRegistryViewForProviders yarnRegistry, ContainerStatus status) {
ServiceRecord record = new ServiceRecord();
String containerId = status.getContainerId().toString();
record.set(YARN_ID, containerId);
record.description = getCompInstanceName();
record.set(YARN_PERSISTENCE, PersistencePolicies.CONTAINER);
record.set(YARN_IP, status.getIPs().get(0));
record.set(YARN_HOSTNAME, status.getHost());
try {
yarnRegistry
.putComponent(RegistryPathUtils.encodeYarnID(containerId), record);
} catch (IOException e) {
      LOG.error(
          "Failed to update service record in registry: " + containerId, e);
}
}
  // Called when the user flexes down the container; ContainerStoppedTransition
  // is not executed in this case.
  // Releases the container, decrements the running count, cleans up the
  // registry and HDFS dir, and sends a record to ATS.
public void destroy() {
LOG.info(getCompInstanceId() + ": Flexed down by user, destroying.");
diagnostics.append(getCompInstanceId() + ": Flexed down by user");
if (container != null) {
scheduler.removeLiveCompInstance(container.getId());
component.getScheduler().getAmRMClient()
.releaseAssignedContainer(container.getId());
getCompSpec().removeContainer(containerSpec);
}
// update metrics
if (getState() == STARTED) {
component.decRunningContainers();
}
if (getState() == READY) {
component.decContainersReady();
component.decRunningContainers();
}
if (timelineServiceEnabled) {
serviceTimelinePublisher.componentInstanceFinished(this,
KILLED_BY_APPMASTER, diagnostics.toString());
}
scheduler.executorService.submit(this::cleanupRegistryAndCompHdfsDir);
}
private void cleanupRegistry() {
ContainerId containerId = getContainerId();
String cid = RegistryPathUtils.encodeYarnID(containerId.toString());
try {
yarnRegistryOperations.deleteComponent(getCompInstanceId(), cid);
} catch (IOException e) {
LOG.error(getCompInstanceId() + ": Failed to delete registry", e);
}
}
//TODO Maybe have a dedicated cleanup service.
public void cleanupRegistryAndCompHdfsDir() {
cleanupRegistry();
try {
if (compInstanceDir != null && fs.exists(compInstanceDir)) {
boolean deleted = fs.delete(compInstanceDir, true);
if (!deleted) {
LOG.error(getCompInstanceId()
+ ": Failed to delete component instance dir: "
+ compInstanceDir);
} else {
LOG.info(getCompInstanceId() + ": Deleted component instance dir: "
+ compInstanceDir);
}
}
} catch (IOException e) {
LOG.warn(getCompInstanceId() + ": Failed to delete directory", e);
}
}
  // Query container status until the IP and hostname are available, then
  // update the service record in the registry service.
private static class ContainerStatusRetriever implements Runnable {
private ContainerId containerId;
private NodeId nodeId;
private NMClient nmClient;
private ComponentInstance instance;
ContainerStatusRetriever(ServiceScheduler scheduler,
ContainerId containerId, ComponentInstance instance) {
this.containerId = containerId;
this.nodeId = instance.getNodeId();
this.nmClient = scheduler.getNmClient().getClient();
this.instance = instance;
}
@Override public void run() {
ContainerStatus status = null;
try {
status = nmClient.getContainerStatus(containerId, nodeId);
} catch (Exception e) {
if (e instanceof YarnException) {
throw new YarnRuntimeException(
instance.compInstanceId + " Failed to get container status on "
+ nodeId + " , cancelling.", e);
}
LOG.error(instance.compInstanceId + " Failed to get container status on "
+ nodeId + ", will try again", e);
return;
}
if (ServiceUtils.isEmpty(status.getIPs()) || ServiceUtils
.isUnset(status.getHost())) {
return;
}
instance.updateContainerStatus(status);
LOG.info(
instance.compInstanceId + " IP = " + status.getIPs() + ", host = "
+ status.getHost() + ", cancel container status retriever");
instance.containerStatusFuture.cancel(false);
}
}
@Override
public int compareTo(ComponentInstance to) {
long delta = containerStartedTime - to.containerStartedTime;
if (delta == 0) {
return getCompInstanceId().compareTo(to.getCompInstanceId());
} else if (delta < 0) {
return -1;
} else {
return 1;
}
}
@Override public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
ComponentInstance instance = (ComponentInstance) o;
if (containerStartedTime != instance.containerStartedTime)
return false;
return compInstanceId.equals(instance.compInstanceId);
}
@Override public int hashCode() {
int result = compInstanceId.hashCode();
result = 31 * result + (int) (containerStartedTime ^ (containerStartedTime
>>> 32));
return result;
}
}
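/*
 * Illustrative sketch (not part of this patch): the transition table above,
 * exercised end to end with a throwaway operand in place of a real
 * ComponentInstance. It assumes only the state-machine classes already
 * imported by this file, and uses the hook-less addTransition overload, so
 * none of the real transition logic runs.
 */
class ComponentInstanceLifecycleSketch {
  private static final StateMachineFactory<ComponentInstanceLifecycleSketch,
      ComponentInstanceState, ComponentInstanceEventType, ComponentInstanceEvent>
      FACTORY = new StateMachineFactory<ComponentInstanceLifecycleSketch,
          ComponentInstanceState, ComponentInstanceEventType,
          ComponentInstanceEvent>(INIT)
      .addTransition(INIT, STARTED, START)
      .addTransition(STARTED, READY, BECOME_READY)
      .addTransition(READY, STARTED, BECOME_NOT_READY)
      .addTransition(READY, INIT, STOP)
      .installTopology();
  public static void main(String[] args) {
    StateMachine<ComponentInstanceState, ComponentInstanceEventType,
        ComponentInstanceEvent> sm =
        FACTORY.make(new ComponentInstanceLifecycleSketch());
    try {
      sm.doTransition(START, new ComponentInstanceEvent(null, START));
      sm.doTransition(BECOME_READY,
          new ComponentInstanceEvent(null, BECOME_READY));
    } catch (InvalidStateTransitionException e) {
      throw new IllegalStateException(e);
    }
    // Prints READY: the instance is up and has passed its readiness check.
    System.out.println(sm.getCurrentState());
  }
}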

View File

@ -0,0 +1,58 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.component.instance;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class ComponentInstanceEvent
extends AbstractEvent<ComponentInstanceEventType> {
private ContainerId id;
private ContainerStatus status;
private boolean shouldDestroy = false;
public ComponentInstanceEvent(ContainerId containerId,
ComponentInstanceEventType componentInstanceEventType) {
super(componentInstanceEventType);
this.id = containerId;
}
public ContainerId getContainerId() {
return id;
}
public ContainerStatus getStatus() {
return this.status;
}
public ComponentInstanceEvent setStatus(ContainerStatus status) {
this.status = status;
return this;
}
public void setShouldDestroy() {
shouldDestroy = true;
}
public boolean shouldDestroy() {
return shouldDestroy;
}
}
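/*
 * Illustrative sketch (not part of this patch): how a caller would typically
 * build a STOP event carrying the final container status. The container id
 * string and diagnostics are made-up examples; ContainerState is fully
 * qualified because this file does not import it.
 */
class ComponentInstanceEventSketch {
  public static void main(String[] args) {
    ContainerId id =
        ContainerId.fromString("container_1530000000000_0001_01_000002");
    ContainerStatus terminal = ContainerStatus.newInstance(id,
        org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE,
        "process exited with code 1", 1);
    ComponentInstanceEvent stop =
        new ComponentInstanceEvent(id, ComponentInstanceEventType.STOP)
            .setStatus(terminal);
    // The STOP handler reads exit status and diagnostics off the event.
    System.out.println(stop.getStatus().getExitStatus());
  }
}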

View File

@ -0,0 +1,26 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.component.instance;
public enum ComponentInstanceEventType {
START,
STOP,
BECOME_READY,
BECOME_NOT_READY
}

View File

@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.component.instance;
import org.apache.hadoop.yarn.api.records.ContainerId;
public class ComponentInstanceId implements Comparable<ComponentInstanceId> {
  private long id;
  private String name;
  private ContainerId containerId;
  public ComponentInstanceId(long id, String name) {
    this.id = id;
    this.name = name;
  }
  public long getId() {
    return id;
  }
public String getCompName() {
return name;
}
public String getCompInstanceName() {
return getCompName() + "-" + getId();
}
public void setContainerId(ContainerId containerId) {
this.containerId = containerId;
}
@Override
public String toString() {
if (containerId == null) {
return "[COMPINSTANCE " + getCompInstanceName() + "]";
} else {
return "[COMPINSTANCE " + getCompInstanceName() + " : " + containerId + "]";
}
}
@Override public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
ComponentInstanceId that = (ComponentInstanceId) o;
if (getId() != that.getId())
return false;
return getCompName() != null ? getCompName().equals(that.getCompName()) :
that.getCompName() == null;
}
@Override public int hashCode() {
int result = (int) (getId() ^ (getId() >>> 32));
result = 31 * result + (getCompName() != null ? getCompName().hashCode() : 0);
return result;
}
@Override
public int compareTo(ComponentInstanceId to) {
int delta = this.getCompName().compareTo(to.getCompName());
if (delta == 0) {
return Long.compare(this.getId(), to.getId());
} else if (delta < 0) {
return -1;
} else {
return 1;
}
}
}
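/*
 * Illustrative sketch (not part of this patch): naming and natural ordering.
 * Instances sort by component name first, then by numeric id, so "web-0"
 * precedes "web-2" regardless of which containers they are bound to.
 */
class ComponentInstanceIdSketch {
  public static void main(String[] args) {
    ComponentInstanceId first = new ComponentInstanceId(0, "web");
    ComponentInstanceId third = new ComponentInstanceId(2, "web");
    System.out.println(first.getCompInstanceName()); // web-0
    System.out.println(first);                       // [COMPINSTANCE web-0]
    System.out.println(first.compareTo(third) < 0);  // true
  }
}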

View File

@ -0,0 +1,26 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.component.instance;
public enum ComponentInstanceState {
INIT,
STARTED,
READY,
UPGRADING
}

View File

@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.conf;
public interface RestApiConstants {
  // REST endpoints
String CONTEXT_ROOT = "/ws/v1";
String VERSION = "/services/version";
String SERVICE_ROOT_PATH = "/services";
String SERVICE_PATH = "/services/{service_name}";
String COMPONENT_PATH = "/services/{service_name}/components/{component_name}";
// Query param
String SERVICE_NAME = "service_name";
String COMPONENT_NAME = "component_name";
  Long DEFAULT_UNLIMITED_LIFETIME = -1L;
Integer ERROR_CODE_APP_DOES_NOT_EXIST = 404001;
Integer ERROR_CODE_APP_IS_NOT_RUNNING = 404002;
Integer ERROR_CODE_APP_SUBMITTED_BUT_NOT_RUNNING_YET = 404003;
Integer ERROR_CODE_APP_NAME_INVALID = 404004;
}
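/*
 * Illustrative sketch (not part of this patch): expanding the templated REST
 * paths above into concrete URLs. The host, port and service name are made-up
 * examples.
 */
class RestApiPathSketch {
  public static void main(String[] args) {
    String base = "http://apiserver.example.com:9191"
        + RestApiConstants.CONTEXT_ROOT;
    String servicePath = RestApiConstants.SERVICE_PATH.replace(
        "{" + RestApiConstants.SERVICE_NAME + "}", "sleeper-service");
    // http://apiserver.example.com:9191/ws/v1/services/sleeper-service
    System.out.println(base + servicePath);
  }
}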

View File

@ -0,0 +1,88 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.conf;
import org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes;
public interface SliderExitCodes extends LauncherExitCodes {
/**
* starting point for exit codes; not an exception itself
*/
int _EXIT_CODE_BASE = 64;
/**
* service entered the failed state: {@value}
*/
int EXIT_YARN_SERVICE_FAILED = 65;
/**
* service was killed: {@value}
*/
int EXIT_YARN_SERVICE_KILLED = 66;
/**
* timeout on monitoring client: {@value}
*/
int EXIT_TIMED_OUT = 67;
/**
* service finished with an error: {@value}
*/
int EXIT_YARN_SERVICE_FINISHED_WITH_ERROR = 68;
/**
* the service instance is unknown: {@value}
*/
int EXIT_UNKNOWN_INSTANCE = 69;
/**
* the service instance is in the wrong state for that operation: {@value}
*/
int EXIT_BAD_STATE = 70;
/**
* A spawned master process failed
*/
int EXIT_PROCESS_FAILED = 71;
/**
* The instance failed -too many containers were
* failing or some other threshold was reached
*/
int EXIT_DEPLOYMENT_FAILED = 72;
/**
   * The service is live, and the requested operation
   * is not allowed while it is running
*/
int EXIT_APPLICATION_IN_USE = 73;
/**
   * A service instance of that name already exists
   * when an attempt is made to create a new instance
*/
int EXIT_INSTANCE_EXISTS = 75;
/**
   * Exit code when the configuration is invalid or incomplete: {@value}
*/
int EXIT_BAD_CONFIGURATION = 77;
}
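/*
 * Illustrative sketch (not part of this patch): mapping the exit codes above
 * to operator-facing messages, e.g. in a CLI wrapper. The wording of the
 * messages is a made-up example.
 */
class ExitCodeSketch {
  static String describe(int exitCode) {
    switch (exitCode) {
      case SliderExitCodes.EXIT_YARN_SERVICE_FAILED:
        return "service entered the failed state";
      case SliderExitCodes.EXIT_YARN_SERVICE_KILLED:
        return "service was killed";
      case SliderExitCodes.EXIT_INSTANCE_EXISTS:
        return "a service instance of that name already exists";
      case SliderExitCodes.EXIT_BAD_CONFIGURATION:
        return "service configuration is invalid or incomplete";
      default:
        return "exit code " + exitCode;
    }
  }
  public static void main(String[] args) {
    // Prints the message for exit code 75.
    System.out.println(describe(SliderExitCodes.EXIT_INSTANCE_EXISTS));
  }
}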

View File

@ -0,0 +1,113 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.conf;
import org.apache.hadoop.yarn.service.api.records.Configuration;
public class YarnServiceConf {
// Retry settings for the ServiceClient to talk to Service AppMaster
public static final String CLIENT_AM_RETRY_MAX_WAIT_MS = "yarn.service.client-am.retry.max-wait-ms";
public static final String CLIENT_AM_RETRY_MAX_INTERVAL_MS = "yarn.service.client-am.retry-interval-ms";
// Retry settings for container failures
public static final String CONTAINER_RETRY_MAX = "yarn.service.container-failure.retry.max";
public static final String CONTAINER_RETRY_INTERVAL = "yarn.service.container-failure.retry-interval-ms";
public static final String AM_RESTART_MAX = "yarn.service.am-restart.max-attempts";
public static final String AM_RESOURCE_MEM = "yarn.service.am-resource.memory";
public static final long DEFAULT_KEY_AM_RESOURCE_MEM = 1024;
public static final String YARN_QUEUE = "yarn.service.queue";
public static final String API_SERVER_ADDRESS = "yarn.service.api-server.address";
public static final String DEFAULT_API_SERVER_ADDRESS = "0.0.0.0:";
public static final int DEFAULT_API_SERVER_PORT = 9191;
public static final String FINAL_LOG_INCLUSION_PATTERN = "yarn.service.log.include-pattern";
public static final String FINAL_LOG_EXCLUSION_PATTERN = "yarn.service.log.exclude-pattern";
public static final String ROLLING_LOG_INCLUSION_PATTERN = "yarn.service.rolling-log.include-pattern";
public static final String ROLLING_LOG_EXCLUSION_PATTERN = "yarn.service.rolling-log.exclude-pattern";
/**
* The yarn service base path:
* Defaults to HomeDir/.yarn/
*/
public static final String YARN_SERVICE_BASE_PATH = "yarn.service.base.path";
//TODO rename
/** Declare that a keytab must be provided */
public static final String KEY_AM_LOGIN_KEYTAB_REQUIRED = "slider.am.login.keytab.required";
public static final String KEY_AM_LOGIN_KEYTAB_NAME = "slider.am.login.keytab.name";
public static final String KEY_HDFS_KEYTAB_DIR = "slider.hdfs.keytab.dir";
public static final String KEY_AM_KEYTAB_LOCAL_PATH = "slider.am.keytab.local.path";
/**
* maximum number of failed containers (in a single component)
* before the app exits
*/
public static final String CONTAINER_FAILURE_THRESHOLD =
"yarn.service.container-failure-per-component.threshold";
/**
* Maximum number of container failures on a node before the node is blacklisted
*/
public static final String NODE_BLACKLIST_THRESHOLD =
"yarn.service.node-blacklist.threshold";
/**
* The failure count for CONTAINER_FAILURE_THRESHOLD and NODE_BLACKLIST_THRESHOLD
   * gets reset periodically; the window is specified in seconds.
*/
public static final String CONTAINER_FAILURE_WINDOW =
"yarn.service.failure-count-reset.window";
/**
* interval between readiness checks.
*/
public static final String READINESS_CHECK_INTERVAL = "yarn.service.readiness-check-interval.seconds";
public static final int DEFAULT_READINESS_CHECK_INTERVAL = 30; // seconds
/**
* Get long value for the property. First get from the userConf, if not
* present, get from systemConf.
*
* @param name name of the property
* @param defaultValue default value of the property, if it is not defined in
* userConf and systemConf.
* @param userConf Configuration provided by client in the JSON definition
* @param systemConf The YarnConfiguration in the system.
* @return long value for the property
*/
public static long getLong(String name, long defaultValue,
Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
return userConf.getPropertyLong(name, systemConf.getLong(name, defaultValue));
}
public static int getInt(String name, int defaultValue,
Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
return userConf.getPropertyInt(name, systemConf.getInt(name, defaultValue));
}
public static String get(String name, String defaultVal,
Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
return userConf.getProperty(name, systemConf.get(name, defaultVal));
}
}
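/*
 * Illustrative sketch (not part of this patch): the lookup order implemented
 * by getLong/getInt/get above -- a property in the service's JSON definition
 * wins over the cluster-wide yarn-site value, which wins over the hard-coded
 * default. Plain maps stand in for the two Configuration types.
 */
class ConfPrecedenceSketch {
  static long lookup(String name, long defaultValue,
      java.util.Map<String, String> userConf,
      java.util.Map<String, String> systemConf) {
    // Same shape as YarnServiceConf.getLong: resolve the system value first,
    // then let the user-supplied property override it.
    long fromSystem = systemConf.containsKey(name)
        ? Long.parseLong(systemConf.get(name)) : defaultValue;
    return userConf.containsKey(name)
        ? Long.parseLong(userConf.get(name)) : fromSystem;
  }
  public static void main(String[] args) {
    java.util.Map<String, String> user = new java.util.HashMap<>();
    java.util.Map<String, String> system = new java.util.HashMap<>();
    system.put(YarnServiceConf.AM_RESOURCE_MEM, "2048");
    user.put(YarnServiceConf.AM_RESOURCE_MEM, "4096");
    // Prints 4096: the service definition overrides yarn-site.
    System.out.println(lookup(YarnServiceConf.AM_RESOURCE_MEM,
        YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM, user, system));
  }
}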

View File

@ -0,0 +1,92 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.conf;
public interface YarnServiceConstants {
/**
* The path under which cluster and temp data are stored
*/
String SERVICE_BASE_DIRECTORY = ".yarn";
/**
* The paths under which Service AM dependency libraries are stored
*/
String DEPENDENCY_LOCALIZED_DIR_LINK = "service_dep";
String DEPENDENCY_DIR = "/yarn-services/%s/";
String DEPENDENCY_TAR_GZ_FILE_NAME = "service-dep";
String DEPENDENCY_TAR_GZ_FILE_EXT = ".tar.gz";
String DEPENDENCY_DIR_PERMISSIONS = "755";
/**
* Service type for YARN service
*/
String APP_TYPE = "yarn-service";
String KEYTAB_DIR = "keytabs";
String RESOURCE_DIR = "resources";
String SERVICES_DIRECTORY = "services";
/**
* JVM property to define the service lib directory;
* this is set by the yarn.sh script
*/
String PROPERTY_LIB_DIR = "service.libdir";
/**
* name of generated dir for this conf
*/
String SUBMITTED_CONF_DIR = "conf";
/**
* Service AM log4j file name
*/
String YARN_SERVICE_LOG4J_FILENAME = "yarnservice-log4j.properties";
/**
* Log4j sysprop to name the resource
*/
String SYSPROP_LOG4J_CONFIGURATION = "log4j.configuration";
/**
* sysprop for Service AM log4j directory
*/
String SYSPROP_LOG_DIR = "LOG_DIR";
String TMP_DIR_PREFIX = "tmp";
String SERVICE_CORE_JAR = "yarn-service-core.jar";
String STDOUT_AM = "serviceam-out.txt";
String STDERR_AM = "serviceam-err.txt";
String HADOOP_USER_NAME = "HADOOP_USER_NAME";
String APP_CONF_DIR = "conf";
String APP_LIB_DIR = "lib";
String OUT_FILE = "stdout.txt";
String ERR_FILE = "stderr.txt";
String CONTENT = "content";
}
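/*
 * Illustrative sketch (not part of this patch): how the dependency-tarball
 * constants above compose into the path the AM libraries are uploaded to and
 * localized from. The version string is a made-up example.
 */
class DependencyPathSketch {
  public static void main(String[] args) {
    String version = "3.1.0";
    String path =
        String.format(YarnServiceConstants.DEPENDENCY_DIR, version)
            + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME
            + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT;
    // /yarn-services/3.1.0/service-dep.tar.gz
    System.out.println(path);
  }
}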

View File

@ -0,0 +1,271 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.containerlaunch;
import com.google.common.base.Preconditions;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerRetryContext;
import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
import org.apache.hadoop.yarn.service.utils.CoreFileSystem;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import static org.apache.hadoop.yarn.service.provider.docker.DockerKeys.DEFAULT_DOCKER_NETWORK;
/**
* Launcher of applications: base class
*/
public class AbstractLauncher {
private static final Logger log =
LoggerFactory.getLogger(AbstractLauncher.class);
public static final String CLASSPATH = "CLASSPATH";
/**
* Filesystem to use for the launch
*/
protected final CoreFileSystem coreFileSystem;
/**
* Env vars; set up at final launch stage
*/
protected final Map<String, String> envVars = new HashMap<>();
protected final ContainerLaunchContext containerLaunchContext =
Records.newRecord(ContainerLaunchContext.class);
protected final List<String> commands = new ArrayList<>(20);
protected final Map<String, LocalResource> localResources = new HashMap<>();
protected final Map<String, String> mountPaths = new HashMap<>();
private final Map<String, ByteBuffer> serviceData = new HashMap<>();
// security
protected final Credentials credentials;
protected boolean yarnDockerMode = false;
protected String dockerImage;
protected String dockerNetwork = DEFAULT_DOCKER_NETWORK;
protected String dockerHostname;
protected String runPrivilegedContainer;
/**
* Create instance.
* @param coreFileSystem filesystem
* @param credentials initial set of credentials -null is permitted
*/
public AbstractLauncher(
CoreFileSystem coreFileSystem,
Credentials credentials) {
this.coreFileSystem = coreFileSystem;
this.credentials = credentials != null ? credentials: new Credentials();
}
public void setYarnDockerMode(boolean yarnDockerMode){
this.yarnDockerMode = yarnDockerMode;
}
/**
* Get the env vars to work on
* @return env vars
*/
public Map<String, String> getEnv() {
return envVars;
}
/**
* Get the launch commands.
* @return the live list of commands
*/
public List<String> getCommands() {
return commands;
}
public void addLocalResource(String subPath, LocalResource resource) {
localResources.put(subPath, resource);
}
public void addLocalResource(String subPath, LocalResource resource, String mountPath) {
localResources.put(subPath, resource);
mountPaths.put(subPath, mountPath);
}
/**
* Accessor to the credentials
* @return the credentials associated with this launcher
*/
public Credentials getCredentials() {
return credentials;
}
public void addCommand(String cmd) {
commands.add(cmd);
}
/**
* Complete the launch context (copy in env vars, etc).
* @return the container to launch
*/
public ContainerLaunchContext completeContainerLaunch() throws IOException {
String cmdStr = ServiceUtils.join(commands, " ", false);
log.debug("Completed setting up container command {}", cmdStr);
containerLaunchContext.setCommands(commands);
//env variables
if (log.isDebugEnabled()) {
log.debug("Environment variables");
for (Map.Entry<String, String> envPair : envVars.entrySet()) {
log.debug(" \"{}\"=\"{}\"", envPair.getKey(), envPair.getValue());
}
}
containerLaunchContext.setEnvironment(envVars);
//service data
if (log.isDebugEnabled()) {
log.debug("Service Data size");
for (Map.Entry<String, ByteBuffer> entry : serviceData.entrySet()) {
log.debug("\"{}\"=> {} bytes of data", entry.getKey(),
entry.getValue().array().length);
}
}
containerLaunchContext.setServiceData(serviceData);
// resources
dumpLocalResources();
containerLaunchContext.setLocalResources(localResources);
//tokens
log.debug("{} tokens", credentials.numberOfTokens());
containerLaunchContext.setTokens(CredentialUtils.marshallCredentials(
credentials));
if(yarnDockerMode){
Map<String, String> env = containerLaunchContext.getEnvironment();
env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker");
env.put("YARN_CONTAINER_RUNTIME_DOCKER_IMAGE", dockerImage);
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK", dockerNetwork);
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME",
dockerHostname);
env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER", runPrivilegedContainer);
StringBuilder sb = new StringBuilder();
for (Entry<String,String> mount : mountPaths.entrySet()) {
if (sb.length() > 0) {
sb.append(",");
}
sb.append(mount.getKey());
sb.append(":");
sb.append(mount.getValue());
}
env.put("YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS", sb.toString());
log.info("yarn docker env var has been set {}", containerLaunchContext.getEnvironment().toString());
}
return containerLaunchContext;
}
public void setRetryContext(int maxRetries, int retryInterval) {
ContainerRetryContext retryContext = ContainerRetryContext
.newInstance(ContainerRetryPolicy.RETRY_ON_ALL_ERRORS, null, maxRetries,
retryInterval);
containerLaunchContext.setContainerRetryContext(retryContext);
}
/**
* Dump local resources at debug level
*/
private void dumpLocalResources() {
if (log.isDebugEnabled()) {
log.debug("{} resources: ", localResources.size());
for (Map.Entry<String, LocalResource> entry : localResources.entrySet()) {
String key = entry.getKey();
LocalResource val = entry.getValue();
log.debug(key + "=" + ServiceUtils.stringify(val.getResource()));
}
}
}
/**
* This is critical for an insecure cluster -it passes
* down the username to YARN, and so gives the code running
* in containers the rights it needs to work with
* data.
* @throws IOException problems working with current user
*/
protected void propagateUsernameInInsecureCluster() throws IOException {
//insecure cluster: propagate user name via env variable
String userName = UserGroupInformation.getCurrentUser().getUserName();
envVars.put(YarnServiceConstants.HADOOP_USER_NAME, userName);
}
/**
* Utility method to set up the classpath
* @param classpath classpath to use
*/
public void setClasspath(ClasspathConstructor classpath) {
setEnv(CLASSPATH, classpath.buildClasspath());
}
/**
* Set an environment variable in the launch context
* @param var variable name
* @param value value (must be non null)
*/
public void setEnv(String var, String value) {
Preconditions.checkArgument(var != null, "null variable name");
Preconditions.checkArgument(value != null, "null value");
envVars.put(var, value);
}
public void putEnv(Map<String, String> map) {
envVars.putAll(map);
}
public void setDockerImage(String dockerImage) {
this.dockerImage = dockerImage;
}
public void setDockerNetwork(String dockerNetwork) {
this.dockerNetwork = dockerNetwork;
}
public void setDockerHostname(String dockerHostname) {
this.dockerHostname = dockerHostname;
}
public void setRunPrivilegedContainer(boolean runPrivilegedContainer) {
if (runPrivilegedContainer) {
this.runPrivilegedContainer = Boolean.toString(true);
} else {
this.runPrivilegedContainer = Boolean.toString(false);
}
}
}
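/*
 * Illustrative sketch (not part of this patch): the mount-list format the
 * docker branch of completeContainerLaunch() writes into
 * YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS -- comma-separated
 * source:target pairs. The resource names and target paths are made-up
 * examples; plain strings stand in for localized resources.
 */
class DockerMountStringSketch {
  public static void main(String[] args) {
    java.util.Map<String, String> mountPaths = new java.util.LinkedHashMap<>();
    mountPaths.put("conf", "/etc/myservice/conf");
    mountPaths.put("keytabs", "/etc/security/keytabs");
    StringBuilder sb = new StringBuilder();
    for (java.util.Map.Entry<String, String> mount : mountPaths.entrySet()) {
      if (sb.length() > 0) {
        sb.append(",");
      }
      sb.append(mount.getKey()).append(":").append(mount.getValue());
    }
    // conf:/etc/myservice/conf,keytabs:/etc/security/keytabs
    System.out.println(sb);
  }
}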

View File

@ -0,0 +1,172 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.containerlaunch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
/**
 * Build a classpath -allows entries to be injected in front of the
 * YARN classpath as well as behind it, adds appropriate separators,
 * supports extraction of the local classpath, etc.
*/
public class ClasspathConstructor {
public static final String CLASS_PATH_SEPARATOR = ApplicationConstants.CLASS_PATH_SEPARATOR;
private final List<String> pathElements = new ArrayList<>();
public ClasspathConstructor() {
}
/**
* Get the list of JARs from the YARN settings
* @param config configuration
*/
public List<String> yarnApplicationClasspath(Configuration config) {
String[] cp = config.getTrimmedStrings(
YarnConfiguration.YARN_APPLICATION_CLASSPATH,
YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH);
return cp != null ? Arrays.asList(cp) : new ArrayList<String>(0);
}
@Override
public String toString() {
return buildClasspath();
}
public String buildClasspath() {
return ServiceUtils.join(pathElements,
CLASS_PATH_SEPARATOR,
false);
}
/**
* Get a copy of the path list
* @return the JARs
*/
public List<String> getPathElements() {
return Collections.unmodifiableList(pathElements);
}
/**
* Append an entry
* @param path path
*/
public void append(String path) {
pathElements.add(path);
}
/**
* Insert a path at the front of the list. This places it ahead of
* the standard YARN artifacts
* @param path path to the JAR. Absolute or relative -on the target
* system
*/
public void insert(String path) {
pathElements.add(0, path);
}
public void appendAll(Collection<String> paths) {
pathElements.addAll(paths);
}
public void insertAll(Collection<String> paths) {
pathElements.addAll(0, paths);
}
public void addLibDir(String pathToLibDir) {
append(buildLibDir(pathToLibDir));
}
public void insertLibDir(String pathToLibDir) {
insert(buildLibDir(pathToLibDir));
}
public void addClassDirectory(String pathToDir) {
append(appendDirectoryTerminator(pathToDir));
}
  public void insertClassDirectory(String pathToDir) {
    // a class directory must not get the "*" wildcard that buildLibDir adds
    insert(appendDirectoryTerminator(pathToDir));
  }
public void addRemoteClasspathEnvVar() {
append(ApplicationConstants.Environment.CLASSPATH.$$());
}
  public void insertRemoteClasspathEnvVar() {
    insert(ApplicationConstants.Environment.CLASSPATH.$$());
  }
/**
* Build a lib dir path
* @param pathToLibDir path to the directory; may or may not end with a
   * trailing slash
* @return a path to a lib dir that is compatible with the java classpath
*/
public String buildLibDir(String pathToLibDir) {
String dir = appendDirectoryTerminator(pathToLibDir);
dir += "*";
return dir;
}
private String appendDirectoryTerminator(String pathToLibDir) {
String dir = pathToLibDir.trim();
if (!dir.endsWith("/")) {
dir += "/";
}
return dir;
}
/**
* Split a classpath. This uses the local path separator so MUST NOT
* be used to work with remote classpaths
* @param localpath local path
   * @return the split-up classpath entries
*/
public Collection<String> splitClasspath(String localpath) {
String separator = System.getProperty("path.separator");
return StringUtils.getStringCollection(localpath, separator);
}
/**
* Get the local JVM classpath split up
* @return the list of entries on the JVM classpath env var
*/
public Collection<String> localJVMClasspath() {
return splitClasspath(System.getProperty("java.class.path"));
}
}
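/*
 * Illustrative sketch (not part of this patch): assembling an AM classpath.
 * The lib and conf directories are made-up examples; the {{CLASSPATH}}
 * placeholder is expanded by the node manager at launch time, not here.
 */
class ClasspathSketch {
  public static void main(String[] args) {
    ClasspathConstructor cp = new ClasspathConstructor();
    cp.addRemoteClasspathEnvVar();   // appends the {{CLASSPATH}} placeholder
    cp.insertLibDir("lib/service");  // service jars go ahead of YARN's
    cp.addClassDirectory("conf");    // trailing "/" is appended for us
    // lib/service/*<CPS>{{CLASSPATH}}<CPS>conf/
    System.out.println(cp.buildClasspath());
  }
}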

View File

@ -0,0 +1,86 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.containerlaunch;
import com.google.common.base.Preconditions;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import java.util.ArrayList;
import java.util.List;
/**
* Build a single command line to include in the container commands;
* Special support for JVM command buildup.
*/
public class CommandLineBuilder {
protected final List<String> argumentList = new ArrayList<>(20);
/**
* Add an entry to the command list
   * @param args arguments -these will be converted to strings
*/
public void add(Object... args) {
for (Object arg : args) {
argumentList.add(arg.toString());
}
}
// Get the number of arguments
public int size() {
return argumentList.size();
}
/**
* Append the output and error files to the tail of the command
* @param stdout out
* @param stderr error. Set this to null to append into stdout
*/
public void addOutAndErrFiles(String stdout, String stderr) {
Preconditions.checkNotNull(stdout, "Null output file");
Preconditions.checkState(!stdout.isEmpty(), "output filename invalid");
// write out the path output
argumentList.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" +
stdout);
if (stderr != null) {
argumentList.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" +
stderr);
} else {
argumentList.add("2>&1");
}
}
/**
* This just returns the command line
* @see #build()
* @return the command line
*/
@Override
public String toString() {
return build();
}
/**
* Build the command line
* @return the command line
*/
public String build() {
return ServiceUtils.join(argumentList, " ");
}
}
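/*
 * Illustrative sketch (not part of this patch): building an AM java command
 * with stdout/stderr redirected into the container log dir. The heap size,
 * class name and file names are made-up examples.
 */
class CommandLineSketch {
  public static void main(String[] args) {
    CommandLineBuilder cli = new CommandLineBuilder();
    cli.add("java", "-Xmx512m", "org.example.ServiceMaster");
    cli.addOutAndErrFiles("am-out.txt", "am-err.txt");
    // java -Xmx512m org.example.ServiceMaster 1><LOG_DIR>/am-out.txt
    //   2><LOG_DIR>/am-err.txt
    System.out.println(cli.build());
  }
}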

View File

@ -0,0 +1,101 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.containerlaunch;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.provider.ProviderService;
import org.apache.hadoop.yarn.service.provider.ProviderFactory;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
public class ContainerLaunchService extends AbstractService {
protected static final Logger LOG =
LoggerFactory.getLogger(ContainerLaunchService.class);
private ExecutorService executorService;
private SliderFileSystem fs;
public ContainerLaunchService(SliderFileSystem fs) {
super(ContainerLaunchService.class.getName());
this.fs = fs;
}
@Override
public void serviceInit(Configuration conf) throws Exception {
executorService = Executors.newCachedThreadPool();
super.serviceInit(conf);
}
@Override
protected void serviceStop() throws Exception {
if (executorService != null) {
executorService.shutdownNow();
}
super.serviceStop();
}
public void launchCompInstance(Service service,
ComponentInstance instance, Container container) {
ContainerLauncher launcher =
new ContainerLauncher(service, instance, container);
executorService.execute(launcher);
}
private class ContainerLauncher implements Runnable {
public final Container container;
public final Service service;
public ComponentInstance instance;
public ContainerLauncher(
Service service,
ComponentInstance instance, Container container) {
this.container = container;
this.service = service;
this.instance = instance;
}
@Override public void run() {
Component compSpec = instance.getCompSpec();
ProviderService provider = ProviderFactory.getProviderService(
compSpec.getArtifact());
AbstractLauncher launcher = new AbstractLauncher(fs, null);
try {
provider.buildContainerLaunchContext(launcher, service,
instance, fs, getConfig());
instance.getComponent().getScheduler().getNmClient()
.startContainerAsync(container,
launcher.completeContainerLaunch());
} catch (Exception e) {
LOG.error(instance.getCompInstanceId()
+ ": Failed to launch container. ", e);
}
}
}
}

View File

@ -0,0 +1,319 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.containerlaunch;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.text.DateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.*;
/**
* Utils to work with credentials and tokens.
*
* Designed to be movable to Hadoop core
*/
public final class CredentialUtils {
private CredentialUtils() {
}
private static final Logger LOG =
LoggerFactory.getLogger(CredentialUtils.class);
/**
* Save credentials to a byte buffer. Returns null if there were no
* credentials to save
* @param credentials credential set
* @return a byte buffer of serialized tokens
* @throws IOException if the credentials could not be written to the stream
*/
public static ByteBuffer marshallCredentials(Credentials credentials) throws IOException {
ByteBuffer buffer = null;
if (!credentials.getAllTokens().isEmpty()) {
DataOutputBuffer dob = new DataOutputBuffer();
try {
credentials.writeTokenStorageToStream(dob);
} finally {
dob.close();
}
buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
}
return buffer;
}
/**
* Save credentials to a file
* @param file file to save to (will be overwritten)
* @param credentials credentials to write
* @throws IOException
*/
public static void saveTokens(File file,
Credentials credentials) throws IOException {
try(DataOutputStream daos = new DataOutputStream(
new FileOutputStream(file))) {
credentials.writeTokenStorageToStream(daos);
}
}
/**
* Look up and return the resource manager's principal. This method
* automatically does the <code>_HOST</code> replacement in the principal and
* correctly handles HA resource manager configurations.
*
* From: YARN-4629
* @param conf the {@link Configuration} file from which to read the
* principal
* @return the resource manager's principal string
* @throws IOException thrown if there's an error replacing the host name
*/
public static String getRMPrincipal(Configuration conf) throws IOException {
String principal = conf.get(RM_PRINCIPAL, "");
String hostname;
Preconditions.checkState(!principal.isEmpty(), "Not set: " + RM_PRINCIPAL);
if (HAUtil.isHAEnabled(conf)) {
YarnConfiguration yarnConf = new YarnConfiguration(conf);
if (yarnConf.get(RM_HA_ID) == null) {
// If RM_HA_ID is not configured, use the first of RM_HA_IDS.
// Any valid RM HA ID should work.
String[] rmIds = yarnConf.getStrings(RM_HA_IDS);
Preconditions.checkState((rmIds != null) && (rmIds.length > 0),
"Not set " + RM_HA_IDS);
yarnConf.set(RM_HA_ID, rmIds[0]);
}
hostname = yarnConf.getSocketAddr(
RM_ADDRESS,
DEFAULT_RM_ADDRESS,
DEFAULT_RM_PORT).getHostName();
} else {
hostname = conf.getSocketAddr(
RM_ADDRESS,
DEFAULT_RM_ADDRESS,
DEFAULT_RM_PORT).getHostName();
}
return SecurityUtil.getServerPrincipal(principal, hostname);
}
/**
* Create and add any filesystem delegation tokens with
* the RM(s) configured to be able to renew them. Returns null
* on an insecure cluster (i.e. harmless)
* @param conf configuration
* @param fs filesystem
* @param credentials credentials to update
* @return a list of all added tokens.
* @throws IOException
*/
public static Token<?>[] addRMRenewableFSDelegationTokens(Configuration conf,
FileSystem fs,
Credentials credentials) throws IOException {
Preconditions.checkArgument(conf != null);
Preconditions.checkArgument(credentials != null);
if (UserGroupInformation.isSecurityEnabled()) {
return fs.addDelegationTokens(CredentialUtils.getRMPrincipal(conf),
credentials);
}
return null;
}
/**
* Add an FS delegation token which can be renewed by the current user
* @param fs filesystem
* @param credentials credentials to update
* @throws IOException problems.
*/
public static void addSelfRenewableFSDelegationTokens(
FileSystem fs,
Credentials credentials) throws IOException {
Preconditions.checkArgument(fs != null);
Preconditions.checkArgument(credentials != null);
fs.addDelegationTokens(
getSelfRenewer(),
credentials);
}
public static String getSelfRenewer() throws IOException {
return UserGroupInformation.getLoginUser().getShortUserName();
}
/**
* Create and add an RM delegation token to the credentials
* @param yarnClient Yarn Client
* @param credentials to add token to
* @return the token which was added
* @throws IOException
* @throws YarnException
*/
public static Token<TokenIdentifier> addRMDelegationToken(YarnClient yarnClient,
Credentials credentials)
throws IOException, YarnException {
Configuration conf = yarnClient.getConfig();
Text rmPrincipal = new Text(CredentialUtils.getRMPrincipal(conf));
Text rmDTService = ClientRMProxy.getRMDelegationTokenService(conf);
Token<TokenIdentifier> rmDelegationToken =
ConverterUtils.convertFromYarn(
yarnClient.getRMDelegationToken(rmPrincipal),
rmDTService);
credentials.addToken(rmDelegationToken.getService(), rmDelegationToken);
return rmDelegationToken;
}
public static Token<TimelineDelegationTokenIdentifier> maybeAddTimelineToken(
Configuration conf,
Credentials credentials)
throws IOException, YarnException {
if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false)) {
LOG.debug("Timeline service enabled -fetching token");
try(TimelineClient timelineClient = TimelineClient.createTimelineClient()) {
timelineClient.init(conf);
timelineClient.start();
Token<TimelineDelegationTokenIdentifier> token =
timelineClient.getDelegationToken(
CredentialUtils.getRMPrincipal(conf));
credentials.addToken(token.getService(), token);
return token;
}
} else {
LOG.debug("Timeline service is disabled");
return null;
}
}
/**
* Filter a list of tokens from a set of credentials
* @param credentials credential source (a new credential set is
* returned; the original is left unmodified)
* @param filter list of token kinds to strip out
* @return a new, filtered, set of credentials
*/
public static Credentials filterTokens(Credentials credentials,
List<Text> filter) {
Credentials result = new Credentials(credentials);
Iterator<Token<? extends TokenIdentifier>> iter =
result.getAllTokens().iterator();
while (iter.hasNext()) {
Token<? extends TokenIdentifier> token = iter.next();
LOG.debug("Token {}", token.getKind());
if (filter.contains(token.getKind())) {
LOG.debug("Filtering token {}", token.getKind());
iter.remove();
}
}
return result;
}
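/**
 * Illustrative sketch only: stripping HDFS delegation tokens from a
 * credential set before passing it on. "HDFS_DELEGATION_TOKEN" is the
 * standard HDFS delegation token kind.
 */
private static Credentials exampleWithoutHdfsTokens(Credentials source) {
  List<Text> filter = new ArrayList<>();
  filter.add(new Text("HDFS_DELEGATION_TOKEN"));
  return filterTokens(source, filter);
}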
public static String dumpTokens(Credentials credentials, String separator) {
ArrayList<Token<? extends TokenIdentifier>> sorted =
new ArrayList<>(credentials.getAllTokens());
Collections.sort(sorted, new TokenComparator());
StringBuilder buffer = new StringBuilder(sorted.size() * 128);
for (Token<? extends TokenIdentifier> token : sorted) {
buffer.append(tokenToString(token)).append(separator);
}
return buffer.toString();
}
/**
* Create a string for people to look at
* @param token token to convert to a string form
* @return a printable view of the token
*/
public static String tokenToString(Token<? extends TokenIdentifier> token) {
DateFormat df = DateFormat.getDateTimeInstance(
DateFormat.SHORT, DateFormat.SHORT);
StringBuilder buffer = new StringBuilder(128);
buffer.append(token.toString());
try {
TokenIdentifier ti = token.decodeIdentifier();
buffer.append("; ").append(ti);
if (ti instanceof AbstractDelegationTokenIdentifier) {
// details in human readable form, and compensate for information HDFS DT omits
AbstractDelegationTokenIdentifier dt = (AbstractDelegationTokenIdentifier) ti;
buffer.append("; Renewer: ").append(dt.getRenewer());
buffer.append("; Issued: ")
.append(df.format(new Date(dt.getIssueDate())));
buffer.append("; Max Date: ")
.append(df.format(new Date(dt.getMaxDate())));
}
} catch (IOException e) {
// marshalling problem; not ours
LOG.debug("Failed to decode {}: {}", token, e, e);
}
return buffer.toString();
}
/**
* Get the expiry time of a token.
* @param token token to examine
* @return the time in milliseconds after which the token is invalid.
* @throws IOException if the token identifier cannot be decoded
*/
public static long getTokenExpiryTime(Token token) throws IOException {
TokenIdentifier identifier = token.decodeIdentifier();
Preconditions.checkState(identifier instanceof AbstractDelegationTokenIdentifier,
"Token %s of type: %s has an identifier which cannot be examined: %s",
token, token.getClass(), identifier);
AbstractDelegationTokenIdentifier id =
(AbstractDelegationTokenIdentifier) identifier;
return id.getMaxDate();
}
private static class TokenComparator
implements Comparator<Token<? extends TokenIdentifier>>, Serializable {
@Override
public int compare(Token<? extends TokenIdentifier> left,
Token<? extends TokenIdentifier> right) {
return left.getKind().toString().compareTo(right.getKind().toString());
}
}
}

View File

@ -0,0 +1,181 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.containerlaunch;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.apache.hadoop.yarn.service.exceptions.BadConfigException;
import java.util.Map;
/**
* Command line builder purely for the Java CLI.
* Some of the <code>define</code> methods are designed to work with Hadoop tool and
* Slider launcher applications.
*/
public class JavaCommandLineBuilder extends CommandLineBuilder {
public JavaCommandLineBuilder() {
add(getJavaBinary());
}
/**
* Get the java binary. This is called in the constructor, so don't try
* to do anything other than return a constant.
* @return the path to the Java binary
*/
protected String getJavaBinary() {
return ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java";
}
/**
* Set the size of the heap if a non-empty heap is passed in.
* @param heap empty string or something like "128M" ,"1G" etc. The value is
* trimmed.
*/
public void setJVMHeap(String heap) {
if (ServiceUtils.isSet(heap)) {
add("-Xmx" + heap.trim());
}
}
/**
* Turn Java assertions on
*/
public void enableJavaAssertions() {
add("-ea");
add("-esa");
}
/**
* Add a system property definition - must be used before setting the main
* entry point.
* @param property property name
* @param value property value
*/
public void sysprop(String property, String value) {
Preconditions.checkArgument(property != null, "null property name");
Preconditions.checkArgument(value != null, "null value");
add("-D" + property + "=" + value);
}
public JavaCommandLineBuilder forceIPv4() {
sysprop("java.net.preferIPv4Stack", "true");
return this;
}
public JavaCommandLineBuilder headless() {
sysprop("java.awt.headless", "true");
return this;
}
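/**
 * Add a <code>-D key=val</code> definition for a configuration option,
 * but only if the option is set in the configuration.
 * @param conf configuration source
 * @param key key to look up
 * @return true if the option was defined
 */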
public boolean addConfOption(Configuration conf, String key) {
return defineIfSet(key, conf.get(key));
}
/**
* Add a varargs list of configuration parameters if they are present
* @param conf configuration source
* @param keys keys
*/
public void addConfOptions(Configuration conf, String... keys) {
for (String key : keys) {
addConfOption(conf, key);
}
}
/**
* Add all configuration options which match the prefix
* @param conf configuration
* @param prefix prefix, e.g {@code "slider."}
* @return the number of entries copied
*/
public int addPrefixedConfOptions(Configuration conf, String prefix) {
int copied = 0;
for (Map.Entry<String, String> entry : conf) {
if (entry.getKey().startsWith(prefix)) {
define(entry.getKey(), entry.getValue());
copied++;
}
}
return copied;
}
/**
* Add a configuration option to the command line of the application
* @param conf configuration
* @param key key
* @param defVal default value
* @return the resolved configuration option
* @throws IllegalArgumentException if key is null or the looked-up value
* is null (that is: the argument is missing and defVal was null).
*/
public String addConfOptionToCLI(Configuration conf,
String key,
String defVal) {
Preconditions.checkArgument(key != null, "null key");
String val = conf.get(key, defVal);
define(key, val);
return val;
}
/**
* Add a <code>-D key=val</code> command to the CLI. This is the classic
* Hadoop command line convention.
* @param key key
* @param val value
* @throws IllegalArgumentException if either argument is null
*/
public void define(String key, String val) {
Preconditions.checkArgument(key != null, "null key");
Preconditions.checkArgument(val != null, "null value");
add("-D", key + "=" + val);
}
/**
* Add a <code>-D key=val</code> command to the CLI if <code>val</code>
* is not null
* @param key key
* @param val value
* @return true if the definition was added
*/
public boolean defineIfSet(String key, String val) {
Preconditions.checkArgument(key != null, "null key");
if (val != null) {
define(key, val);
return true;
} else {
return false;
}
}
/**
* Add a mandatory config option
* @param conf configuration
* @param key key
* @throws BadConfigException if the key is missing
*/
public void addMandatoryConfOption(Configuration conf,
String key) throws BadConfigException {
if (!addConfOption(conf, key)) {
throw new BadConfigException("Missing configuration option: " + key);
}
}
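/**
 * Illustrative sketch only: assembling a JVM command line. The system
 * property and main class below are hypothetical; rendering the final
 * command string is left to the parent CommandLineBuilder API.
 */
public static JavaCommandLineBuilder exampleCommandLine(Configuration conf)
    throws BadConfigException {
  JavaCommandLineBuilder cli = new JavaCommandLineBuilder();
  cli.setJVMHeap("256M");
  cli.forceIPv4().headless();
  cli.sysprop("example.property", "value");    // hypothetical property
  cli.addMandatoryConfOption(conf, "fs.defaultFS");
  cli.add("org.example.Main");                 // hypothetical main class
  return cli;
}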
}

View File

@ -0,0 +1,36 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
/**
* The system is in a bad state
*/
public class BadClusterStateException extends SliderException {
public BadClusterStateException(String message,
Object... args) {
super(EXIT_BAD_STATE, message, args);
}
public BadClusterStateException(Throwable throwable,
String message, Object... args) {
super(EXIT_BAD_STATE, throwable, message, args);
}
}

View File

@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
public class BadCommandArgumentsException extends SliderException {
public BadCommandArgumentsException(String s, Object... args) {
super(EXIT_COMMAND_ARGUMENT_ERROR, s, args);
}
public BadCommandArgumentsException(Throwable throwable, String message,
Object... args) {
super(EXIT_COMMAND_ARGUMENT_ERROR, throwable, message, args);
}
}

View File

@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
/**
* An exception to raise on a bad configuration
*/
public class BadConfigException extends SliderException {
public BadConfigException(String s) {
super(EXIT_BAD_CONFIGURATION, s);
}
public BadConfigException(String message, Object... args) {
super(EXIT_BAD_CONFIGURATION, message, args);
}
public BadConfigException(
Throwable throwable,
String message, Object... args) {
super(EXIT_BAD_CONFIGURATION, throwable, message, args);
}
}

View File

@ -0,0 +1,42 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
public interface ErrorStrings {
String PRINTF_E_INSTANCE_ALREADY_EXISTS = "Service Instance \"%s\" already exists and is defined in %s";
String PRINTF_E_INSTANCE_DIR_ALREADY_EXISTS = "Service Instance dir already exists: %s";
/**
* ERROR Strings
*/
String ERROR_NO_ACTION = "No action specified";
String ERROR_UNKNOWN_ACTION = "Unknown command: ";
String ERROR_NOT_ENOUGH_ARGUMENTS =
"Not enough arguments for action: ";
String ERROR_PARSE_FAILURE =
"Failed to parse ";
/**
* All the remaining values after argument processing
*/
String ERROR_TOO_MANY_ARGUMENTS =
"Too many arguments";
String ERROR_DUPLICATE_ENTRY = "Duplicate entry for ";
}

View File

@ -0,0 +1,32 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
/**
* Get the exit code of an exception. Making it an interface allows
* us to retrofit exit codes onto existing classes
*/
public interface ExitCodeProvider {
/**
* Method to get the exit code
* @return the exit code
*/
int getExitCode();
}

View File

@ -0,0 +1,196 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
/**
 * Common Exit codes.
 * <p>
 * Exit codes from 64 up are service specific.
 * <p>
 * Many of the exit codes are designed to resemble HTTP error codes,
 * squashed into a single byte. e.g. 44, "not found", is the equivalent
 * of 404.
* <pre>
* 0-10: general command issues
* 30-39: equivalent to the 3XX responses, where those responses are
* considered errors by the service.
* 40-49: request-related errors
* 50-59: server-side problems. These may be triggered by the request.
* 64- : service specific error codes
* </pre>
*/
public interface LauncherExitCodes {
/**
* 0: success
*/
int EXIT_SUCCESS = 0;
/**
* -1: generic "false" response. The operation worked but
* the result was not true
*/
int EXIT_FALSE = -1;
/**
* Exit code when a client requested service termination: {@value}
*/
int EXIT_CLIENT_INITIATED_SHUTDOWN = 1;
/**
* Exit code when targets could not be launched: {@value}
*/
int EXIT_TASK_LAUNCH_FAILURE = 2;
/**
* Exit code when a control-C, kill -3, signal was picked up: {@value}
*/
int EXIT_INTERRUPTED = 3;
/**
* Exit code when a usage message was printed: {@value}
*/
int EXIT_USAGE = 4;
/**
* Exit code when something happened but we can't be specific: {@value}
*/
int EXIT_OTHER_FAILURE = 5;
/**
* Exit code when a resource has moved: {@value}
* <p>
* <code>301 Moved</code>
*/
int EXIT_MOVED = 31;
/**
* Exit code when something was found: {@value}.
* <p>
* This is an error here even though in HTTP it is normally a
* success/redirect; on the command line, 0 is the sole success code.
* <p>
* <code>302 Found</code>
*/
int EXIT_FOUND = 32;
/**
* Exit code on a request where the destination has not changed
* and (somehow) the command specified that this is an error.
* That is, this exit code is somehow distinct from a "success": {@value}
* <p>
* <code>304 Not Modified </code>
*/
int EXIT_NOT_MODIFIED = 34;
/**
* Exit code when the command line doesn't parse: {@value}, or
* when it is otherwise invalid.
* <p>
* <code>400 BAD REQUEST</code>
*/
int EXIT_COMMAND_ARGUMENT_ERROR = 40;
/**
* The request requires user authentication: {@value}
* <p>
* <code>401 Unauthorized</code>
*/
int EXIT_UNAUTHORIZED = 41;
/**
* Forbidden action: {@value}
* <p>
* <code>403: Forbidden</code>
*/
int EXIT_FORBIDDEN = 43;
/**
* Something was not found: {@value}
* <p>
* <code>404: NOT FOUND</code>
*/
int EXIT_NOT_FOUND = 44;
/**
* The operation is not allowed: {@value}
* <p>
* <code>405: NOT ALLOWED</code>
*/
int EXIT_OPERATION_NOT_ALLOWED = 45;
/**
* The command is somehow not acceptable: {@value}
* <p>
* <code>406: NOT ACCEPTABLE</code>
*/
int EXIT_NOT_ACCEPTABLE = 46;
/**
* Exit code on connectivity problems: {@value}
* <p>
* <code>408: Request Timeout</code>
*/
int EXIT_CONNECTIVITY_PROBLEM = 48;
/**
* The request could not be completed due to a conflict with the current
* state of the resource. {@value}
* <p>
* <code>409: conflict</code>
*/
int EXIT_CONFLICT = 49;
/**
* internal error: {@value}
* <p>
* <code>500 Internal Server Error</code>
*/
int EXIT_INTERNAL_ERROR = 50;
/**
* Unimplemented feature: {@value}
* <p>
* <code>501: Not Implemented</code>
*/
int EXIT_UNIMPLEMENTED = 51;
/**
* Service Unavailable; it may be available later: {@value}
* <p>
* <code>503 Service Unavailable</code>
*/
int EXIT_SERVICE_UNAVAILABLE = 53;
/**
* The service does not support, or refuses to support, this version:
* {@value}. This is expected to be raised server-side, likely due to
* client/server version incompatibilities.
* <p>
* <code> 505: Version Not Supported</code>
*/
int EXIT_UNSUPPORTED_VERSION = 55;
/**
* Exit code when an exception was thrown from the service: {@value}
* <p>
* <code>5XX</code>
*/
int EXIT_EXCEPTION_THROWN = 56;
}

View File

@ -0,0 +1,92 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
public interface RestApiErrorMessages {
String ERROR_APPLICATION_NAME_INVALID =
"Service name is either empty or not provided";
String ERROR_APPLICATION_NAME_INVALID_FORMAT =
"Service name %s is not valid - only lower case letters, digits, " +
"and hyphen are allowed, and the name must be no more " +
"than 63 characters";
String ERROR_COMPONENT_NAME_INVALID =
"Component name must be no more than %s characters: %s";
String ERROR_USER_NAME_INVALID =
"User name must be no more than 63 characters";
String ERROR_APPLICATION_NOT_RUNNING = "Service not running";
String ERROR_APPLICATION_DOES_NOT_EXIST = "Service not found";
String ERROR_APPLICATION_IN_USE = "Service already exists in started"
+ " state";
String ERROR_APPLICATION_INSTANCE_EXISTS = "Service already exists in"
+ " stopped/failed state (either restart with PUT or destroy with DELETE"
+ " before creating a new one)";
String ERROR_SUFFIX_FOR_COMPONENT =
" for component %s (nor at the global level)";
String ERROR_ARTIFACT_INVALID = "Artifact is not provided";
String ERROR_ARTIFACT_FOR_COMP_INVALID =
ERROR_ARTIFACT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
String ERROR_ARTIFACT_ID_INVALID =
"Artifact id (like docker image name) is either empty or not provided";
String ERROR_ARTIFACT_ID_FOR_COMP_INVALID =
ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
String ERROR_RESOURCE_INVALID = "Resource is not provided";
String ERROR_RESOURCE_FOR_COMP_INVALID =
ERROR_RESOURCE_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
String ERROR_RESOURCE_MEMORY_INVALID =
"Service resource or memory not provided";
String ERROR_RESOURCE_CPUS_INVALID =
"Service resource or cpus not provided";
String ERROR_RESOURCE_CPUS_INVALID_RANGE =
    "Unacceptable number of cpus specified, either zero or negative";
String ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID =
ERROR_RESOURCE_MEMORY_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID =
ERROR_RESOURCE_CPUS_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE =
ERROR_RESOURCE_CPUS_INVALID_RANGE
+ " for component %s (or at the global level)";
String ERROR_CONTAINERS_COUNT_INVALID =
    "Invalid number of containers specified";
String ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID =
ERROR_CONTAINERS_COUNT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
String ERROR_DEPENDENCY_INVALID = "Dependency %s for component %s is " +
"invalid, does not exist as a component";
String ERROR_DEPENDENCY_CYCLE = "Invalid dependencies, a cycle may " +
"exist: %s";
String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED =
"Cannot specify" + " cpus/memory along with profile";
String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED =
ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED
+ " for component %s";
String ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET =
"Resource profile is not " + "supported yet. Please specify cpus/memory.";
String ERROR_NULL_ARTIFACT_ID =
    "Artifact id cannot be null if artifact type is none";
String ERROR_ABSENT_NUM_OF_INSTANCE =
"Num of instances should appear either globally or per component";
String ERROR_ABSENT_LAUNCH_COMMAND =
"Launch_command is required when type is not DOCKER";
String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at"
+ " component level, needs corresponding values set at service level";
}

View File

@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
import org.apache.hadoop.yarn.exceptions.YarnException;
/**
* A service launch exception that includes an exit code;
* when caught by the ServiceLauncher, it will convert that
* into a process exit code.
*/
public class ServiceLaunchException extends YarnException
implements ExitCodeProvider, LauncherExitCodes {
private final int exitCode;
/**
* Create an exception with the specific exit code
* @param exitCode exit code
* @param cause cause of the exception
*/
public ServiceLaunchException(int exitCode, Throwable cause) {
super(cause);
this.exitCode = exitCode;
}
/**
* Create an exception with the specific exit code and text
* @param exitCode exit code
* @param message message to use in exception
*/
public ServiceLaunchException(int exitCode, String message) {
super(message);
this.exitCode = exitCode;
}
/**
* Create an exception with the specific exit code, text and cause
* @param exitCode exit code
* @param message message to use in exception
* @param cause cause of the exception
*/
public ServiceLaunchException(int exitCode, String message, Throwable cause) {
super(message, cause);
this.exitCode = exitCode;
}
/**
* Get the exit code
* @return the exit code
*/
@Override
public int getExitCode() {
return exitCode;
}
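/**
 * Illustrative sketch only: how a launcher might map a caught exception
 * to a process exit code, falling back to the generic
 * {@link #EXIT_EXCEPTION_THROWN}.
 */
public static int exampleExitCodeOf(Throwable t) {
  if (t instanceof ExitCodeProvider) {
    return ((ExitCodeProvider) t).getExitCode();
  }
  return EXIT_EXCEPTION_THROWN;
}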
}

View File

@ -0,0 +1,66 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
import org.apache.hadoop.yarn.service.conf.SliderExitCodes;
public class SliderException extends ServiceLaunchException implements
SliderExitCodes {
public SliderException() {
super(EXIT_EXCEPTION_THROWN, "SliderException");
}
public SliderException(int code, String message) {
super(code, message);
}
public SliderException(String s) {
super(EXIT_EXCEPTION_THROWN, s);
}
public SliderException(String s, Throwable throwable) {
super(EXIT_EXCEPTION_THROWN, s, throwable);
}
/**
* Format the exception as you create it
* @param code exit code
* @param message exception message, sprintf formatted
* @param args arguments for the formatting
*/
public SliderException(int code, String message, Object... args) {
super(code, String.format(message, args));
}
/**
* Format the exception, include a throwable.
* The throwable comes before the message so that it is out of the varargs
* @param code exit code
* @param throwable thrown
* @param message message
* @param args arguments
*/
public SliderException(int code,
Throwable throwable,
String message,
Object... args) {
super(code, String.format(message, args), throwable);
}
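/**
 * Illustrative sketch only: raising a formatted exception with an explicit
 * exit code. The message and path are hypothetical.
 */
public static SliderException exampleBadArgument(String path) {
  return new SliderException(EXIT_COMMAND_ARGUMENT_ERROR,
      "Unable to read config file: %s", path);
}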
}

View File

@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.exceptions;
/**
* Used to raise a usage exception; this carries the exit code
* {@link #EXIT_USAGE}.
*/
public class UsageException extends SliderException {
public UsageException(String s, Object... args) {
super(EXIT_USAGE, s, args);
}
public UsageException(Throwable throwable, String message,
Object... args) {
super(EXIT_USAGE, throwable, message, args);
}
}

View File

@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.impl.pb.client;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.service.ClientAMProtocol;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto;
import org.apache.hadoop.yarn.service.impl.pb.service.ClientAMProtocolPB;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto;
public class ClientAMProtocolPBClientImpl
implements ClientAMProtocol, Closeable {
private ClientAMProtocolPB proxy;
public ClientAMProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, ClientAMProtocolPB.class,
ProtobufRpcEngine.class);
proxy = RPC.getProxy(ClientAMProtocolPB.class, clientVersion, addr, conf);
}
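/**
 * Illustrative sketch only: opening a client against an AM at a known
 * address and issuing a stop request. Host, port and protocol version
 * below are hypothetical.
 */
public static void exampleStop(Configuration conf)
    throws IOException, YarnException {
  InetSocketAddress addr = new InetSocketAddress("am.example.com", 12345);
  try (ClientAMProtocolPBClientImpl client =
           new ClientAMProtocolPBClientImpl(1L, addr, conf)) {
    client.stop(StopRequestProto.newBuilder().build());
  }
}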
@Override public FlexComponentsResponseProto flexComponents(
FlexComponentsRequestProto request) throws IOException, YarnException {
try {
return proxy.flexComponents(null, request);
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
}
return null;
}
@Override
public GetStatusResponseProto getStatus(GetStatusRequestProto request)
throws IOException, YarnException {
try {
return proxy.getStatus(null, request);
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
}
return null;
}
@Override
public StopResponseProto stop(StopRequestProto requestProto)
throws IOException, YarnException {
try {
return proxy.stop(null, requestProto);
} catch (ServiceException e) {
RPCUtil.unwrapAndThrowException(e);
}
return null;
}
@Override public void close() {
if (this.proxy != null) {
RPC.stopProxy(this.proxy);
}
}
}

View File

@ -0,0 +1,29 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.impl.pb.service;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.yarn.proto.ClientAMProtocol;
@ProtocolInfo(
protocolName = "org.apache.hadoop.yarn.service.ClientAMProtocol",
protocolVersion = 1)
public interface ClientAMProtocolPB extends
ClientAMProtocol.ClientAMProtocolService.BlockingInterface {
}

View File

@ -0,0 +1,70 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.impl.pb.service;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto;
import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto;
import org.apache.hadoop.yarn.service.ClientAMProtocol;
import java.io.IOException;
public class ClientAMProtocolPBServiceImpl implements ClientAMProtocolPB {
private ClientAMProtocol real;
public ClientAMProtocolPBServiceImpl(ClientAMProtocol impl) {
this.real = impl;
}
@Override
public FlexComponentsResponseProto flexComponents(RpcController controller,
FlexComponentsRequestProto request) throws ServiceException {
try {
return real.flexComponents(request);
} catch (IOException | YarnException e) {
throw new ServiceException(e);
}
}
@Override public GetStatusResponseProto getStatus(RpcController controller,
GetStatusRequestProto request) throws ServiceException {
try {
return real.getStatus(request);
} catch (IOException | YarnException e) {
throw new ServiceException(e);
}
}
@Override
public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto stop(
RpcController controller,
org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request)
throws ServiceException {
try {
return real.stop(request);
} catch (IOException | YarnException e) {
throw new ServiceException(e);
}
}
}

View File

@ -0,0 +1,147 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.monitor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.service.ServiceContext;
import org.apache.hadoop.yarn.service.component.Component;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
import org.apache.hadoop.yarn.service.component.ComponentEvent;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
import org.apache.hadoop.yarn.service.component.ComponentState;
import org.apache.hadoop.yarn.service.monitor.probe.ProbeStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.STARTED;
import static org.apache.hadoop.yarn.service.component.ComponentEventType.FLEX;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_NOT_READY;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_READY;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.READY;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_WINDOW;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_INTERVAL;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.READINESS_CHECK_INTERVAL;
public class ServiceMonitor extends AbstractService {
private static final Logger LOG =
LoggerFactory.getLogger(ServiceMonitor.class);
public ScheduledExecutorService executorService;
private Map<ContainerId, ComponentInstance> liveInstances = null;
private ServiceContext context;
private Configuration conf;
public ServiceMonitor(String name, ServiceContext context) {
super(name);
liveInstances = context.scheduler.getLiveInstances();
this.context = context;
}
@Override
public void serviceInit(Configuration conf) throws Exception {
executorService = Executors.newScheduledThreadPool(1);
this.conf = conf;
super.serviceInit(conf);
}
@Override
public void serviceStart() throws Exception {
long readinessCheckInterval = YarnServiceConf
.getLong(READINESS_CHECK_INTERVAL, DEFAULT_READINESS_CHECK_INTERVAL,
context.service.getConfiguration(), conf);
executorService
.scheduleAtFixedRate(new ReadinessChecker(), readinessCheckInterval,
readinessCheckInterval, TimeUnit.SECONDS);
// Default 6 hours.
long failureResetInterval = YarnServiceConf
.getLong(CONTAINER_FAILURE_WINDOW, 21600,
context.service.getConfiguration(), conf);
executorService
.scheduleAtFixedRate(new ContainerFailureReset(), failureResetInterval,
failureResetInterval, TimeUnit.SECONDS);
}
@Override
public void serviceStop() throws Exception {
if (executorService != null) {
executorService.shutdownNow();
}
}
private class ReadinessChecker implements Runnable {
@Override
public void run() {
// check whether the component instances are ready
for (Map.Entry<ContainerId, ComponentInstance> entry : liveInstances
.entrySet()) {
ComponentInstance instance = entry.getValue();
ProbeStatus status = instance.ping();
if (status.isSuccess()) {
if (instance.getState() == STARTED) {
// synchronously update the state.
instance.handle(
new ComponentInstanceEvent(entry.getKey(), BECOME_READY));
}
} else {
if (instance.getState() == READY) {
instance.handle(
new ComponentInstanceEvent(entry.getKey(), BECOME_NOT_READY));
}
}
}
for (Component component : context.scheduler.getAllComponents()
.values()) {
// If comp hasn't started yet and its dependencies are satisfied
if (component.getState() == ComponentState.INIT && component
.areDependenciesReady()) {
LOG.info("[COMPONENT {}]: Dependencies satisfied, ramping up.",
component.getName());
ComponentEvent event = new ComponentEvent(component.getName(), FLEX)
.setDesired(component.getComponentSpec().getNumberOfContainers());
component.handle(event);
}
}
}
}
private class ContainerFailureReset implements Runnable {
@Override
public void run() {
for (Component component : context.scheduler.getAllComponents().values()) {
component.resetCompFailureCount();
}
}
}
}

View File

@ -0,0 +1,110 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.monitor.probe;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Map;
public class HttpProbe extends Probe {
protected static final Logger log = LoggerFactory.getLogger(HttpProbe.class);
private static final String HOST_TOKEN = "${THIS_HOST}";
private final String urlString;
private final int timeout;
private final int min, max;
public HttpProbe(String url, int timeout, int min, int max, Configuration
conf) {
super("Http probe of " + url + " [" + min + "-" + max + "]", conf);
this.urlString = url;
this.timeout = timeout;
this.min = min;
this.max = max;
}
public static HttpProbe create(Map<String, String> props)
throws IOException {
String urlString = getProperty(props, WEB_PROBE_URL, null);
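// Fail fast: constructing the URL validates the configured string.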
new URL(urlString);
int timeout = getPropertyInt(props, WEB_PROBE_CONNECT_TIMEOUT,
WEB_PROBE_CONNECT_TIMEOUT_DEFAULT);
int minSuccess = getPropertyInt(props, WEB_PROBE_MIN_SUCCESS,
WEB_PROBE_MIN_SUCCESS_DEFAULT);
int maxSuccess = getPropertyInt(props, WEB_PROBE_MAX_SUCCESS,
WEB_PROBE_MAX_SUCCESS_DEFAULT);
return new HttpProbe(urlString, timeout, minSuccess, maxSuccess, null);
}
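/**
 * Illustrative sketch only: the property map a readiness check might pass
 * to {@link #create(Map)}. The URL is hypothetical; ${THIS_HOST} is
 * substituted with the container IP at ping time.
 */
public static HttpProbe exampleProbe() throws IOException {
  Map<String, String> props = new java.util.HashMap<>();
  props.put(WEB_PROBE_URL, "http://${THIS_HOST}:8080/health");
  props.put(WEB_PROBE_CONNECT_TIMEOUT, "2000");  // 2s connect timeout
  props.put(WEB_PROBE_MIN_SUCCESS, "200");
  props.put(WEB_PROBE_MAX_SUCCESS, "299");
  return create(props);
}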
private static HttpURLConnection getConnection(URL url, int timeout) throws
IOException {
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setInstanceFollowRedirects(true);
connection.setConnectTimeout(timeout);
return connection;
}
@Override
public ProbeStatus ping(ComponentInstance instance) {
ProbeStatus status = new ProbeStatus();
ContainerStatus containerStatus = instance.getContainerStatus();
if (containerStatus == null || ServiceUtils.isEmpty(containerStatus.getIPs())
|| StringUtils.isEmpty(containerStatus.getHost())) {
status.fail(this, new IOException("IP is not available yet"));
return status;
}
String ip = containerStatus.getIPs().get(0);
HttpURLConnection connection = null;
try {
URL url = new URL(urlString.replace(HOST_TOKEN, ip));
connection = getConnection(url, this.timeout);
int rc = connection.getResponseCode();
if (rc < min || rc > max) {
String error = "Probe " + url + " error code: " + rc;
log.info(error);
status.fail(this,
new IOException(error));
} else {
status.succeed(this);
}
} catch (Throwable e) {
String error = "Probe " + urlString + " failed for IP " + ip + ": " + e;
log.info(error, e);
status.fail(this,
new IOException(error, e));
} finally {
if (connection != null) {
connection.disconnect();
}
}
return status;
}
}

View File

@ -0,0 +1,76 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.monitor.probe;
/**
* Build up log entries for ease of indexing by tools such as Splunk.
*/
public class LogEntryBuilder {
private final StringBuilder builder = new StringBuilder();
public LogEntryBuilder() {
}
public LogEntryBuilder(String text) {
elt(text);
}
public LogEntryBuilder(String name, Object value) {
entry(name, value);
}
public LogEntryBuilder elt(String text) {
addComma();
builder.append(text);
return this;
}
public LogEntryBuilder elt(String name, Object value) {
addComma();
entry(name, value);
return this;
}
private void addComma() {
if (!isEmpty()) {
builder.append(", ");
}
}
private void entry(String name, Object value) {
builder.append(name).append('=');
if (value != null) {
builder.append('"').append(value.toString()).append('"');
} else {
builder.append("null");
}
}
@Override
public String toString() {
return builder.toString();
}
private boolean isEmpty() {
return builder.length() == 0;
}
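/**
 * Illustrative sketch only: composing an entry. Produces something like
 * {@code probe, outcome="success", time="..."}.
 */
public static String exampleEntry() {
  return new LogEntryBuilder("probe")
      .elt("outcome", "success")
      .elt("time", new java.util.Date())
      .toString();
}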
}

View File

@ -0,0 +1,66 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.monitor.probe;
/**
* Config keys for monitoring
*/
public interface MonitorKeys {
/**
* Port probing key : port to attempt to create a TCP connection to {@value}.
*/
String PORT_PROBE_PORT = "port";
/**
* Port probing key : timeout for the connection attempt {@value}.
*/
String PORT_PROBE_CONNECT_TIMEOUT = "timeout";
/**
* Port probing default : timeout for the connection attempt {@value}.
*/
int PORT_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000;
/**
* Web probing key : URL {@value}.
*/
String WEB_PROBE_URL = "url";
/**
* Web probing key : min success code {@value}.
*/
String WEB_PROBE_MIN_SUCCESS = "min.success";
/**
* Web probing key : max success code {@value}.
*/
String WEB_PROBE_MAX_SUCCESS = "max.success";
/**
* Web probing default : min successful response code {@value}.
*/
int WEB_PROBE_MIN_SUCCESS_DEFAULT = 200;
/**
* Web probing default : max successful response code {@value}.
*/
int WEB_PROBE_MAX_SUCCESS_DEFAULT = 299;
/**
* Web probing key : timeout for the connection attempt {@value}.
*/
String WEB_PROBE_CONNECT_TIMEOUT = "timeout";
/**
* Web probing default : timeout for the connection attempt {@value}.
*/
int WEB_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000;
}

View File

@ -0,0 +1,84 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.monitor.probe;
import org.apache.hadoop.yarn.service.api.records.ReadinessCheck;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Formatter;
import java.util.Locale;
/**
* Various utils to work with the monitor
*/
public final class MonitorUtils {
protected static final Logger LOG = LoggerFactory.getLogger(MonitorUtils
.class);
private MonitorUtils() {
}
public static String toPlural(int val) {
return val != 1 ? "s" : "";
}
/**
* Convert milliseconds to human time - the exact format is unspecified.
* @param milliseconds a time in milliseconds
* @return a time that is converted to human intervals
*/
public static String millisToHumanTime(long milliseconds) {
StringBuilder sb = new StringBuilder();
// Send all output to the Appendable object sb
Formatter formatter = new Formatter(sb, Locale.US);
long s = Math.abs(milliseconds / 1000);
long m = Math.abs(milliseconds % 1000);
if (milliseconds > 0) {
formatter.format("%d.%03ds", s, m);
} else if (milliseconds == 0) {
formatter.format("0");
} else {
formatter.format("-%d.%03ds", s, m);
}
return sb.toString();
}
public static Probe getProbe(ReadinessCheck readinessCheck) {
if (readinessCheck == null) {
return null;
}
if (readinessCheck.getType() == null) {
return null;
}
try {
switch (readinessCheck.getType()) {
case HTTP:
return HttpProbe.create(readinessCheck.getProperties());
case PORT:
return PortProbe.create(readinessCheck.getProperties());
default:
return null;
}
} catch (Throwable t) {
throw new IllegalArgumentException("Error creating readiness check " +
t);
}
}
}

View File

@ -0,0 +1,98 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.monitor.probe;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.Map;
/**
* Probe for a port being open.
*/
public class PortProbe extends Probe {
protected static final Logger log = LoggerFactory.getLogger(PortProbe.class);
private final int port;
private final int timeout;
public PortProbe(int port, int timeout) {
super("Port probe of " + port + " for " + timeout + "ms", null);
this.port = port;
this.timeout = timeout;
}
public static PortProbe create(Map<String, String> props)
throws IOException {
int port = getPropertyInt(props, PORT_PROBE_PORT, null);
if (port <= 0 || port >= 65536) {
throw new IOException(PORT_PROBE_PORT + " " + port + " is out of " +
"range");
}
int timeout = getPropertyInt(props, PORT_PROBE_CONNECT_TIMEOUT,
PORT_PROBE_CONNECT_TIMEOUT_DEFAULT);
return new PortProbe(port, timeout);
}
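/**
 * Illustrative sketch only: configuring a port probe from a property map,
 * as a readiness check would. The port value is hypothetical.
 */
public static PortProbe exampleProbe() throws IOException {
  Map<String, String> props = new java.util.HashMap<>();
  props.put(PORT_PROBE_PORT, "8080");             // hypothetical port
  props.put(PORT_PROBE_CONNECT_TIMEOUT, "1000");  // 1s connect timeout
  return create(props);
}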
/**
* Try to connect to the (host,port); a failure to connect within
* the specified timeout is a failure.
* @param instance role instance
* @return the outcome
*/
@Override
public ProbeStatus ping(ComponentInstance instance) {
ProbeStatus status = new ProbeStatus();
if (instance.getContainerStatus() == null || ServiceUtils
.isEmpty(instance.getContainerStatus().getIPs())) {
status.fail(this, new IOException(
instance.getCompInstanceName() + ": IP is not available yet"));
return status;
}
String ip = instance.getContainerStatus().getIPs().get(0);
InetSocketAddress sockAddr = new InetSocketAddress(ip, port);
Socket socket = new Socket();
try {
if (log.isDebugEnabled()) {
log.debug(instance.getCompInstanceName() + ": Connecting " + sockAddr
.toString() + ", timeout=" + MonitorUtils
.millisToHumanTime(timeout));
}
socket.connect(sockAddr, timeout);
status.succeed(this);
} catch (Throwable e) {
String error =
instance.getCompInstanceName() + ": Probe " + sockAddr + " failed";
log.debug(error, e);
status.fail(this, new IOException(error, e));
} finally {
IOUtils.closeSocket(socket);
}
return status;
}
}

View File

@ -0,0 +1,100 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.monitor.probe;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import java.io.IOException;
import java.util.Map;
/**
* Base class of all probes.
*/
public abstract class Probe implements MonitorKeys {
protected final Configuration conf;
private String name;
/**
* Create a probe of a specific name
*
* @param name probe name
* @param conf configuration being stored.
*/
public Probe(String name, Configuration conf) {
this.name = name;
this.conf = conf;
}
protected void setName(String name) {
this.name = name;
}
public String getName() {
return name;
}
@Override
public String toString() {
return getName();
}
public static String getProperty(Map<String, String> props, String name,
String defaultValue) throws IOException {
String value = props.get(name);
if (StringUtils.isEmpty(value)) {
if (defaultValue == null) {
throw new IOException(name + " not specified");
}
return defaultValue;
}
return value;
}
public static int getPropertyInt(Map<String, String> props, String name,
Integer defaultValue) throws IOException {
String value = props.get(name);
if (StringUtils.isEmpty(value)) {
if (defaultValue == null) {
throw new IOException(name + " not specified");
}
return defaultValue;
}
return Integer.parseInt(value);
}
/**
* perform any prelaunch initialization
*/
public void init() throws IOException {
}
/**
* Ping the endpoint. All exceptions must be caught and included in the
* (failure) status.
*
* @param instance instance to ping
* @return the status
*/
public abstract ProbeStatus ping(ComponentInstance instance);
}

View File

@ -0,0 +1,160 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.monitor.probe;
import java.io.Serializable;
import java.util.Date;
/**
* Status message of a probe. This is designed to be sent over the wire, though the exception
* had better be deserializable at the far end if that is to work.
*/
public final class ProbeStatus implements Serializable {
private static final long serialVersionUID = 165468L;
private long timestamp;
private String timestampText;
private boolean success;
private boolean realOutcome;
private String message;
private Throwable thrown;
private transient Probe originator;
public ProbeStatus() {
}
public ProbeStatus(long timestamp, String message, Throwable thrown) {
this.success = false;
this.message = message;
this.thrown = thrown;
setTimestamp(timestamp);
}
public ProbeStatus(long timestamp, String message) {
this.success = true;
setTimestamp(timestamp);
this.message = message;
this.thrown = null;
}
public long getTimestamp() {
return timestamp;
}
public void setTimestamp(long timestamp) {
this.timestamp = timestamp;
timestampText = new Date(timestamp).toString();
}
public boolean isSuccess() {
return success;
}
/**
* Set both the success and the real outcome bits to the same value.
* @param success the new value
*/
public void setSuccess(boolean success) {
this.success = success;
realOutcome = success;
}
public String getTimestampText() {
return timestampText;
}
public boolean getRealOutcome() {
return realOutcome;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
public Throwable getThrown() {
return thrown;
}
public void setThrown(Throwable thrown) {
this.thrown = thrown;
}
/**
* Get the probe that generated this result. May be null.
* @return a possibly null reference to a probe
*/
public Probe getOriginator() {
return originator;
}
/**
* The probe has succeeded: capture the current timestamp, set
* success to true, and record any other data needed.
* @param probe probe
*/
public void succeed(Probe probe) {
finish(probe, true, probe.getName(), null);
}
/**
* A probe has failed either because the test returned false, or an exception
* was thrown. The {@link #success} field is set to false, and any exception
* thrown is recorded.
* @param probe probe that failed
* @param thrown an exception that was thrown.
*/
public void fail(Probe probe, Throwable thrown) {
finish(probe, false, "Failure in " + probe, thrown);
}
public void finish(Probe probe, boolean succeeded, String text, Throwable thrown) {
setTimestamp(System.currentTimeMillis());
setSuccess(succeeded);
originator = probe;
message = text;
this.thrown = thrown;
}
@Override
public String toString() {
LogEntryBuilder builder = new LogEntryBuilder("Probe Status");
builder.elt("time", timestampText)
.elt("outcome", (success ? "success" : "failure"));
if (success != realOutcome) {
builder.elt("originaloutcome", (realOutcome ? "success" : "failure"));
}
builder.elt("message", message);
if (thrown != null) {
builder.elt("exception", thrown);
}
return builder.toString();
}
/**
* Flip the success bit on while the real outcome bit is kept false.
*/
public void markAsSuccessful() {
success = true;
}
}
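
A short hypothetical helper (not part of this patch) showing how the success/realOutcome split is meant to be used: during a startup grace period a failed probe can be reported as healthy, while getRealOutcome() keeps the true result and toString() prints both outcomes whenever they differ.

static ProbeStatus checkWithGracePeriod(Probe probe,
    ComponentInstance instance, boolean inGracePeriod) {
  ProbeStatus status = probe.ping(instance);
  if (!status.isSuccess() && inGracePeriod) {
    // success flips to true; realOutcome stays false
    status.markAsSuccessful();
  }
  return status;
}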
View File
@ -0,0 +1,129 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.provider;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.service.api.records.Artifact;
import org.apache.hadoop.yarn.service.api.records.ConfigFile;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import java.io.IOException;
import java.nio.file.Paths;
import java.text.MessageFormat;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConstants.CONTENT;
public abstract class AbstractClientProvider {
public AbstractClientProvider() {
}
/**
* Generates a fixed format of application tags given one or more of the
* application name, version and description. This allows a subsequent query
* for an application to filter by name only, version only, description only,
* or any combination of those.
*
* @param appName name of the application
* @param appVersion version of the application
* @param appDescription brief description of the application
* @return the set of tags
*/
public static final Set<String> createApplicationTags(String appName,
String appVersion, String appDescription) {
Set<String> tags = new HashSet<>();
tags.add(ServiceUtils.createNameTag(appName));
if (appVersion != null) {
tags.add(ServiceUtils.createVersionTag(appVersion));
}
if (appDescription != null) {
tags.add(ServiceUtils.createDescriptionTag(appDescription));
}
return tags;
}
/**
* Validate the artifact.
* @param artifact artifact to validate
* @param fileSystem filesystem for resolving the artifact
*/
public abstract void validateArtifact(Artifact artifact, FileSystem
fileSystem) throws IOException;
protected abstract void validateConfigFile(ConfigFile configFile, FileSystem
fileSystem) throws IOException;
/**
* Validate the config files.
* @param configFiles config file list
* @param fs file system
*/
public void validateConfigFiles(List<ConfigFile> configFiles,
FileSystem fs) throws IOException {
Set<String> destFileSet = new HashSet<>();
for (ConfigFile file : configFiles) {
if (file.getType() == null) {
throw new IllegalArgumentException("File type is empty");
}
if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE)) {
if (StringUtils.isEmpty(file.getSrcFile()) &&
!file.getProperties().containsKey(CONTENT)) {
throw new IllegalArgumentException(MessageFormat.format("For {0} " +
"format, either src_file must be specified in ConfigFile," +
" or the \"{1}\" key must be specified in " +
"the 'properties' field of ConfigFile. ",
ConfigFile.TypeEnum.TEMPLATE, CONTENT));
}
}
if (!StringUtils.isEmpty(file.getSrcFile())) {
Path p = new Path(file.getSrcFile());
if (!fs.exists(p)) {
throw new IllegalArgumentException(
"Src_file does not exist for config file: " + file
.getSrcFile());
}
}
if (StringUtils.isEmpty(file.getDestFile())) {
throw new IllegalArgumentException("Dest_file is empty.");
}
if (destFileSet.contains(file.getDestFile())) {
throw new IllegalArgumentException(
"Duplicated ConfigFile exists: " + file.getDestFile());
}
destFileSet.add(file.getDestFile());
java.nio.file.Path destPath = Paths.get(file.getDestFile());
if (!destPath.isAbsolute() && destPath.getNameCount() > 1) {
throw new IllegalArgumentException("Non-absolute dest_file has more " +
"than one path element");
}
// provider-specific validation
validateConfigFile(file, fs);
}
}
}
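
The dest_file path rules above are easy to misread, so here is the check in isolation (a sketch; destFileOk is an invented helper name):

import java.nio.file.Path;
import java.nio.file.Paths;

static boolean destFileOk(String destFile) {
  Path p = Paths.get(destFile);
  // Accepted: absolute paths, or relative paths with a single element.
  return p.isAbsolute() || p.getNameCount() <= 1;
}
// destFileOk("app.conf")          -> true   (relative, one element)
// destFileOk("conf/app.conf")     -> false  (relative, multi-element)
// destFileOk("/etc/app/app.conf") -> true   (absolute; note the default
//   client provider further below rejects absolute dest_file paths)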
View File
@ -0,0 +1,113 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.provider;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.apache.hadoop.yarn.service.exceptions.SliderException;
import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
import org.apache.hadoop.yarn.service.containerlaunch.CommandLineBuilder;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.ServiceContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Map;
import java.util.Map.Entry;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_INTERVAL;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_MAX;
import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.$;
public abstract class AbstractProviderService implements ProviderService,
YarnServiceConstants {
protected static final Logger log =
LoggerFactory.getLogger(AbstractProviderService.class);
public abstract void processArtifact(AbstractLauncher launcher,
ComponentInstance compInstance, SliderFileSystem fileSystem,
Service service)
throws IOException;
public void buildContainerLaunchContext(AbstractLauncher launcher,
Service service, ComponentInstance instance,
SliderFileSystem fileSystem, Configuration yarnConf)
throws IOException, SliderException {
    Component component = instance.getComponent().getComponentSpec();
processArtifact(launcher, instance, fileSystem, service);
ServiceContext context =
instance.getComponent().getScheduler().getContext();
// Generate tokens (key-value pair) for config substitution.
// Get pre-defined tokens
Map<String, String> globalTokens =
instance.getComponent().getScheduler().globalTokens;
Map<String, String> tokensForSubstitution = ProviderUtils
.initCompTokensForSubstitute(instance);
tokensForSubstitution.putAll(globalTokens);
// Set the environment variables in launcher
launcher.putEnv(ServiceUtils
.buildEnvMap(component.getConfiguration(), tokensForSubstitution));
launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$());
launcher.setEnv("LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
if (System.getenv(HADOOP_USER_NAME) != null) {
launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME));
}
launcher.setEnv("LANG", "en_US.UTF-8");
launcher.setEnv("LC_ALL", "en_US.UTF-8");
launcher.setEnv("LANGUAGE", "en_US.UTF-8");
for (Entry<String, String> entry : launcher.getEnv().entrySet()) {
tokensForSubstitution.put($(entry.getKey()), entry.getValue());
}
//TODO add component host tokens?
// ProviderUtils.addComponentHostTokens(tokensForSubstitution, amState);
// create config file on hdfs and add local resource
ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
component, tokensForSubstitution, instance, context);
// substitute launch command
String launchCommand = component.getLaunchCommand();
// docker container may have empty commands
if (!StringUtils.isEmpty(launchCommand)) {
launchCommand = ProviderUtils
.substituteStrWithTokens(launchCommand, tokensForSubstitution);
CommandLineBuilder operation = new CommandLineBuilder();
operation.add(launchCommand);
operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
launcher.addCommand(operation.build());
}
// By default retry forever every 30 seconds
launcher.setRetryContext(YarnServiceConf
.getInt(CONTAINER_RETRY_MAX, -1, service.getConfiguration(),
yarnConf), YarnServiceConf
.getInt(CONTAINER_RETRY_INTERVAL, 30000, service.getConfiguration(),
yarnConf));
}
}
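
To make the token plumbing above concrete, a small sketch of the launch-command substitution step. The "${...}" token-key shape is an assumption here; it mirrors the $() wrapping applied to env names, whose implementation is not shown in this diff.

static String buildCommand() {
  Map<String, String> tokens = new HashMap<>();
  tokens.put("${WORK_DIR}", "/grid/work");
  // Keys are quoted literally (Pattern.quote) before replacement, so token
  // names may safely contain regex metacharacters such as "$" and "{".
  return ProviderUtils.substituteStrWithTokens(
      "run.sh --dir ${WORK_DIR}", tokens); // -> "run.sh --dir /grid/work"
}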
View File
@ -0,0 +1,76 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.provider;
import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderFactory;
import org.apache.hadoop.yarn.service.api.records.Artifact;
import org.apache.hadoop.yarn.service.provider.docker.DockerProviderFactory;
import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Base class for factories.
*/
public abstract class ProviderFactory {
protected static final Logger LOG =
LoggerFactory.getLogger(ProviderFactory.class);
protected ProviderFactory() {}
public abstract AbstractClientProvider createClientProvider();
public abstract ProviderService createServerProvider();
public static synchronized ProviderService getProviderService(Artifact
artifact) {
return createServiceProviderFactory(artifact).createServerProvider();
}
public static synchronized AbstractClientProvider getClientProvider(Artifact
artifact) {
return createServiceProviderFactory(artifact).createClientProvider();
}
/**
* Create a provider factory for the given artifact.
* @param artifact artifact
* @return provider factory
*/
public static synchronized ProviderFactory createServiceProviderFactory(
Artifact artifact) {
if (artifact == null || artifact.getType() == null) {
LOG.debug("Loading service provider type default");
return DefaultProviderFactory.getInstance();
}
LOG.debug("Loading service provider type {}", artifact.getType());
switch (artifact.getType()) {
// TODO add handling for custom types?
// TODO handle service
case DOCKER:
return DockerProviderFactory.getInstance();
case TARBALL:
return TarballProviderFactory.getInstance();
default:
throw new IllegalArgumentException(String.format("Resolution error, " +
"%s should not be passed to createServiceProviderFactory",
artifact.getType()));
}
}
}
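
A hypothetical caller (not part of this patch) wiring both provider sides from an artifact, per the switch above:

static void wire(Artifact artifact) {
  // DOCKER and TARBALL resolve to their respective factories; a null
  // artifact, or one with a null type, selects the default provider.
  ProviderService server = ProviderFactory.getProviderService(artifact);
  AbstractClientProvider client = ProviderFactory.getClientProvider(artifact);
}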
View File
@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.provider;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.apache.hadoop.yarn.service.exceptions.SliderException;
import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import java.io.IOException;
public interface ProviderService {
/**
* Set up the entire container launch context.
*/
void buildContainerLaunchContext(AbstractLauncher containerLauncher,
Service service, ComponentInstance instance,
SliderFileSystem sliderFileSystem, Configuration yarnConf)
throws IOException, SliderException;
}
View File
@ -0,0 +1,408 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.provider;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.service.ServiceContext;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.ConfigFile;
import org.apache.hadoop.yarn.service.api.records.ConfigFormat;
import org.apache.hadoop.yarn.service.api.records.Configuration;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
import org.apache.hadoop.yarn.service.exceptions.BadCommandArgumentsException;
import org.apache.hadoop.yarn.service.exceptions.SliderException;
import org.apache.hadoop.yarn.service.utils.PublishedConfiguration;
import org.apache.hadoop.yarn.service.utils.PublishedConfigurationOutputter;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.regex.Pattern;
import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*;
/**
* A collection of static methods handy for providers.
*/
public class ProviderUtils implements YarnServiceConstants {
protected static final Logger log =
LoggerFactory.getLogger(ProviderUtils.class);
/**
* Add the JAR containing a given provider class to the classpath. This does
* not work on minicluster test runs where the JAR is not built up.
* @param providerResources map of provider resources to add these entries to
* @param providerClass provider to add
* @param jarName name of the jar to use
* @param sliderFileSystem target filesystem
* @param tempPath path in the cluster FS for temp files
* @param libdir relative directory to place resources
* @param miniClusterTestRun true if minicluster is being used
* @return true if the class was found in a JAR
*
* @throws FileNotFoundException if the JAR was not found and this is NOT
* a mini cluster test run
* @throws IOException IO problems
* @throws SliderException any Slider problem
*/
public static boolean addProviderJar(
Map<String, LocalResource> providerResources,
Class providerClass,
String jarName,
SliderFileSystem sliderFileSystem,
Path tempPath,
String libdir,
boolean miniClusterTestRun) throws
IOException,
SliderException {
try {
ServiceUtils.putJar(providerResources,
sliderFileSystem,
providerClass,
tempPath,
libdir,
jarName);
return true;
} catch (FileNotFoundException e) {
if (miniClusterTestRun) {
return false;
} else {
throw e;
}
}
}
/**
* Loads all dependency jars from the default path.
* @param providerResources map of provider resources to add these entries to
* @param sliderFileSystem target filesystem
* @param tempPath path in the cluster FS for temp files
* @param libDir relative directory to place resources
* @param libLocalSrcDir explicitly supplied local libs dir
* @throws IOException trouble copying to HDFS
* @throws SliderException trouble copying to HDFS
*/
public static void addAllDependencyJars(
Map<String, LocalResource> providerResources,
SliderFileSystem sliderFileSystem,
Path tempPath,
String libDir,
String libLocalSrcDir)
throws IOException, SliderException {
if (ServiceUtils.isSet(libLocalSrcDir)) {
File file = new File(libLocalSrcDir);
if (!file.exists() || !file.isDirectory()) {
throw new BadCommandArgumentsException(
"Supplied lib src dir %s is not valid", libLocalSrcDir);
}
}
ServiceUtils.putAllJars(providerResources, sliderFileSystem, tempPath,
libDir, libLocalSrcDir);
}
public static String substituteStrWithTokens(String content,
Map<String, String> tokensForSubstitution) {
for (Map.Entry<String, String> token : tokensForSubstitution.entrySet()) {
content =
content.replaceAll(Pattern.quote(token.getKey()), token.getValue());
}
return content;
}
// substitute tokens in config values using the corresponding entries in tokenMap
public static void substituteMapWithTokens(Map<String, String> configs,
Map<String, String> tokenMap) {
for (Map.Entry<String, String> entry : configs.entrySet()) {
String value = entry.getValue();
if (tokenMap != null) {
for (Map.Entry<String, String> token : tokenMap.entrySet()) {
value =
value.replaceAll(Pattern.quote(token.getKey()), token.getValue());
}
}
entry.setValue(value);
}
}
/**
* Localize the keytabs for the service.
* @param launcher container launcher
* @param fileSystem file system
* @param service service spec
* @throws IOException trouble uploading to HDFS
*/
public void localizeServiceKeytabs(AbstractLauncher launcher,
SliderFileSystem fileSystem, Service service) throws IOException {
Configuration conf = service.getConfiguration();
String keytabPathOnHost =
conf.getProperty(YarnServiceConf.KEY_AM_KEYTAB_LOCAL_PATH);
if (ServiceUtils.isUnset(keytabPathOnHost)) {
String amKeytabName =
conf.getProperty(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_NAME);
String keytabDir =
conf.getProperty(YarnServiceConf.KEY_HDFS_KEYTAB_DIR);
// we need to localize the keytab files in the directory
Path keytabDirPath = fileSystem.buildKeytabPath(keytabDir, null,
service.getName());
boolean serviceKeytabsDeployed = false;
if (fileSystem.getFileSystem().exists(keytabDirPath)) {
FileStatus[] keytabs = fileSystem.getFileSystem().listStatus(
keytabDirPath);
LocalResource keytabRes;
for (FileStatus keytab : keytabs) {
if (!amKeytabName.equals(keytab.getPath().getName())
&& keytab.getPath().getName().endsWith(".keytab")) {
serviceKeytabsDeployed = true;
log.info("Localizing keytab {}", keytab.getPath().getName());
keytabRes = fileSystem.createAmResource(keytab.getPath(),
LocalResourceType.FILE);
launcher.addLocalResource(KEYTAB_DIR + "/" +
keytab.getPath().getName(),
keytabRes);
}
}
}
if (!serviceKeytabsDeployed) {
log.warn("No service keytabs for the service have been localized. "
+ "If the service requires keytabs for secure operation, "
+ "please ensure that the required keytabs have been uploaded "
+ "to the folder {}", keytabDirPath);
}
}
}
public static Path initCompInstanceDir(SliderFileSystem fs,
ComponentInstance instance) {
Path compDir = new Path(new Path(fs.getAppDir(), "components"),
instance.getCompName());
Path compInstanceDir = new Path(compDir, instance.getCompInstanceName());
instance.setCompInstanceDir(compInstanceDir);
return compInstanceDir;
}
// 1. Create all config files for a component on hdfs for localization
// 2. Add each config file to the launcher as a local resource
public static synchronized void createConfigFileAndAddLocalResource(
AbstractLauncher launcher, SliderFileSystem fs, Component component,
Map<String, String> tokensForSubstitution, ComponentInstance instance,
ServiceContext context) throws IOException {
Path compInstanceDir = initCompInstanceDir(fs, instance);
if (!fs.getFileSystem().exists(compInstanceDir)) {
log.info(instance.getCompInstanceId() + ": Creating dir on hdfs: " + compInstanceDir);
fs.getFileSystem().mkdirs(compInstanceDir,
new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
} else {
log.info("Component instance conf dir already exists: " + compInstanceDir);
}
if (log.isDebugEnabled()) {
log.debug("Tokens substitution for component instance: " + instance
.getCompInstanceName() + System.lineSeparator()
+ tokensForSubstitution);
}
for (ConfigFile originalFile : component.getConfiguration().getFiles()) {
ConfigFile configFile = originalFile.copy();
String fileName = new Path(configFile.getDestFile()).getName();
// substitute file name
for (Map.Entry<String, String> token : tokensForSubstitution.entrySet()) {
configFile.setDestFile(configFile.getDestFile()
.replaceAll(Pattern.quote(token.getKey()), token.getValue()));
}
Path remoteFile = new Path(compInstanceDir, fileName);
if (!fs.getFileSystem().exists(remoteFile)) {
log.info("Saving config file on hdfs for component " + instance
.getCompInstanceName() + ": " + configFile);
if (configFile.getSrcFile() != null) {
// Load config file template
switch (configFile.getType()) {
case HADOOP_XML:
// Hadoop_xml_template
resolveHadoopXmlTemplateAndSaveOnHdfs(fs.getFileSystem(),
tokensForSubstitution, configFile, remoteFile, context);
break;
case TEMPLATE:
// plain-template
resolvePlainTemplateAndSaveOnHdfs(fs.getFileSystem(),
tokensForSubstitution, configFile, remoteFile, context);
break;
default:
log.info("Not supporting loading src_file for " + configFile);
break;
}
} else {
// If src_file is not specified
resolvePropsInConfigFileAndSaveOnHdfs(fs, tokensForSubstitution,
instance, configFile, fileName, remoteFile);
}
}
// Add resource for localization
LocalResource configResource =
fs.createAmResource(remoteFile, LocalResourceType.FILE);
File destFile = new File(configFile.getDestFile());
String symlink = APP_CONF_DIR + "/" + fileName;
if (destFile.isAbsolute()) {
launcher.addLocalResource(symlink, configResource,
configFile.getDestFile());
log.info("Add config file for localization: " + symlink + " -> "
+ configResource.getResource().getFile() + ", dest mount path: "
+ configFile.getDestFile());
} else {
launcher.addLocalResource(symlink, configResource);
log.info("Add config file for localization: " + symlink + " -> "
+ configResource.getResource().getFile());
}
}
}
private static void resolvePropsInConfigFileAndSaveOnHdfs(SliderFileSystem fs,
Map<String, String> tokensForSubstitution, ComponentInstance instance,
ConfigFile configFile, String fileName, Path remoteFile)
throws IOException {
// substitute non-template configs
substituteMapWithTokens(configFile.getProperties(), tokensForSubstitution);
// write configs onto hdfs
PublishedConfiguration publishedConfiguration =
new PublishedConfiguration(fileName,
configFile.getProperties().entrySet());
if (!fs.getFileSystem().exists(remoteFile)) {
PublishedConfigurationOutputter configurationOutputter =
PublishedConfigurationOutputter.createOutputter(
ConfigFormat.resolve(configFile.getType().toString()),
publishedConfiguration);
try (FSDataOutputStream os = fs.getFileSystem().create(remoteFile)) {
configurationOutputter.save(os);
os.flush();
}
} else {
log.info("Component instance = " + instance.getCompInstanceName()
+ ", config file already exists: " + remoteFile);
}
}
// 1. substitute config template - only handle hadoop_xml format
// 2. save on hdfs
@SuppressWarnings("unchecked")
private static void resolveHadoopXmlTemplateAndSaveOnHdfs(FileSystem fs,
Map<String, String> tokensForSubstitution, ConfigFile configFile,
Path remoteFile, ServiceContext context) throws IOException {
Map<String, String> conf;
try {
conf = (Map<String, String>) context.configCache.get(configFile);
} catch (ExecutionException e) {
log.info("Failed to load config file: " + configFile, e);
return;
}
// make a copy for substitution
org.apache.hadoop.conf.Configuration confCopy =
new org.apache.hadoop.conf.Configuration(false);
for (Map.Entry<String, String> entry : conf.entrySet()) {
confCopy.set(entry.getKey(), entry.getValue());
}
// substitute properties
for (Map.Entry<String, String> entry : configFile.getProperties().entrySet()) {
confCopy.set(entry.getKey(), entry.getValue());
}
// substitute tokens (including env variables) in all values
for (Map.Entry<String, String> entry : confCopy) {
String val = entry.getValue();
if (val != null) {
for (Map.Entry<String, String> token : tokensForSubstitution
.entrySet()) {
val = val.replaceAll(Pattern.quote(token.getKey()), token.getValue());
confCopy.set(entry.getKey(), val);
}
}
}
// save on hdfs
try (OutputStream output = fs.create(remoteFile)) {
confCopy.writeXml(output);
log.info("Reading config from: " + configFile.getSrcFile()
+ ", writing to: " + remoteFile);
}
}
// 1) read the template as a string
// 2) do token substitution
// 3) save on hdfs
private static void resolvePlainTemplateAndSaveOnHdfs(FileSystem fs,
Map<String, String> tokensForSubstitution, ConfigFile configFile,
Path remoteFile, ServiceContext context) {
String content;
try {
content = (String) context.configCache.get(configFile);
} catch (ExecutionException e) {
log.info("Failed to load config file: " + configFile, e);
return;
}
// substitute tokens
content = substituteStrWithTokens(content, tokensForSubstitution);
try (OutputStream output = fs.create(remoteFile)) {
org.apache.commons.io.IOUtils.write(content, output);
} catch (IOException e) {
log.info("Failed to create " + remoteFile);
}
}
/**
* Get initial component token map to be substituted into config values.
* @param instance component instance
* @return tokens to replace
*/
public static Map<String, String> initCompTokensForSubstitute(
ComponentInstance instance) {
Map<String, String> tokens = new HashMap<>();
tokens.put(COMPONENT_NAME, instance.getCompSpec().getName());
tokens
.put(COMPONENT_NAME_LC, instance.getCompSpec().getName().toLowerCase());
tokens.put(COMPONENT_INSTANCE_NAME, instance.getCompInstanceName());
tokens.put(CONTAINER_ID, instance.getContainer().getId().toString());
tokens.put(COMPONENT_ID,
String.valueOf(instance.getCompInstanceId().getId()));
tokens.putAll(instance.getComponent().getDependencyHostIpTokens());
return tokens;
}
}
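
As a standalone sketch of the hadoop_xml path above (renderHadoopXml is an invented name; it performs the same copy-substitute-serialize steps in memory instead of on HDFS):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Map;
import java.util.regex.Pattern;

static byte[] renderHadoopXml(Map<String, String> raw,
    Map<String, String> tokens) throws IOException {
  // Start from Configuration(false) so no cluster defaults leak in.
  org.apache.hadoop.conf.Configuration confCopy =
      new org.apache.hadoop.conf.Configuration(false);
  for (Map.Entry<String, String> e : raw.entrySet()) {
    String val = e.getValue();
    for (Map.Entry<String, String> t : tokens.entrySet()) {
      val = val.replaceAll(Pattern.quote(t.getKey()), t.getValue());
    }
    confCopy.set(e.getKey(), val);
  }
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  confCopy.writeXml(out); // the same serialization step used above
  return out.toByteArray();
}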
View File
@ -0,0 +1,46 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.provider.defaultImpl;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
import org.apache.hadoop.yarn.service.api.records.Artifact;
import org.apache.hadoop.yarn.service.api.records.ConfigFile;
import java.io.IOException;
import java.nio.file.Paths;
public class DefaultClientProvider extends AbstractClientProvider {
public DefaultClientProvider() {
}
@Override
public void validateArtifact(Artifact artifact, FileSystem fileSystem) {
}
@Override
protected void validateConfigFile(ConfigFile configFile, FileSystem
fileSystem) throws IOException {
// validate dest_file is not absolute
if (Paths.get(configFile.getDestFile()).isAbsolute()) {
throw new IllegalArgumentException(
"Dest_file must not be absolute path: " + configFile.getDestFile());
}
}
}
Some files were not shown because too many files have changed in this diff.