diff --git a/LICENSE.txt b/LICENSE.txt index 67472b6b5ce..b0cef03c547 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1777,6 +1777,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The binary distribution of this product bundles these dependencies under the following license: FindBugs-jsr305 3.0.0 +dnsjava 2.1.7, Copyright (c) 1998-2011, Brian Wellington. All rights reserved. -------------------------------------------------------------------------------- (2-clause BSD) Redistribution and use in source and binary forms, with or without diff --git a/NOTICE.txt b/NOTICE.txt index 0718909cb1b..bc7a26f9449 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -581,3 +581,13 @@ The binary distribution of this product bundles binaries of Ehcache 3.3.1, which has the following notices: * Ehcache V3 Copyright 2014-2016 Terracotta, Inc. + +The binary distribution of this product bundles binaries of +snakeyaml (https://bitbucket.org/asomov/snakeyaml), +which has the following notices: + * Copyright (c) 2008, http://www.snakeyaml.org + +The binary distribution of this product bundles binaries of +swagger-annotations (https://github.com/swagger-api/swagger-core), +which has the following notices: + * Copyright 2016 SmartBear Software diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml index 289061f8add..83633ac2071 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml @@ -86,6 +86,31 @@ *-sources.jar + + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/target + /share/hadoop/${hadoop.component}/sources + + *-sources.jar + + + + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/conf + etc/hadoop + + + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples + 
/share/hadoop/${hadoop.component}/yarn-service-examples + + **/* + + + + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/target + /share/hadoop/${hadoop.component}/sources + + *-sources.jar + + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/target /share/hadoop/${hadoop.component}/sources diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e44a50cc857..f99e41386c4 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -89,6 +89,7 @@ 2.12.0 3.0.0 3.1.0-RC1 + 2.1.7 11.0.2 4.0 @@ -142,6 +143,9 @@ ${project.version} + + 1.5.4 + 1.16 @@ -422,6 +426,12 @@ ${project.version} + + org.apache.hadoop + hadoop-yarn-services-core + ${project.version} + + org.apache.hadoop hadoop-mapreduce-client-jobclient @@ -597,6 +607,11 @@ javax.servlet-api 3.1.0 + + javax.ws.rs + jsr311-api + 1.1.1 + org.eclipse.jetty jetty-server @@ -956,11 +971,6 @@ jackson-module-jaxb-annotations ${jackson2.version} - - com.fasterxml.jackson.jaxrs - jackson-jaxrs-json-provider - ${jackson2.version} - com.fasterxml.jackson.dataformat jackson-dataformat-cbor @@ -1213,6 +1223,13 @@ + + + dnsjava + dnsjava + ${dnsjava.version} + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml new file mode 100644 index 00000000000..ddea2a18f23 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml @@ -0,0 +1,130 @@ + + + 4.0.0 + + org.apache.hadoop + hadoop-yarn-applications + 3.1.0-SNAPSHOT + + hadoop-yarn-services-api + Apache Hadoop YARN Services API + jar + Hadoop YARN REST APIs for services + + + + + + + src/main/resources + true + + + src/main/scripts/ + true + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + + + development + ${project.url} + + + + + + + + + + test-jar + + + + + + + + + + + + + + org.apache.hadoop + 
hadoop-yarn-services-core + + + org.apache.hadoop + hadoop-yarn-api + + + org.apache.hadoop + hadoop-yarn-common + + + org.apache.hadoop + hadoop-common + + + org.slf4j + slf4j-api + + + org.eclipse.jetty + jetty-webapp + + + com.google.inject + guice + + + javax.ws.rs + jsr311-api + + + org.mockito + mockito-all + test + + + + + + + + org.apache.hadoop + hadoop-common + test-jar + + + junit + junit + test + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java new file mode 100644 index 00000000000..1bb6c9387fe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java @@ -0,0 +1,298 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.webapp; + +import com.google.inject.Inject; +import com.google.inject.Singleton; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; +import org.apache.hadoop.yarn.service.api.records.ServiceStatus; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +import static org.apache.hadoop.yarn.service.api.records.ServiceState.ACCEPTED; +import static org.apache.hadoop.yarn.service.conf.RestApiConstants.*; + +/** + * The rest API endpoints for users to manage services on YARN. 
+ */ +@Singleton +@Path(CONTEXT_ROOT) +public class ApiServer { + + public ApiServer() { + super(); + } + + @Inject + public ApiServer(Configuration conf) { + super(); + } + + private static final Logger LOG = + LoggerFactory.getLogger(ApiServer.class); + private static Configuration YARN_CONFIG = new YarnConfiguration(); + private static ServiceClient SERVICE_CLIENT; + + static { + init(); + } + + // initialize all the common resources - order is important + private static void init() { + SERVICE_CLIENT = new ServiceClient(); + SERVICE_CLIENT.init(YARN_CONFIG); + SERVICE_CLIENT.start(); + } + + @GET + @Path(VERSION) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response getVersion() { + String version = VersionInfo.getBuildVersion(); + LOG.info(version); + return Response.ok("{ \"hadoop_version\": \"" + version + "\"}").build(); + } + + @POST + @Path(SERVICE_ROOT_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response createService(Service service) { + LOG.info("POST: createService = {}", service); + ServiceStatus serviceStatus = new ServiceStatus(); + try { + ApplicationId applicationId = SERVICE_CLIENT.actionCreate(service); + LOG.info("Successfully created service " + service.getName() + + " applicationId = " + applicationId); + serviceStatus.setState(ACCEPTED); + serviceStatus.setUri( + CONTEXT_ROOT + SERVICE_ROOT_PATH + "/" + service + .getName()); + return Response.status(Status.ACCEPTED).entity(serviceStatus).build(); + } catch (IllegalArgumentException e) { + serviceStatus.setDiagnostics(e.getMessage()); + return Response.status(Status.BAD_REQUEST).entity(serviceStatus) + .build(); + } catch (Exception e) { + String message = "Failed to create service " + service.getName(); + LOG.error(message, e); + serviceStatus.setDiagnostics(message + ": " + e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR) + .entity(serviceStatus).build(); + 
} + } + + @GET + @Path(SERVICE_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response getService(@PathParam(SERVICE_NAME) String appName) { + LOG.info("GET: getService for appName = {}", appName); + ServiceStatus serviceStatus = new ServiceStatus(); + try { + Service app = SERVICE_CLIENT.getStatus(appName); + return Response.ok(app).build(); + } catch (IllegalArgumentException e) { + serviceStatus.setDiagnostics(e.getMessage()); + serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID); + return Response.status(Status.NOT_FOUND).entity(serviceStatus) + .build(); + } catch (Exception e) { + LOG.error("Get service failed", e); + serviceStatus + .setDiagnostics("Failed to retrieve service: " + e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR) + .entity(serviceStatus).build(); + } + } + + @DELETE + @Path(SERVICE_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response deleteService(@PathParam(SERVICE_NAME) String appName) { + LOG.info("DELETE: deleteService for appName = {}", appName); + return stopService(appName, true); + } + + private Response stopService(String appName, boolean destroy) { + try { + SERVICE_CLIENT.actionStop(appName, destroy); + if (destroy) { + SERVICE_CLIENT.actionDestroy(appName); + LOG.info("Successfully deleted service {}", appName); + } else { + LOG.info("Successfully stopped service {}", appName); + } + return Response.status(Status.OK).build(); + } catch (ApplicationNotFoundException e) { + ServiceStatus serviceStatus = new ServiceStatus(); + serviceStatus.setDiagnostics( + "Service " + appName + " is not found in YARN: " + e.getMessage()); + return Response.status(Status.BAD_REQUEST).entity(serviceStatus) + .build(); + } catch (Exception e) { + ServiceStatus serviceStatus = new ServiceStatus(); + serviceStatus.setDiagnostics(e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR) + 
.entity(serviceStatus).build(); + } + } + + @PUT + @Path(COMPONENT_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN }) + public Response updateComponent(@PathParam(SERVICE_NAME) String appName, + @PathParam(COMPONENT_NAME) String componentName, Component component) { + + if (component.getNumberOfContainers() < 0) { + return Response.status(Status.BAD_REQUEST).entity( + "Service = " + appName + ", Component = " + component.getName() + + ": Invalid number of containers specified " + component + .getNumberOfContainers()).build(); + } + ServiceStatus status = new ServiceStatus(); + try { + Map original = SERVICE_CLIENT.flexByRestService(appName, + Collections.singletonMap(component.getName(), + component.getNumberOfContainers())); + status.setDiagnostics( + "Updating component (" + componentName + ") size from " + original + .get(componentName) + " to " + component.getNumberOfContainers()); + return Response.ok().entity(status).build(); + } catch (YarnException | IOException e) { + status.setDiagnostics(e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR).entity(status) + .build(); + } + } + + @PUT + @Path(SERVICE_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response updateService(@PathParam(SERVICE_NAME) String appName, + Service updateServiceData) { + LOG.info("PUT: updateService for app = {} with data = {}", appName, + updateServiceData); + + // Ignore the app name provided in updateServiceData and always use appName + // path param + updateServiceData.setName(appName); + + // For STOP the app should be running. If already stopped then this + // operation will be a no-op. For START it should be in stopped state. + // If already running then this operation will be a no-op. 
+ if (updateServiceData.getState() != null + && updateServiceData.getState() == ServiceState.STOPPED) { + return stopService(appName, false); + } + + // If a START is requested + if (updateServiceData.getState() != null + && updateServiceData.getState() == ServiceState.STARTED) { + return startService(appName); + } + + // If new lifetime value specified then update it + if (updateServiceData.getLifetime() != null + && updateServiceData.getLifetime() > 0) { + return updateLifetime(appName, updateServiceData); + } + + // If nothing happens consider it a no-op + return Response.status(Status.NO_CONTENT).build(); + } + + private Response updateLifetime(String appName, Service updateAppData) { + ServiceStatus status = new ServiceStatus(); + try { + String newLifeTime = + SERVICE_CLIENT.updateLifetime(appName, updateAppData.getLifetime()); + status.setDiagnostics( + "Service (" + appName + ")'s lifeTime is updated to " + newLifeTime + + ", " + updateAppData.getLifetime() + + " seconds remaining"); + return Response.ok(status).build(); + } catch (Exception e) { + String message = + "Failed to update service (" + appName + ")'s lifetime to " + + updateAppData.getLifetime(); + LOG.error(message, e); + status.setDiagnostics(message + ": " + e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR).entity(status) + .build(); + } + } + + private Response startService(String appName) { + ServiceStatus status = new ServiceStatus(); + try { + SERVICE_CLIENT.actionStart(appName); + LOG.info("Successfully started service " + appName); + status.setDiagnostics("Service " + appName + " is successfully started."); + status.setState(ServiceState.ACCEPTED); + return Response.ok(status).build(); + } catch (Exception e) { + String message = "Failed to start service " + appName; + status.setDiagnostics(message + ": " + e.getMessage()); + LOG.info(message, e); + return Response.status(Status.INTERNAL_SERVER_ERROR) + .entity(status).build(); + } + } + + /** + * Used by negative 
test case. + * + * @param mockServerClient - A mocked version of ServiceClient + */ + public static void setServiceClient(ServiceClient mockServerClient) { + SERVICE_CLIENT = mockServerClient; + SERVICE_CLIENT.init(YARN_CONFIG); + SERVICE_CLIENT.start(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java new file mode 100644 index 00000000000..f4acd942cc9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.webapp; + +import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AuthenticationFilterInitializer; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; +import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; +import org.eclipse.jetty.webapp.Configuration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*; + +/** + * This class launches the web service using Hadoop HttpServer2 (which uses + * an embedded Jetty container). This is the entry point to your service. + * The Java command used to launch this app should call the main method. 
+ */ +public class ApiServerWebApp extends AbstractService { + private static final Logger logger = LoggerFactory + .getLogger(ApiServerWebApp.class); + private static final String SEP = ";"; + + // REST API server for YARN native services + private HttpServer2 apiServer; + private InetSocketAddress bindAddress; + + public static void main(String[] args) throws IOException { + ApiServerWebApp apiWebApp = new ApiServerWebApp(); + try { + apiWebApp.init(new YarnConfiguration()); + apiWebApp.serviceStart(); + } catch (Exception e) { + logger.error("Got exception starting", e); + apiWebApp.close(); + } + } + + public ApiServerWebApp() { + super(ApiServerWebApp.class.getName()); + } + + @Override + protected void serviceStart() throws Exception { + bindAddress = getConfig().getSocketAddr(API_SERVER_ADDRESS, + DEFAULT_API_SERVER_ADDRESS, DEFAULT_API_SERVER_PORT); + logger.info("YARN API server running on " + bindAddress); + if (UserGroupInformation.isSecurityEnabled()) { + doSecureLogin(getConfig()); + } + startWebApp(); + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + if (apiServer != null) { + apiServer.stop(); + } + super.serviceStop(); + } + + private void doSecureLogin(org.apache.hadoop.conf.Configuration conf) + throws IOException { + SecurityUtil.login(conf, YarnConfiguration.RM_KEYTAB, + YarnConfiguration.RM_PRINCIPAL, bindAddress.getHostName()); + addFilters(conf); + } + + private void addFilters(org.apache.hadoop.conf.Configuration conf) { + // Always load pseudo authentication filter to parse "user.name" in an URL + // to identify a HTTP request's user. 
+ boolean hasHadoopAuthFilterInitializer = false; + String filterInitializerConfKey = "hadoop.http.filter.initializers"; + Class[] initializersClasses = + conf.getClasses(filterInitializerConfKey); + List targets = new ArrayList(); + if (initializersClasses != null) { + for (Class initializer : initializersClasses) { + if (initializer.getName().equals( + AuthenticationFilterInitializer.class.getName())) { + hasHadoopAuthFilterInitializer = true; + break; + } + targets.add(initializer.getName()); + } + } + if (!hasHadoopAuthFilterInitializer) { + targets.add(AuthenticationFilterInitializer.class.getName()); + conf.set(filterInitializerConfKey, StringUtils.join(",", targets)); + } + } + + private void startWebApp() throws IOException { + URI uri = URI.create("http://" + NetUtils.getHostPortString(bindAddress)); + + apiServer = new HttpServer2.Builder() + .setName("api-server") + .setConf(getConfig()) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) + .setUsernameConfKey(RM_WEBAPP_SPNEGO_USER_NAME_KEY) + .setKeytabConfKey(RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) + .addEndpoint(uri).build(); + + String apiPackages = + ApiServer.class.getPackage().getName() + SEP + + GenericExceptionHandler.class.getPackage().getName() + SEP + + YarnJacksonJaxbJsonProvider.class.getPackage().getName(); + apiServer.addJerseyResourcePackage(apiPackages, "/*"); + + try { + logger.info("Service starting up. 
Logging start..."); + apiServer.start(); + logger.info("Server status = {}", apiServer.toString()); + for (Configuration conf : apiServer.getWebAppContext() + .getConfigurations()) { + logger.info("Configurations = {}", conf); + } + logger.info("Context Path = {}", Collections.singletonList( + apiServer.getWebAppContext().getContextPath())); + logger.info("ResourceBase = {}", Collections.singletonList( + apiServer.getWebAppContext().getResourceBase())); + logger.info("War = {}", Collections + .singletonList(apiServer.getWebAppContext().getWar())); + } catch (Exception ex) { + logger.error("Hadoop HttpServer2 App **failed**", ex); + throw ex; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md new file mode 100644 index 00000000000..00b21dd60ed --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md @@ -0,0 +1,245 @@ + + +## Examples + +### Create a simple single-component service with most attribute values as defaults +POST URL - http://localhost:9191/ws/v1/services + +##### POST Request JSON +```json +{ + "name": "hello-world", + "components" : + [ + { + "name": "hello", + "number_of_containers": 1, + "artifact": { + "id": "nginx:latest", + "type": "DOCKER" + }, + "launch_command": "./start_nginx.sh", + "resource": { + "cpus": 1, + "memory": "256" + } + } + ] +} +``` + +##### GET Response JSON +GET URL - http://localhost:9191/ws/v1/services/hello-world + +Note, lifetime value of -1 means unlimited lifetime. 
+ +```json +{ + "name": "hello-world", + "id": "application_1503963985568_0002", + "lifetime": -1, + "components": [ + { + "name": "hello", + "dependencies": [], + "resource": { + "cpus": 1, + "memory": "256" + }, + "configuration": { + "properties": {}, + "env": {}, + "files": [] + }, + "quicklinks": [], + "containers": [ + { + "id": "container_e03_1503963985568_0002_01_000001", + "ip": "10.22.8.143", + "hostname": "myhost.local", + "state": "READY", + "launch_time": 1504051512412, + "bare_host": "10.22.8.143", + "component_name": "hello-0" + }, + { + "id": "container_e03_1503963985568_0002_01_000002", + "ip": "10.22.8.143", + "hostname": "myhost.local", + "state": "READY", + "launch_time": 1504051536450, + "bare_host": "10.22.8.143", + "component_name": "hello-1" + } + ], + "launch_command": "./start_nginx.sh", + "number_of_containers": 1, + "run_privileged_container": false + } + ], + "configuration": { + "properties": {}, + "env": {}, + "files": [] + }, + "quicklinks": {} +} + +``` +### Update to modify the lifetime of a service +PUT URL - http://localhost:9191/ws/v1/services/hello-world + +##### PUT Request JSON + +Note, irrespective of what the current lifetime value is, this update request will set the lifetime of the service to be 3600 seconds (1 hour) from the time the request is submitted. Hence, if a service has remaining lifetime of 5 mins (say) and would like to extend it to an hour OR if an application has remaining lifetime of 5 hours (say) and would like to reduce it down to an hour, then for both scenarios you need to submit the same request below. 
+ +```json +{ + "lifetime": 3600 +} +``` +### Stop a service +PUT URL - http://localhost:9191/ws/v1/services/hello-world + +##### PUT Request JSON +```json +{ + "state": "STOPPED" +} +``` + +### Start a service +PUT URL - http://localhost:9191/ws/v1/services/hello-world + +##### PUT Request JSON +```json +{ + "state": "STARTED" +} +``` + +### Update to flex up/down the no of containers (instances) of a component of a service +PUT URL - http://localhost:9191/ws/v1/services/hello-world/components/hello + +##### PUT Request JSON +```json +{ + "name": "hello", + "number_of_containers": 3 +} +``` + +### Destroy a service +DELETE URL - http://localhost:9191/ws/v1/services/hello-world + +*** + +### Create a complicated service - HBase +POST URL - http://localhost:9191:/ws/v1/services/hbase-app-1 + +##### POST Request JSON + +```json +{ + "name": "hbase-app-1", + "lifetime": "3600", + "components": [ + { + "name": "hbasemaster", + "number_of_containers": 1, + "artifact": { + "id": "hbase:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/hdp/current/hbase-master/bin/hbase master start", + "resource": { + "cpus": 1, + "memory": "2048" + }, + "configuration": { + "env": { + "HBASE_LOG_DIR": "" + }, + "files": [ + { + "type": "XML", + "dest_file": "/etc/hadoop/conf/core-site.xml", + "properties": { + "fs.defaultFS": "${CLUSTER_FS_URI}" + } + }, + { + "type": "XML", + "dest_file": "/etc/hbase/conf/hbase-site.xml", + "properties": { + "hbase.cluster.distributed": "true", + "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}", + "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase", + "zookeeper.znode.parent": "${SERVICE_ZK_PATH}", + "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}", + "hbase.master.info.port": "16010" + } + } + ] + } + }, + { + "name": "regionserver", + "number_of_containers": 3, + "unique_component_support": "true", + "artifact": { + "id": "hbase:latest", + "type": "DOCKER" + }, + "launch_command": 
"/usr/hdp/current/hbase-regionserver/bin/hbase regionserver start", + "resource": { + "cpus": 1, + "memory": "2048" + }, + "configuration": { + "env": { + "HBASE_LOG_DIR": "" + }, + "files": [ + { + "type": "XML", + "dest_file": "/etc/hadoop/conf/core-site.xml", + "properties": { + "fs.defaultFS": "${CLUSTER_FS_URI}" + } + }, + { + "type": "XML", + "dest_file": "/etc/hbase/conf/hbase-site.xml", + "properties": { + "hbase.cluster.distributed": "true", + "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}", + "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase", + "zookeeper.znode.parent": "${SERVICE_ZK_PATH}", + "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}", + "hbase.master.info.port": "16010", + "hbase.regionserver.hostname": "${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}" + } + } + ] + } + } + ], + "quicklinks": { + "HBase Master Status UI": "http://hbasemaster0.${SERVICE_NAME}.${USER}.${DOMAIN}:16010/master-status", + "Proxied HBase Master Status UI": "http://app-proxy/${DOMAIN}/${USER}/${SERVICE_NAME}/hbasemaster/16010/" + } +} +``` diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml new file mode 100644 index 00000000000..088b50cca8b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml @@ -0,0 +1,471 @@ +# Hadoop YARN REST APIs for services v1 spec in YAML + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +swagger: '2.0' +info: + title: "YARN Simplified API layer for services" + description: | + Bringing a new service on YARN today is not a simple experience. The APIs of existing + frameworks are either too low level (native YARN), require writing new code (for frameworks with programmatic APIs) + or writing a complex spec (for declarative frameworks). + + This simplified REST API can be used to create and manage the lifecycle of YARN services. + In most cases, the application owner will not be forced to make any changes to their applications. + This is primarily true if the application is packaged with containerization technologies like Docker. + + This document describes the API specifications (aka. YarnFile) for deploying/managing + containerized services on YARN. The same JSON spec can be used for both REST API + and CLI to manage the services. + + version: "1.0.0" + license: + name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html +# the domain of the service +host: host.mycompany.com +port: 9191(default) +# array of all schemes that your API supports +schemes: + - http +consumes: + - application/json +produces: + - application/json +paths: + /ws/v1/services/version: + get: + summary: Get current version of the API server. + description: Get current version of the API server. 
+ responses: + 200: + description: Successful request + + /ws/v1/services: + get: + summary: (TBD) List of services running in the cluster. + description: Get a list of all currently running services (response includes a minimal projection of the service info). For more details do a GET on a specific service name. + responses: + 200: + description: An array of services + schema: + type: array + items: + $ref: '#/definitions/Service' + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + post: + summary: Create a service + description: Create a service. The request JSON is a service object with details required for creation. If the request is successful it returns 202 Accepted. A success of this API only confirms success in submission of the service creation request. There is no guarantee that the service will actually reach a RUNNING state. Resource availability and several other factors determines if the service will be deployed in the cluster. It is expected that clients would subsequently call the GET API to get details of the service and determine its state. + parameters: + - name: Service + in: body + description: Service request object + required: true + schema: + $ref: '#/definitions/Service' + responses: + 202: + description: The request to create a service is accepted + 400: + description: Invalid service definition provided in the request body + 500: + description: Failed to create a service + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + + /ws/v1/services/{service_name}: + put: + summary: Update a service or upgrade the binary version of the components of a running service + description: Update the runtime properties of a service. Currently the following operations are supported - update lifetime, stop/start a service. + The PUT operation is also used to orchestrate an upgrade of the service containers to a newer version of their artifacts (TBD). 
+ parameters: + - name: service_name + in: path + description: Service name + required: true + type: string + - name: Service + in: body + description: The updated service definition. It can contain the updated lifetime of a service or the desired state (STOPPED/STARTED) of a service to initiate a start/stop operation against the specified service + required: true + schema: + $ref: '#/definitions/Service' + responses: + 204: + description: Update or upgrade was successful + 404: + description: Service does not exist + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + delete: + summary: Destroy a service + description: Destroy a service and release all resources. This API might have to return JSON data providing location of logs (TBD), etc. + parameters: + - name: service_name + in: path + description: Service name + required: true + type: string + responses: + 204: + description: Destroy was successful + 404: + description: Service does not exist + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + get: + summary: Get details of a service. + description: Return the details (including containers) of a running service + parameters: + - name: service_name + in: path + description: Service name + required: true + type: string + responses: + 200: + description: a service object + schema: + type: object + items: + $ref: '#/definitions/Service' + examples: + service_name: logsearch + artifact: + id: logsearch:latest + type: docker + 404: + description: Service does not exist + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + /ws/v1/services/{service_name}/components/{component_name}: + put: + summary: Flex a component's number of instances. 
+ description: Set a component's desired number of instances + parameters: + - name: service_name + in: path + description: Service name + required: true + type: string + - name: component_name + in: path + description: Component name + required: true + type: string + - name: Component + in: body + description: The definition of a component which contains the updated number of instances. + required: true + schema: + $ref: '#/definitions/Component' + responses: + 200: + description: Flex was successful + 404: + description: Service does not exist + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' +definitions: + Service: + description: A service resource has the following attributes. + required: + - name + properties: + name: + type: string + description: A unique service name. If Registry DNS is enabled, the max length is 63 characters. + id: + type: string + description: A unique service id. + artifact: + description: The default artifact for all components of the service except the components which have Artifact type set to SERVICE (optional). + $ref: '#/definitions/Artifact' + resource: + description: The default resource for all components of the service (optional). + $ref: '#/definitions/Resource' + launch_time: + type: string + format: date + description: The time when the service was created, e.g. 2016-03-16T01:01:49.000Z. + number_of_running_containers: + type: integer + format: int64 + description: In get response this provides the total number of running containers for this service (across all components) at the time of request. Note, a subsequent request can return a different number as and when more containers get allocated until it reaches the total number of containers or if a flex request has been made between the two requests. 
+ lifetime: + type: integer + format: int64 + description: Life time (in seconds) of the service from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value. + placement_policy: + description: (TBD) Advanced scheduling and placement policies. If not specified, it defaults to the default placement policy of the service owner. The design of placement policies is in the works. It is not very clear at this point, how policies in conjunction with labels will be exposed to service owners. This is a placeholder for now. The advanced structure of this attribute will be determined by YARN-4902. + $ref: '#/definitions/PlacementPolicy' + components: + description: Components of a service. + type: array + items: + $ref: '#/definitions/Component' + configuration: + description: Config properties of a service. Configurations provided at the service/global level are available to all the components. Specific properties can be overridden at the component level. + $ref: '#/definitions/Configuration' + state: + description: State of the service. Specifying a value for this attribute for the PUT payload means update the service to this desired state. + $ref: '#/definitions/ServiceState' + quicklinks: + type: object + description: A blob of key-value pairs of quicklinks to be exported for a service. + additionalProperties: + type: string + queue: + type: string + description: The YARN queue that this service should be submitted to. + Resource: + description: + Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overridden at the component level. Only one of profile OR cpu & memory is expected. It raises a validation exception otherwise. 
+ properties: + profile: + type: string + description: Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc. + cpus: + type: integer + format: int32 + description: Amount of vcores allocated to each container (optional but overrides cpus in profile if specified). + memory: + type: string + description: Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB. + PlacementPolicy: + description: Placement policy of an instance of a service. This feature is in the works in YARN-6592. + properties: + label: + type: string + description: Assigns a service to a named partition of the cluster where the service desires to run (optional). If not specified all services are submitted to a default label of the service owner. One or more labels can be setup for each service owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc. + Artifact: + description: Artifact of a service component. If not specified, component will just run the bare launch command and no artifact will be localized. + required: + - id + properties: + id: + type: string + description: Artifact id. Examples are package location uri for tarball based services, image name for docker, name of service, etc. + type: + type: string + description: Artifact type, like docker, tarball, etc. (optional). For TARBALL type, the specified tarball will be localized to the container local working directory under a folder named lib. For SERVICE type, the service specified will be read and its components will be added into this service. The original component with artifact type SERVICE will be removed (any properties specified in the original component will be ignored). + enum: + - DOCKER + - TARBALL + - SERVICE + default: DOCKER + uri: + type: string + description: Artifact location to support multiple artifact stores (optional). 
+ Component: + description: One or more components of the service. If the service is HBase say, then the component can be a simple role like master or regionserver. If the service is a complex business webapp then a component can be other services say Kafka or Storm. Thereby it opens up the support for complex and nested services. + required: + - name + properties: + name: + type: string + description: Name of the service component (mandatory). If Registry DNS is enabled, the max length is 63 characters. If unique component support is enabled, the max length is lowered to 44 characters. + state: + description: The state of the component + $ref: "#/definitions/ComponentState" + dependencies: + type: array + items: + type: string + description: An array of service components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of a service should be represented as a DAG. + readiness_check: + description: Readiness check for this component. + $ref: '#/definitions/ReadinessCheck' + artifact: + description: Artifact of the component (optional). If not specified, the service level global artifact takes effect. + $ref: '#/definitions/Artifact' + launch_command: + type: string + description: The custom launch command of this component (optional for DOCKER component, required otherwise). When specified at the component level, it overrides the value specified at the global level (if any). + resource: + description: Resource of this component (optional). If not specified, the service level global resource takes effect. + $ref: '#/definitions/Resource' + number_of_containers: + type: integer + format: int64 + description: Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect. + run_privileged_container: + type: boolean + description: Run all containers of this component in privileged mode (YARN-4262). 
+ placement_policy: + description: Advanced scheduling and placement policies for all containers of this component (optional). If not specified, the service level placement_policy takes effect. Refer to the description at the global level for more details. + $ref: '#/definitions/PlacementPolicy' + configuration: + description: Config properties for this component. + $ref: '#/definitions/Configuration' + quicklinks: + type: array + items: + type: string + description: A list of quicklink keys defined at the service level, and to be resolved by this component. + ReadinessCheck: + description: A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. Hence the need for a simple interface, with scope to support advanced usecases. + required: + - type + properties: + type: + type: string + description: E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content). + enum: + - HTTP + - PORT + properties: + type: object + description: A blob of key value pairs that will be used to configure the check. + additionalProperties: + type: string + artifact: + description: Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. Note, artifacts of type docker only is supported for now. NOT IMPLEMENTED YET + $ref: '#/definitions/Artifact' + Configuration: + description: Set of configuration properties that can be injected into the service components via envs, files and custom pluggable helper docker containers. 
Files of several standard formats like xml, properties, json, yaml and templates will be supported. + properties: + properties: + type: object + description: A blob of key-value pairs for configuring the YARN service AM + additionalProperties: + type: string + env: + type: object + description: A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection. + additionalProperties: + type: string + files: + description: Array of list of files that needs to be created and made available as volumes in the service component containers. + type: array + items: + $ref: '#/definitions/ConfigFile' + ConfigFile: + description: A config file that needs to be created and made available as a volume in a service component container. + properties: + type: + type: string + description: Config file in the standard format like xml, properties, json, yaml, template. + enum: + - XML + - PROPERTIES + - JSON + - YAML + - TEMPLATE + - ENV + - HADOOP_XML + dest_file: + type: string + description: The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers. If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf. + src_file: + type: string + description: This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported. 
+ properties: + type: object + description: A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If src_file is specified, src_file content is dumped in the dest_file and these properties will overwrite, if any, existing properties in src_file or be added as new properties in src_file. + Container: + description: An instance of a running service container. + properties: + id: + type: string + description: Unique container id of a running service, e.g. container_e3751_1458061340047_0008_01_000002. + launch_time: + type: string + format: date + description: The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time. + ip: + type: string + description: IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007. + hostname: + type: string + description: Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007. + bare_host: + type: string + description: The bare node or host in which the container is running, e.g. cn008.example.com. + state: + description: State of the container of a service. + $ref: '#/definitions/ContainerState' + component_instance_name: + type: string + description: Name of the component instance that this container instance belongs to. Component instance name is named as $COMPONENT_NAME-i, where i is a + monotonically increasing integer. E.g. A component called nginx can have multiple component instances named as nginx-0, nginx-1 etc. + Each component instance is backed by a container instance. + resource: + description: Resource used for this container. + $ref: '#/definitions/Resource' + artifact: + description: Artifact used for this container. 
+ $ref: '#/definitions/Artifact' + privileged_container: + type: boolean + description: Container running in privileged mode or not. + ServiceState: + description: The current state of a service. + properties: + state: + type: string + description: enum of the state of the service + enum: + - ACCEPTED + - STARTED + - STABLE + - STOPPED + - FAILED + ContainerState: + description: The current state of the container of a service. + properties: + state: + type: string + description: enum of the state of the container + enum: + - INIT + - STARTED + - READY + ComponentState: + description: The state of the component + properties: + state: + type: string + description: enum of the state of the component + enum: + - FLEXING + - STABLE + ServiceStatus: + description: The current status of a submitted service, returned as a response to the GET API. + properties: + diagnostics: + type: string + description: Diagnostic information (if any) for the reason of the current state of the service. It typically has a non-null value, if the service is in a non-running state. + state: + description: Service state. + $ref: '#/definitions/ServiceState' + code: + type: integer + format: int32 + description: An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties new file mode 100644 index 00000000000..8c679b9fc20 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties @@ -0,0 +1,76 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is the log4j configuration for YARN Services REST API Server + +# Log rotation based on size (1GB) with a max of 10 backup files +log4j.rootLogger=INFO, restservicelog +log4j.threshhold=ALL + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n + +log4j.appender.restservicelog=org.apache.log4j.RollingFileAppender +log4j.appender.restservicelog.layout=org.apache.log4j.PatternLayout +log4j.appender.restservicelog.File=${REST_SERVICE_LOG_DIR}/restservice.log +log4j.appender.restservicelog.MaxFileSize=1GB +log4j.appender.restservicelog.MaxBackupIndex=10 + +# log layout skips stack-trace creation operations by avoiding line numbers and method +log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n + +# debug edition is much more expensive +#log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n + +# configure stderr +# set the conversion pattern of stderr +# Print the date in ISO 8601 format +log4j.appender.stderr=org.apache.log4j.ConsoleAppender +log4j.appender.stderr.Target=System.err 
+log4j.appender.stderr.layout=org.apache.log4j.PatternLayout +log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n + +log4j.appender.subprocess=org.apache.log4j.ConsoleAppender +log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout +log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n + +# for debugging REST API Service +#log4j.logger.org.apache.hadoop.yarn.services=DEBUG + +# uncomment to debug service lifecycle issues +#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG +#log4j.logger.org.apache.hadoop.yarn.service=DEBUG + +# uncomment for YARN operations +#log4j.logger.org.apache.hadoop.yarn.client=DEBUG + +# uncomment this to debug security problems +#log4j.logger.org.apache.hadoop.security=DEBUG + +#crank back on some noise +log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR +log4j.logger.org.apache.hadoop.hdfs=WARN +log4j.logger.org.apache.hadoop.hdfs.shortcircuit=ERROR + +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN +log4j.logger.org.apache.zookeeper=WARN +log4j.logger.org.apache.curator.framework.state=ERROR +log4j.logger.org.apache.curator.framework.imps=WARN + +log4j.logger.org.mortbay.log=DEBUG diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app new file mode 100644 index 00000000000..6a077b10c2a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DON'T DELETE. REST WEBAPP RUN SCRIPT WILL STOP WORKING. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml new file mode 100644 index 00000000000..1282c9f8635 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml @@ -0,0 +1,36 @@ + + + + + + Jersey REST API + com.sun.jersey.spi.container.servlet.ServletContainer + + com.sun.jersey.config.property.packages + org.apache.hadoop.yarn.service.webapp,org.apache.hadoop.yarn.service.api,org.apache.hadoop.yarn.service.api.records + + + com.sun.jersey.api.json.POJOMappingFeature + true + + 1 + + + Jersey REST API + /* + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java new file mode 100644 index 00000000000..3e08c3ac339 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; + +/** + * A mock version of ServiceClient - This class is design + * to simulate various error conditions that will happen + * when a consumer class calls ServiceClient. 
+ */ +public class ServiceClientTest extends ServiceClient { + + private Configuration conf = new Configuration(); + + protected static void init() { + } + + public ServiceClientTest() { + super(); + } + + @Override + public Configuration getConfig() { + return conf; + } + + @Override + public ApplicationId actionCreate(Service service) { + String serviceName = service.getName(); + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + return ApplicationId.newInstance(System.currentTimeMillis(), 1); + } + + @Override + public Service getStatus(String appName) { + if (appName == null) { + throw new NullPointerException(); + } + if (appName.equals("jenkins")) { + return new Service(); + } else { + throw new IllegalArgumentException(); + } + } + + @Override + public int actionStart(String serviceName) + throws YarnException, IOException { + if (serviceName == null) { + throw new NullPointerException(); + } + if (serviceName.equals("jenkins")) { + return EXIT_SUCCESS; + } else { + throw new ApplicationNotFoundException(""); + } + } + + @Override + public int actionStop(String serviceName, boolean waitForAppStopped) + throws YarnException, IOException { + if (serviceName == null) { + throw new NullPointerException(); + } + if (serviceName.equals("jenkins")) { + return EXIT_SUCCESS; + } else { + throw new ApplicationNotFoundException(""); + } + } + + @Override + public int actionDestroy(String serviceName) { + if (serviceName == null) { + throw new NullPointerException(); + } + if (serviceName.equals("jenkins")) { + return EXIT_SUCCESS; + } else { + throw new IllegalArgumentException(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java new file mode 100644 index 00000000000..2b224747ae1 --- /dev/null 
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java @@ -0,0 +1,366 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.webapp.ApiServer; +import javax.ws.rs.Path; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; + +import org.junit.Before; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.*; + +/** + * Test case for ApiServer REST API. 
+ * + */ +public class TestApiServer { + private ApiServer apiServer; + + @Before + public void setup() throws Exception { + ServiceClient mockServerClient = new ServiceClientTest(); + Configuration conf = new Configuration(); + conf.set("yarn.api-service.service.client.class", + ServiceClientTest.class.getName()); + ApiServer.setServiceClient(mockServerClient); + this.apiServer = new ApiServer(conf); + } + + @Test + public void testPathAnnotation() { + assertNotNull(this.apiServer.getClass().getAnnotation(Path.class)); + assertTrue("The controller has the annotation Path", + this.apiServer.getClass().isAnnotationPresent(Path.class)); + final Path path = this.apiServer.getClass() + .getAnnotation(Path.class); + assertEquals("The path has /ws/v1 annotation", path.value(), + "/ws/v1"); + } + + @Test + public void testGetVersion() { + final Response actual = apiServer.getVersion(); + assertEquals("Version number is", actual.getStatus(), + Response.ok().build().getStatus()); + } + + @Test + public void testBadCreateService() { + Service service = new Service(); + // Test for invalid argument + final Response actual = apiServer.createService(service); + assertEquals("Create service is ", actual.getStatus(), + Response.status(Status.BAD_REQUEST).build().getStatus()); + } + + @Test + public void testGoodCreateService() { + Service service = new Service(); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.createService(service); + assertEquals("Create service is ", actual.getStatus(), + 
Response.status(Status.ACCEPTED).build().getStatus()); + } + + @Test + public void testBadGetService() { + final Response actual = apiServer.getService("no-jenkins"); + assertEquals("Get service is ", actual.getStatus(), + Response.status(Status.NOT_FOUND).build().getStatus()); + } + + @Test + public void testBadGetService2() { + final Response actual = apiServer.getService(null); + assertEquals("Get service is ", actual.getStatus(), + Response.status(Status.INTERNAL_SERVER_ERROR) + .build().getStatus()); + } + + @Test + public void testGoodGetService() { + final Response actual = apiServer.getService("jenkins"); + assertEquals("Get service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testBadDeleteService() { + final Response actual = apiServer.deleteService("no-jenkins"); + assertEquals("Delete service is ", actual.getStatus(), + Response.status(Status.BAD_REQUEST).build().getStatus()); + } + + @Test + public void testBadDeleteService2() { + final Response actual = apiServer.deleteService(null); + assertEquals("Delete service is ", actual.getStatus(), + Response.status(Status.INTERNAL_SERVER_ERROR) + .build().getStatus()); + } + + @Test + public void testGoodDeleteService() { + final Response actual = apiServer.deleteService("jenkins"); + assertEquals("Delete service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testDecreaseContainerAndStop() { + Service service = new Service(); + service.setState(ServiceState.STOPPED); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(0L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + 
c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.updateService("jenkins", + service); + assertEquals("update service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testBadDecreaseContainerAndStop() { + Service service = new Service(); + service.setState(ServiceState.STOPPED); + service.setName("no-jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("no-jenkins"); + c.setNumberOfContainers(-1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + System.out.println("before stop"); + final Response actual = apiServer.updateService("no-jenkins", + service); + assertEquals("flex service is ", actual.getStatus(), + Response.status(Status.BAD_REQUEST).build().getStatus()); + } + + @Test + public void testIncreaseContainersAndStart() { + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(2L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.updateService("jenkins", + service); + assertEquals("flex service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testBadStartServices() 
{ + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("no-jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(2L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.updateService("no-jenkins", + service); + assertEquals("start service is ", actual.getStatus(), + Response.status(Status.INTERNAL_SERVER_ERROR).build() + .getStatus()); + } + + @Test + public void testGoodStartServices() { + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(2L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.updateService("jenkins", + service); + assertEquals("start service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testBadStopServices() { + Service service = new Service(); + service.setState(ServiceState.STOPPED); + service.setName("no-jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + 
Component c = new Component(); + c.setName("no-jenkins"); + c.setNumberOfContainers(-1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + System.out.println("before stop"); + final Response actual = apiServer.updateService("no-jenkins", + service); + assertEquals("stop service is ", actual.getStatus(), + Response.status(Status.BAD_REQUEST).build().getStatus()); + } + + @Test + public void testGoodStopServices() { + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(-1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + System.out.println("before stop"); + final Response actual = apiServer.updateService("jenkins", + service); + assertEquals("stop service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testUpdateService() { + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("no-jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("no-jenkins"); + c.setNumberOfContainers(-1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + System.out.println("before stop"); + final Response actual = 
apiServer.updateService("no-jenkins", + service); + assertEquals("update service is ", actual.getStatus(), + Response.status(Status.INTERNAL_SERVER_ERROR) + .build().getStatus()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml new file mode 100644 index 00000000000..2814ccaa0cd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/conf/yarnservice-log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/conf/yarnservice-log4j.properties new file mode 100644 index 00000000000..58c8e27133a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/conf/yarnservice-log4j.properties @@ -0,0 +1,62 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# This is the log4j configuration for Slider Application Master + +# Log rotation based on size (256MB) with a max of 20 backup files +log4j.rootLogger=INFO, amlog +log4j.threshhold=ALL +log4j.appender.amlog=org.apache.log4j.RollingFileAppender +log4j.appender.amlog.layout=org.apache.log4j.PatternLayout +log4j.appender.amlog.File=${LOG_DIR}/serviceam.log +log4j.appender.amlog.MaxFileSize=256MB +log4j.appender.amlog.MaxBackupIndex=20 + +# log layout skips stack-trace creation operations by avoiding line numbers and method +log4j.appender.amlog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n + +# debug edition is much more expensive +#log4j.appender.amlog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n + +# configure stderr +# set the conversion pattern of stderr +# Print the date in ISO 8601 format +log4j.appender.stderr=org.apache.log4j.ConsoleAppender +log4j.appender.stderr.Target=System.err +log4j.appender.stderr.layout=org.apache.log4j.PatternLayout +log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n + +log4j.appender.subprocess=org.apache.log4j.ConsoleAppender +log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout +log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n + +# for debugging yarn-service framework +#log4j.logger.org.apache.hadoop.yarn.service=DEBUG + +# uncomment for YARN operations +#log4j.logger.org.apache.hadoop.yarn.client=DEBUG + +# uncomment this to debug security problems +#log4j.logger.org.apache.hadoop.security=DEBUG + +#crank back on some noise +log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR +log4j.logger.org.apache.hadoop.hdfs=WARN + +log4j.logger.org.apache.zookeeper=WARN +log4j.logger.org.apache.curator.framework.state=ERROR +log4j.logger.org.apache.curator.framework.imps=WARN diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-no-dns.json 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-no-dns.json new file mode 100644 index 00000000000..7bf2f68f2bc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-no-dns.json @@ -0,0 +1,62 @@ +{ + "name": "httpd-service-no-dns", + "lifetime": "3600", + "components": [ + { + "name": "httpd", + "number_of_containers": 2, + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "readiness_check": { + "type": "HTTP", + "properties": { + "url": "http://${THIS_HOST}:8080" + } + }, + "configuration": { + "files": [ + { + "type": "TEMPLATE", + "dest_file": "/var/www/html/index.html", + "properties": { + "content": "
Title
Hello from ${COMPONENT_INSTANCE_NAME}!" + } + } + ] + } + }, + { + "name": "httpd-proxy", + "number_of_containers": 1, + "dependencies": [ "httpd" ], + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "files": [ + { + "type": "TEMPLATE", + "dest_file": "/etc/httpd/conf.d/httpd-proxy.conf", + "src_file": "httpd-proxy-no-dns.conf" + } + ] + } + } + ], + "quicklinks": { + "Apache HTTP Server": "http://httpd-proxy-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080" + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-proxy-no-dns.conf b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-proxy-no-dns.conf new file mode 100644 index 00000000000..9894e64d508 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-proxy-no-dns.conf @@ -0,0 +1,24 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + + BalancerMember http://${HTTPD-0_IP}:8080 + BalancerMember http://${HTTPD-1_IP}:8080 + ProxySet lbmethod=bytraffic + + +ProxyPass "/" "balancer://test/" +ProxyPassReverse "/" "balancer://test/" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd-proxy.conf b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd-proxy.conf new file mode 100644 index 00000000000..e8651a5c6cd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd-proxy.conf @@ -0,0 +1,24 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + + BalancerMember http://httpd-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080 + BalancerMember http://httpd-1.${SERVICE_NAME}.${USER}.${DOMAIN}:8080 + ProxySet lbmethod=bytraffic + + +ProxyPass "/" "balancer://test/" +ProxyPassReverse "/" "balancer://test/" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd.json new file mode 100644 index 00000000000..87862144090 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd.json @@ -0,0 +1,55 @@ +{ + "name": "httpd-service", + "lifetime": "3600", + "components": [ + { + "name": "httpd", + "number_of_containers": 2, + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "files": [ + { + "type": "TEMPLATE", + "dest_file": "/var/www/html/index.html", + "properties": { + "content": "
Title
Hello from ${COMPONENT_INSTANCE_NAME}!" + } + } + ] + } + }, + { + "name": "httpd-proxy", + "number_of_containers": 1, + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "files": [ + { + "type": "TEMPLATE", + "dest_file": "/etc/httpd/conf.d/httpd-proxy.conf", + "src_file": "httpd-proxy.conf" + } + ] + } + } + ], + "quicklinks": { + "Apache HTTP Server": "http://httpd-proxy-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080" + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/sleeper/sleeper.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/sleeper/sleeper.json new file mode 100644 index 00000000000..89ce5274132 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/sleeper/sleeper.json @@ -0,0 +1,15 @@ +{ + "name": "sleeper-service", + "components" : + [ + { + "name": "sleeper", + "number_of_containers": 2, + "launch_command": "sleep 900000", + "resource": { + "cpus": 1, + "memory": "256" + } + } + ] +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml new file mode 100644 index 00000000000..6c9a8752f5e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml @@ -0,0 +1,255 @@ + + + 4.0.0 + + org.apache.hadoop + hadoop-yarn-services + 3.1.0-SNAPSHOT + + hadoop-yarn-services-core + jar + Apache Hadoop YARN Services Core + + + + ${project.parent.basedir} + + + + + + + src/main/resources + true + + + + + + 
org.apache.hadoop + hadoop-maven-plugins + + + compile-protoc + + protoc + + + ${protobuf.version} + ${protoc.path} + + ${basedir}/src/main/proto + + + ${basedir}/src/main/proto + + ClientAMProtocol.proto + + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + ${java.home} + + + + + + org.apache.rat + apache-rat-plugin + + + **/*.json + + + + + + + + + org.slf4j + slf4j-api + + + + log4j + log4j + runtime + + + + com.google.guava + guava + + + + org.codehaus.jackson + jackson-core-asl + + + + org.codehaus.jackson + jackson-mapper-asl + + + + com.fasterxml.jackson.core + jackson-annotations + + + + org.apache.hadoop + hadoop-hdfs-client + + + + org.apache.hadoop + hadoop-yarn-client + + + + org.apache.hadoop + hadoop-yarn-registry + + + + org.apache.hadoop + hadoop-yarn-common + + + + org.apache.hadoop + hadoop-yarn-server-common + + + + org.apache.hadoop + hadoop-common + + + + org.apache.hadoop + hadoop-annotations + + + + org.apache.hadoop + hadoop-yarn-api + + + + com.google.protobuf + protobuf-java + + + + org.apache.commons + commons-configuration2 + + + + org.apache.commons + commons-compress + + + + commons-io + commons-io + + + + commons-lang + commons-lang + + + + org.apache.curator + curator-client + + + + org.apache.curator + curator-framework + + + + javax.xml.bind + jaxb-api + + + + org.yaml + snakeyaml + + + + io.swagger + swagger-annotations + + + + + + + + junit + junit + test + + + + org.mockito + mockito-all + test + + + + org.apache.hadoop + hadoop-minicluster + test + + + + org.apache.curator + curator-test + test + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMProtocol.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMProtocol.java new file mode 100644 index 00000000000..516d23d3ae6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMProtocol.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto; + +import java.io.IOException; + +public interface ClientAMProtocol { + FlexComponentsResponseProto flexComponents(FlexComponentsRequestProto request) + throws IOException, YarnException; + + GetStatusResponseProto getStatus(GetStatusRequestProto requestProto) + throws IOException, YarnException; + + StopResponseProto stop(StopRequestProto requestProto) + throws IOException, YarnException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java new file mode 100644 index 00000000000..8e4c34dc690 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto; +import org.apache.hadoop.yarn.service.component.ComponentEvent; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import static org.apache.hadoop.yarn.service.component.ComponentEventType.FLEX; + +public class ClientAMService extends AbstractService + implements ClientAMProtocol { + + private static final Logger LOG = + LoggerFactory.getLogger(ClientAMService.class); + + private ServiceContext context; + private Server server; + + private InetSocketAddress bindAddress; 
+ + public ClientAMService(ServiceContext context) { + super("Client AM Service"); + this.context = context; + } + + @Override protected void serviceStart() throws Exception { + Configuration conf = getConfig(); + YarnRPC rpc = YarnRPC.create(conf); + InetSocketAddress address = new InetSocketAddress(0); + server = rpc.getServer(ClientAMProtocol.class, this, address, conf, + context.secretManager, 1); + server.start(); + + String nodeHostString = + System.getenv(ApplicationConstants.Environment.NM_HOST.name()); + + bindAddress = NetUtils.createSocketAddrForHost(nodeHostString, + server.getListenerAddress().getPort()); + + LOG.info("Instantiated ClientAMService at " + bindAddress); + super.serviceStart(); + } + + @Override protected void serviceStop() throws Exception { + if (server != null) { + server.stop(); + } + super.serviceStop(); + } + + @Override public FlexComponentsResponseProto flexComponents( + FlexComponentsRequestProto request) throws IOException { + if (!request.getComponentsList().isEmpty()) { + for (ComponentCountProto component : request.getComponentsList()) { + ComponentEvent event = new ComponentEvent(component.getName(), FLEX) + .setDesired(component.getNumberOfContainers()); + context.scheduler.getDispatcher().getEventHandler().handle(event); + LOG.info("Flexing component {} to {}", component.getName(), + component.getNumberOfContainers()); + } + } + return FlexComponentsResponseProto.newBuilder().build(); + } + + @Override + public GetStatusResponseProto getStatus(GetStatusRequestProto request) + throws IOException, YarnException { + String stat = ServiceApiUtil.jsonSerDeser.toJson(context.service); + return GetStatusResponseProto.newBuilder().setStatus(stat).build(); + } + + @Override + public StopResponseProto stop(StopRequestProto requestProto) + throws IOException, YarnException { + LOG.info("Stop the service."); + // Stop the service in 2 seconds delay to make sure this rpc call is completed. 
+ // shutdown hook will be executed which will stop AM gracefully. + Thread thread = new Thread() { + @Override + public void run() { + try { + Thread.sleep(2000); + ExitUtil.terminate(0); + } catch (InterruptedException e) { + LOG.error("Interrupted while stopping", e); + } + } + }; + thread.start(); + return StopResponseProto.newBuilder().build(); + } + + public InetSocketAddress getBindAddress() { + return bindAddress; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java new file mode 100644 index 00000000000..4743f283445 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.yarn.service.component.Component; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.NODE_BLACKLIST_THRESHOLD; + +/** + * This tracks the container failures per node. If the failure counter exceeds + * the maxFailurePerNode limit, it'll blacklist that node. + * + */ +public class ContainerFailureTracker { + + private static final Logger LOG = + LoggerFactory.getLogger(ContainerFailureTracker.class); + + // Host -> num container failures + private Map failureCountPerNode = new HashMap<>(); + private Set blackListedNodes = new HashSet<>(); + private ServiceContext context; + private int maxFailurePerNode; + private Component component; + + public ContainerFailureTracker(ServiceContext context, Component component) { + this.context = context; + this.component = component; + maxFailurePerNode = component.getComponentSpec().getConfiguration() + .getPropertyInt(NODE_BLACKLIST_THRESHOLD, 3); + } + + + public synchronized void incNodeFailure(String host) { + int num = 0; + if (failureCountPerNode.containsKey(host)) { + num = failureCountPerNode.get(host); + } + num++; + failureCountPerNode.put(host, num); + + // black list the node if exceed max failure + if (num > maxFailurePerNode && !blackListedNodes.contains(host)) { + List blacklists = new ArrayList<>(); + blacklists.add(host); + blackListedNodes.add(host); + context.scheduler.getAmRMClient().updateBlacklist(blacklists, null); + LOG.info("[COMPONENT {}]: Failed {} times on this host, blacklisted {}." 
+ + " Current list of blacklisted nodes: {}", + component.getName(), num, host, blackListedNodes); + } + } + + public synchronized void resetContainerFailures() { + // reset container failure counter per node + failureCountPerNode.clear(); + context.scheduler.getAmRMClient() + .updateBlacklist(null, new ArrayList<>(blackListedNodes)); + LOG.info("[COMPONENT {}]: Clearing blacklisted nodes {} ", + component.getName(), blackListedNodes); + blackListedNodes.clear(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java new file mode 100644 index 00000000000..94dbc6ee345 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import com.google.common.cache.LoadingCache; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; + +public class ServiceContext { + public Service service = null; + public SliderFileSystem fs; + public String serviceHdfsDir = ""; + public ApplicationAttemptId attemptId; + public LoadingCache configCache; + public ServiceScheduler scheduler; + public ClientToAMTokenSecretManager secretManager; + public ClientAMService clientAMService; + + public ServiceContext() { + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java new file mode 100644 index 00000000000..b0b4f067568 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.apache.hadoop.yarn.service;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.service.monitor.ServiceMonitor;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Map;

/**
 * Entry point of the service Application Master. Loads the service
 * definition (YarnFile) from HDFS, builds the shared {@link ServiceContext},
 * and composes the client AM RPC service, the {@link ServiceScheduler} and
 * the {@link ServiceMonitor} as child services.
 */
public class ServiceMaster extends CompositeService {

  private static final Logger LOG =
      LoggerFactory.getLogger(ServiceMaster.class);

  public static final String YARNFILE_OPTION = "yarnfile";

  // HDFS path of the JSON service definition; set once in main() before
  // init() runs, then read by getAppDir()/loadApplicationJson().
  private static String serviceDefPath;
  protected ServiceContext context;

  public ServiceMaster(String name) {
    super(name);
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    //TODO Deprecate slider conf, make sure works with yarn conf
    printSystemEnv();
    if (UserGroupInformation.isSecurityEnabled()) {
      UserGroupInformation.setConfiguration(conf);
    }
    LOG.info("Login user is {}", UserGroupInformation.getLoginUser());

    context = new ServiceContext();
    Path appDir = getAppDir();
    context.serviceHdfsDir = appDir.toString();
    SliderFileSystem fs = new SliderFileSystem(conf);
    context.fs = fs;
    fs.setAppDir(appDir);
    loadApplicationJson(context, fs);

    // Take yarn config from YarnFile and merge them into YarnConfiguration
    for (Map.Entry<String, String> entry : context.service
        .getConfiguration().getProperties().entrySet()) {
      conf.set(entry.getKey(), entry.getValue());
    }

    ContainerId amContainerId = getAMContainerId();

    ApplicationAttemptId attemptId = amContainerId.getApplicationAttemptId();
    LOG.info("Service AppAttemptId: " + attemptId);
    context.attemptId = attemptId;

    // configure AM to wait forever for RM
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1);
    conf.unset(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS);

    DefaultMetricsSystem.initialize("ServiceAppMaster");

    context.secretManager = new ClientToAMTokenSecretManager(attemptId, null);
    ClientAMService clientAMService = new ClientAMService(context);
    context.clientAMService = clientAMService;
    addService(clientAMService);

    ServiceScheduler scheduler = createServiceScheduler(context);
    addService(scheduler);
    context.scheduler = scheduler;

    ServiceMonitor monitor = new ServiceMonitor("Service Monitor", context);
    addService(monitor);

    super.serviceInit(conf);
  }

  /**
   * Reads this AM's container id from the CONTAINER_ID environment variable
   * injected by the NodeManager.
   *
   * @throws BadClusterStateException if the variable is missing
   */
  protected ContainerId getAMContainerId() throws BadClusterStateException {
    return ContainerId.fromString(ServiceUtils.mandatoryEnvVariable(
        ApplicationConstants.Environment.CONTAINER_ID.name()));
  }

  /** @return the HDFS directory containing the service definition. */
  protected Path getAppDir() {
    return new Path(serviceDefPath).getParent();
  }

  protected ServiceScheduler createServiceScheduler(ServiceContext context)
      throws IOException, YarnException {
    return new ServiceScheduler(context);
  }

  /** Loads the service spec JSON from HDFS into the context. */
  protected void loadApplicationJson(ServiceContext context,
      SliderFileSystem fs) throws IOException {
    context.service = ServiceApiUtil
        .loadServiceFrom(fs, new Path(serviceDefPath));
    LOG.info(context.service.toString());
  }

  @Override
  protected void serviceStop() throws Exception {
    LOG.info("Stopping app master");
    super.serviceStop();
  }

  // Dump the full process environment for debugging container launches.
  private void printSystemEnv() {
    for (Map.Entry<String, String> envs : System.getenv().entrySet()) {
      LOG.info("{} = {}", envs.getKey(), envs.getValue());
    }
  }

  public static void main(String[] args) throws Exception {
    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
    StringUtils.startupShutdownMessage(ServiceMaster.class, args, LOG);
    try {
      ServiceMaster serviceMaster = new ServiceMaster("Service Master");
      ShutdownHookManager.get()
          .addShutdownHook(new CompositeServiceShutdownHook(serviceMaster), 30);
      YarnConfiguration conf = new YarnConfiguration();
      Options opts = new Options();
      opts.addOption(YARNFILE_OPTION, true, "HDFS path to JSON service "
          + "specification");
      opts.getOption(YARNFILE_OPTION).setRequired(true);
      GenericOptionsParser parser = new GenericOptionsParser(conf, opts, args);
      CommandLine cmdLine = parser.getCommandLine();
      // serviceDefPath is static: assign it directly, not through the
      // instance reference (avoids the static-via-instance pitfall).
      serviceDefPath = cmdLine.getOptionValue(YARNFILE_OPTION);
      serviceMaster.init(conf);
      serviceMaster.start();
    } catch (Throwable t) {
      LOG.error("Error starting service master", t);
      ExitUtil.terminate(1, "Error starting service master");
    }
  }
}
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMetrics.java new file mode 100644 index 00000000000..b5bbb7d0347 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMetrics.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; + +import static org.apache.hadoop.metrics2.lib.Interns.info; + +@Metrics(context = "yarn-native-service") +public class ServiceMetrics implements MetricsSource { + + @Metric("containers requested") + public MutableGaugeInt containersRequested; + + @Metric("containers running") + public MutableGaugeInt containersRunning; + + @Metric("containers ready") + public MutableGaugeInt containersReady; + + @Metric("containers desired") + public MutableGaugeInt containersDesired; + + @Metric("containers succeeded") + public MutableGaugeInt containersSucceeded; + + @Metric("containers failed") + public MutableGaugeInt containersFailed; + + @Metric("containers preempted") + public MutableGaugeInt containersPreempted; + + @Metric("containers surplus") + public MutableGaugeInt surplusContainers; + + @Metric("containers failed due to disk failure") + public MutableGaugeInt containersDiskFailure; + + protected final MetricsRegistry registry; + + public ServiceMetrics(MetricsInfo metricsInfo) { + registry = new MetricsRegistry(metricsInfo); + } + + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + registry.snapshot(collector.addRecord(registry.info()), all); + } + + public static ServiceMetrics register(String name, String description) { + ServiceMetrics metrics = new ServiceMetrics(info(name, description)); + DefaultMetricsSystem.instance().register(name, description, metrics); + return metrics; + } + + public void tag(String name, String description, String value) { + 
registry.tag(name, description, value); + } + + @Override public String toString() { + return "ServiceMetrics{" + + "containersRequested=" + containersRequested.value() + + ", containersRunning=" + containersRunning.value() + + ", containersDesired=" + containersDesired.value() + + ", containersSucceeded=" + containersSucceeded.value() + + ", containersFailed=" + containersFailed.value() + + ", containersPreempted=" + containersPreempted.value() + + ", surplusContainers=" + surplusContainers.value() + '}'; + } +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java new file mode 100644 index 00000000000..a7b7e22d42c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java @@ -0,0 +1,691 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.api.RegistryOperationsFactory; +import org.apache.hadoop.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies; +import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.UpdatedContainer; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.hadoop.yarn.client.api.TimelineV2Client; +import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; +import org.apache.hadoop.yarn.client.api.async.NMClientAsync; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import 
org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.service.api.ServiceApiConstants; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType; +import org.apache.hadoop.yarn.service.component.Component; +import org.apache.hadoop.yarn.service.component.ComponentEvent; +import org.apache.hadoop.yarn.service.component.ComponentEventType; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.containerlaunch.ContainerLaunchService; +import org.apache.hadoop.yarn.service.provider.ProviderUtils; +import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders; +import org.apache.hadoop.yarn.service.timelineservice.ServiceMetricsSink; +import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils; +import org.apache.hadoop.yarn.util.BoundedAppender; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.nio.ByteBuffer; +import java.text.MessageFormat; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; +import static 
org.apache.hadoop.registry.client.api.RegistryConstants.*; +import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*; +import static org.apache.hadoop.yarn.service.component.ComponentEventType.*; + +/** + * + */ +public class ServiceScheduler extends CompositeService { + + private static final Logger LOG = + LoggerFactory.getLogger(ServiceScheduler.class); + private Service app; + + // component_name -> component + private final Map componentsByName = + new ConcurrentHashMap<>(); + + // id - > component + protected final Map componentsById = + new ConcurrentHashMap<>(); + + private final Map liveInstances = + new ConcurrentHashMap<>(); + + private ServiceMetrics serviceMetrics; + + private ServiceTimelinePublisher serviceTimelinePublisher; + + // Global diagnostics that will be reported to RM on eRxit. + // The unit the number of characters. This will be limited to 64 * 1024 + // characters. + private BoundedAppender diagnostics = new BoundedAppender(64 * 1024); + + // A cache for loading config files from remote such as hdfs + public LoadingCache configFileCache = null; + + public ScheduledExecutorService executorService; + public Map globalTokens = new HashMap<>(); + + private AMRMClientAsync amRMClient; + private NMClientAsync nmClient; + private AsyncDispatcher dispatcher; + AsyncDispatcher compInstanceDispatcher; + private YarnRegistryViewForProviders yarnRegistryOperations; + private ServiceContext context; + private ContainerLaunchService containerLaunchService; + + public ServiceScheduler(ServiceContext context) { + super(context.service.getName()); + this.context = context; + } + + public void buildInstance(ServiceContext context, Configuration configuration) + throws YarnException { + app = context.service; + executorService = Executors.newScheduledThreadPool(10); + RegistryOperations registryClient = RegistryOperationsFactory + .createInstance("ServiceScheduler", configuration); + addIfService(registryClient); + yarnRegistryOperations = + 
createYarnRegistryOperations(context, registryClient); + + // register metrics + serviceMetrics = ServiceMetrics + .register(app.getName(), "Metrics for service"); + serviceMetrics.tag("type", "Metrics type [component or service]", "service"); + serviceMetrics.tag("appId", "Service id for service", app.getId()); + + amRMClient = createAMRMClient(); + addIfService(amRMClient); + + nmClient = createNMClient(); + addIfService(nmClient); + + dispatcher = new AsyncDispatcher("Component dispatcher"); + dispatcher.register(ComponentEventType.class, + new ComponentEventHandler()); + dispatcher.setDrainEventsOnStop(); + addIfService(dispatcher); + + compInstanceDispatcher = + new AsyncDispatcher("CompInstance dispatcher"); + compInstanceDispatcher.register(ComponentInstanceEventType.class, + new ComponentInstanceEventHandler()); + addIfService(compInstanceDispatcher); + containerLaunchService = new ContainerLaunchService(context.fs); + addService(containerLaunchService); + + if (YarnConfiguration.timelineServiceV2Enabled(configuration)) { + TimelineV2Client timelineClient = TimelineV2Client + .createTimelineClient(context.attemptId.getApplicationId()); + amRMClient.registerTimelineV2Client(timelineClient); + serviceTimelinePublisher = new ServiceTimelinePublisher(timelineClient); + addService(serviceTimelinePublisher); + DefaultMetricsSystem.instance().register("ServiceMetricsSink", + "For processing metrics to ATS", + new ServiceMetricsSink(serviceTimelinePublisher)); + LOG.info("Timeline v2 is enabled."); + } + + initGlobalTokensForSubstitute(context); + //substitute quicklinks + ProviderUtils.substituteMapWithTokens(app.getQuicklinks(), globalTokens); + createConfigFileCache(context.fs.getFileSystem()); + + createAllComponents(); + } + + protected YarnRegistryViewForProviders createYarnRegistryOperations( + ServiceContext context, RegistryOperations registryClient) { + return new YarnRegistryViewForProviders(registryClient, + RegistryUtils.currentUser(), 
YarnServiceConstants.APP_TYPE, app.getName(), + context.attemptId); + } + + protected NMClientAsync createNMClient() { + return NMClientAsync.createNMClientAsync(new NMClientCallback()); + } + + protected AMRMClientAsync createAMRMClient() { + return AMRMClientAsync + .createAMRMClientAsync(1000, new AMRMClientCallback()); + } + + @Override + public void serviceInit(Configuration conf) throws Exception { + try { + buildInstance(context, conf); + } catch (YarnException e) { + throw new YarnRuntimeException(e); + } + super.serviceInit(conf); + } + + @Override + public void serviceStop() throws Exception { + LOG.info("Stopping service scheduler"); + + if (executorService != null) { + executorService.shutdownNow(); + } + + DefaultMetricsSystem.shutdown(); + if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) { + serviceTimelinePublisher + .serviceAttemptUnregistered(context, diagnostics.toString()); + } + String msg = diagnostics.toString() + + "Navigate to the failed component for more details."; + amRMClient + .unregisterApplicationMaster(FinalApplicationStatus.ENDED, msg, ""); + LOG.info("Service " + app.getName() + + " unregistered with RM, with attemptId = " + context.attemptId + + ", diagnostics = " + diagnostics); + super.serviceStop(); + } + + @Override + public void serviceStart() throws Exception { + super.serviceStart(); + InetSocketAddress bindAddress = context.clientAMService.getBindAddress(); + RegisterApplicationMasterResponse response = amRMClient + .registerApplicationMaster(bindAddress.getHostName(), + bindAddress.getPort(), "N/A"); + if (response.getClientToAMTokenMasterKey() != null + && response.getClientToAMTokenMasterKey().remaining() != 0) { + context.secretManager + .setMasterKey(response.getClientToAMTokenMasterKey().array()); + } + registerServiceInstance(context.attemptId, app); + + // recover components based on containers sent from RM + recoverComponents(response); + + for (Component component : componentsById.values()) { + // 
Trigger initial evaluation of components + if (component.areDependenciesReady()) { + LOG.info("Triggering initial evaluation of component {}", + component.getName()); + ComponentEvent event = new ComponentEvent(component.getName(), FLEX) + .setDesired(component.getComponentSpec().getNumberOfContainers()); + component.handle(event); + } + } + } + + private void recoverComponents(RegisterApplicationMasterResponse response) { + List recoveredContainers = response + .getContainersFromPreviousAttempts(); + LOG.info("Received {} containers from previous attempt.", + recoveredContainers.size()); + Map existingRecords = new HashMap<>(); + List existingComps = null; + try { + existingComps = yarnRegistryOperations.listComponents(); + LOG.info("Found {} containers from ZK registry: {}", existingComps.size(), + existingComps); + } catch (Exception e) { + LOG.info("Could not read component paths: {}", e.getMessage()); + } + if (existingComps != null) { + for (String existingComp : existingComps) { + try { + ServiceRecord record = + yarnRegistryOperations.getComponent(existingComp); + existingRecords.put(existingComp, record); + } catch (Exception e) { + LOG.warn("Could not resolve record for component {}: {}", + existingComp, e); + } + } + } + for (Container container : recoveredContainers) { + LOG.info("Handling container {} from previous attempt", + container.getId()); + ServiceRecord record = existingRecords.get(RegistryPathUtils + .encodeYarnID(container.getId().toString())); + if (record != null) { + Component comp = componentsById.get(container.getAllocationRequestId()); + ComponentEvent event = + new ComponentEvent(comp.getName(), CONTAINER_RECOVERED) + .setContainer(container) + .setInstance(comp.getComponentInstance(record.description)); + comp.handle(event); + // do not remove requests in this case because we do not know if they + // have already been removed + } else { + LOG.info("Record not found in registry for container {} from previous" + + " attempt, 
releasing", container.getId()); + amRMClient.releaseAssignedContainer(container.getId()); + } + } + } + + private void initGlobalTokensForSubstitute(ServiceContext context) { + // ZK + globalTokens.put(ServiceApiConstants.CLUSTER_ZK_QUORUM, getConfig() + .getTrimmed(KEY_REGISTRY_ZK_QUORUM, DEFAULT_REGISTRY_ZK_QUORUM)); + String user = null; + try { + user = UserGroupInformation.getCurrentUser().getShortUserName(); + } catch (IOException e) { + LOG.error("Failed to get user.", e); + } + globalTokens + .put(SERVICE_ZK_PATH, ServiceRegistryUtils.mkClusterPath(user, app.getName())); + + globalTokens.put(ServiceApiConstants.USER, user); + String dnsDomain = getConfig().getTrimmed(KEY_DNS_DOMAIN); + if (dnsDomain != null && !dnsDomain.isEmpty()) { + globalTokens.put(ServiceApiConstants.DOMAIN, dnsDomain); + } + // HDFS + String clusterFs = getConfig().getTrimmed(FS_DEFAULT_NAME_KEY); + if (clusterFs != null && !clusterFs.isEmpty()) { + globalTokens.put(ServiceApiConstants.CLUSTER_FS_URI, clusterFs); + globalTokens.put(ServiceApiConstants.CLUSTER_FS_HOST, + URI.create(clusterFs).getHost()); + } + globalTokens.put(SERVICE_HDFS_DIR, context.serviceHdfsDir); + // service name + globalTokens.put(SERVICE_NAME_LC, app.getName().toLowerCase()); + globalTokens.put(SERVICE_NAME, app.getName()); + } + + private void createConfigFileCache(final FileSystem fileSystem) { + this.configFileCache = + CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES) + .build(new CacheLoader() { + @Override public Object load(ConfigFile key) throws Exception { + switch (key.getType()) { + case HADOOP_XML: + try (FSDataInputStream input = fileSystem + .open(new Path(key.getSrcFile()))) { + org.apache.hadoop.conf.Configuration confRead = + new org.apache.hadoop.conf.Configuration(false); + confRead.addResource(input); + Map map = new HashMap<>(confRead.size()); + for (Map.Entry entry : confRead) { + map.put(entry.getKey(), entry.getValue()); + } + return map; + } + case TEMPLATE: + try 
(FSDataInputStream fileInput = fileSystem + .open(new Path(key.getSrcFile()))) { + return IOUtils.toString(fileInput); + } + default: + return null; + } + } + }); + context.configCache = configFileCache; + } + + private void registerServiceInstance(ApplicationAttemptId attemptId, + Service service) throws IOException { + LOG.info("Registering " + attemptId + ", " + service.getName() + + " into registry"); + ServiceRecord serviceRecord = new ServiceRecord(); + serviceRecord.set(YarnRegistryAttributes.YARN_ID, + attemptId.getApplicationId().toString()); + serviceRecord.set(YarnRegistryAttributes.YARN_PERSISTENCE, + PersistencePolicies.APPLICATION); + serviceRecord.description = "YarnServiceMaster"; + + executorService.submit(new Runnable() { + @Override public void run() { + try { + yarnRegistryOperations.registerSelf(serviceRecord, false); + LOG.info("Registered service under {}; absolute path {}", + yarnRegistryOperations.getSelfRegistrationPath(), + yarnRegistryOperations.getAbsoluteSelfRegistrationPath()); + boolean isFirstAttempt = 1 == attemptId.getAttemptId(); + // delete the children in case there are any and this is an AM startup. 
+ // just to make sure everything underneath is purged + if (isFirstAttempt) { + yarnRegistryOperations.deleteChildren( + yarnRegistryOperations.getSelfRegistrationPath(), true); + } + } catch (IOException e) { + LOG.error( + "Failed to register app " + app.getName() + " in registry"); + } + } + }); + if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) { + serviceTimelinePublisher.serviceAttemptRegistered(app, getConfig()); + } + } + + private void createAllComponents() { + long allocateId = 0; + + // sort components by dependencies + Collection sortedComponents = + ServiceApiUtil.sortByDependencies(app.getComponents()); + + for (org.apache.hadoop.yarn.service.api.records.Component compSpec : sortedComponents) { + Component component = new Component(compSpec, allocateId, context); + componentsById.put(allocateId, component); + componentsByName.put(component.getName(), component); + allocateId++; + } + } + + private final class ComponentEventHandler + implements EventHandler { + @Override + public void handle(ComponentEvent event) { + Component component = componentsByName.get(event.getName()); + + if (component == null) { + LOG.error("No component exists for " + event.getName()); + return; + } + try { + component.handle(event); + } catch (Throwable t) { + LOG.error(MessageFormat + .format("[COMPONENT {0}]: Error in handling event type {1}", + component.getName(), event.getType()), t); + } + } + } + + private final class ComponentInstanceEventHandler + implements EventHandler { + @Override + public void handle(ComponentInstanceEvent event) { + ComponentInstance instance = + liveInstances.get(event.getContainerId()); + if (instance == null) { + LOG.error("No component instance exists for " + event.getContainerId()); + return; + } + try { + instance.handle(event); + } catch (Throwable t) { + LOG.error(instance.getCompInstanceId() + + ": Error in handling event type " + event.getType(), t); + } + } + } + + class AMRMClientCallback extends 
AMRMClientAsync.AbstractCallbackHandler { + + @Override + public void onContainersAllocated(List containers) { + LOG.info(containers.size() + " containers allocated. "); + for (Container container : containers) { + Component comp = componentsById.get(container.getAllocationRequestId()); + ComponentEvent event = + new ComponentEvent(comp.getName(), CONTAINER_ALLOCATED) + .setContainer(container); + dispatcher.getEventHandler().handle(event); + Collection requests = amRMClient + .getMatchingRequests(container.getAllocationRequestId()); + LOG.info("[COMPONENT {}]: {} outstanding container requests.", + comp.getName(), requests.size()); + // remove the corresponding request + if (requests.iterator().hasNext()) { + LOG.info("[COMPONENT {}]: removing one container request.", comp + .getName()); + AMRMClient.ContainerRequest request = requests.iterator().next(); + amRMClient.removeContainerRequest(request); + } + } + } + + @Override + public void onContainersCompleted(List statuses) { + for (ContainerStatus status : statuses) { + ContainerId containerId = status.getContainerId(); + ComponentInstance instance = liveInstances.get(status.getContainerId()); + if (instance == null) { + LOG.warn( + "Container {} Completed. No component instance exists. exitStatus={}. diagnostics={} ", + containerId, status.getExitStatus(), status.getDiagnostics()); + return; + } + ComponentEvent event = + new ComponentEvent(instance.getCompName(), CONTAINER_COMPLETED) + .setStatus(status).setInstance(instance); + dispatcher.getEventHandler().handle(event); + } + } + + @Override + public void onContainersUpdated(List containers) { + } + + @Override public void onShutdownRequest() { + //Was used for non-work-preserving restart in YARN, should be deprecated. 
+ } + + @Override public void onNodesUpdated(List updatedNodes) { + StringBuilder str = new StringBuilder(); + str.append("Nodes updated info: ").append(System.lineSeparator()); + for (NodeReport report : updatedNodes) { + str.append(report.getNodeId()).append(", state = ") + .append(report.getNodeState()).append(", healthDiagnostics = ") + .append(report.getHealthReport()).append(System.lineSeparator()); + } + LOG.warn(str.toString()); + } + + @Override public float getProgress() { + // get running containers over desired containers + long total = 0; + for (org.apache.hadoop.yarn.service.api.records.Component component : app + .getComponents()) { + total += component.getNumberOfContainers(); + } + // Probably due to user flexed down to 0 + if (total == 0) { + return 100; + } + return Math.max((float) liveInstances.size() / total * 100, 100); + } + + @Override public void onError(Throwable e) { + LOG.error("Error in AMRMClient callback handler ", e); + } + } + + + private class NMClientCallback extends NMClientAsync.AbstractCallbackHandler { + + @Override public void onContainerStarted(ContainerId containerId, + Map allServiceResponse) { + ComponentInstance instance = liveInstances.get(containerId); + if (instance == null) { + LOG.error("No component instance exists for " + containerId); + return; + } + ComponentEvent event = + new ComponentEvent(instance.getCompName(), CONTAINER_STARTED) + .setInstance(instance); + dispatcher.getEventHandler().handle(event); + } + + @Override public void onContainerStatusReceived(ContainerId containerId, + ContainerStatus containerStatus) { + + } + + @Override public void onContainerStopped(ContainerId containerId) { + + } + + @Override + public void onStartContainerError(ContainerId containerId, Throwable t) { + ComponentInstance instance = liveInstances.get(containerId); + if (instance == null) { + LOG.error("No component instance exists for " + containerId); + return; + } + LOG.error("Failed to start " + containerId, t); + 
amRMClient.releaseAssignedContainer(containerId); + // After container released, it'll get CONTAINER_COMPLETED event from RM + // automatically which will trigger stopping COMPONENT INSTANCE + } + + @Override public void onContainerResourceIncreased(ContainerId containerId, + Resource resource) { + + } + + @Override public void onContainerResourceUpdated(ContainerId containerId, + Resource resource) { + + } + + @Override public void onGetContainerStatusError(ContainerId containerId, + Throwable t) { + + } + + @Override + public void onIncreaseContainerResourceError(ContainerId containerId, + Throwable t) { + + } + + @Override + public void onUpdateContainerResourceError(ContainerId containerId, + Throwable t) { + + } + + @Override + public void onStopContainerError(ContainerId containerId, Throwable t) { + + } + } + + public ServiceMetrics getServiceMetrics() { + return serviceMetrics; + } + + public AMRMClientAsync getAmRMClient() { + return amRMClient; + } + + public NMClientAsync getNmClient() { + return nmClient; + } + + public void addLiveCompInstance(ContainerId containerId, + ComponentInstance instance) { + liveInstances.put(containerId, instance); + } + + public void removeLiveCompInstance(ContainerId containerId) { + liveInstances.remove(containerId); + } + + public AsyncDispatcher getCompInstanceDispatcher() { + return compInstanceDispatcher; + } + + public YarnRegistryViewForProviders getYarnRegistryOperations() { + return yarnRegistryOperations; + } + + public ServiceTimelinePublisher getServiceTimelinePublisher() { + return serviceTimelinePublisher; + } + + public Map getLiveInstances() { + return liveInstances; + } + + public ContainerLaunchService getContainerLaunchService() { + return containerLaunchService; + } + + public ServiceContext getContext() { + return context; + } + + public Map getAllComponents() { + return componentsByName; + } + + public Service getApp() { + return app; + } + + public AsyncDispatcher getDispatcher() { + return 
dispatcher; + } + + public BoundedAppender getDiagnostics() { + return diagnostics; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/ServiceApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/ServiceApiConstants.java new file mode 100644 index 00000000000..a4f2243d227 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/ServiceApiConstants.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.$; + +/** + * This class defines constants that can be used in input spec for + * variable substitutions + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface ServiceApiConstants { + + // Constants for service + String SERVICE_NAME = $("SERVICE_NAME"); + + String SERVICE_NAME_LC = $("SERVICE_NAME.lc"); + + String USER = $("USER"); + + String DOMAIN = $("DOMAIN"); + + // Constants for component + String COMPONENT_NAME = $("COMPONENT_NAME"); + + String COMPONENT_NAME_LC = $("COMPONENT_NAME.lc"); + + String COMPONENT_INSTANCE_NAME = $("COMPONENT_INSTANCE_NAME"); + + // Constants for component instance + String COMPONENT_ID = $("COMPONENT_ID"); + + String CONTAINER_ID = $("CONTAINER_ID"); + + // Templates for component instance host/IP + String COMPONENT_INSTANCE_HOST = $("%s_HOST"); + + String COMPONENT_INSTANCE_IP = $("%s_IP"); + + // Constants for default cluster ZK + String CLUSTER_ZK_QUORUM = $("CLUSTER_ZK_QUORUM"); + + // URI for the default cluster fs + String CLUSTER_FS_URI = $("CLUSTER_FS_URI"); + + // the host component of the cluster fs UI + String CLUSTER_FS_HOST = $("CLUSTER_FS_HOST"); + + // Path in zookeeper for a specific service + String SERVICE_ZK_PATH = $("SERVICE_ZK_PATH"); + + // Constants for service specific hdfs dir + String SERVICE_HDFS_DIR = $("SERVICE_HDFS_DIR"); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java new file mode 100644 index 00000000000..ce062cc8384 
--- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.io.Serializable; +import java.util.Objects; + +import javax.xml.bind.annotation.XmlEnum; +import javax.xml.bind.annotation.XmlType; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Artifact of an service component. 
+ **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "Artifact of an service component") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Artifact implements Serializable { + private static final long serialVersionUID = 3608929500111099035L; + + private String id = null; + + /** + * Artifact Type. DOCKER, TARBALL or SERVICE + **/ + @XmlType(name = "artifact_type") + @XmlEnum + public enum TypeEnum { + DOCKER("DOCKER"), TARBALL("TARBALL"), SERVICE("SERVICE"); + + private String value; + + TypeEnum(String value) { + this.value = value; + } + + @Override + @JsonValue + public String toString() { + return value; + } + } + + private TypeEnum type = TypeEnum.DOCKER; + private String uri = null; + + /** + * Artifact id. Examples are package location uri for tarball based services, + * image name for docker, etc. + **/ + public Artifact id(String id) { + this.id = id; + return this; + } + + @ApiModelProperty(example = "null", required = true, value = "Artifact id. Examples are package location uri for tarball based services, image name for docker, etc.") + @JsonProperty("id") + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + /** + * Artifact type, like docker, tarball, etc. (optional). + **/ + public Artifact type(TypeEnum type) { + this.type = type; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact type, like docker, tarball, etc. (optional).") + @JsonProperty("type") + public TypeEnum getType() { + return type; + } + + public void setType(TypeEnum type) { + this.type = type; + } + + /** + * Artifact location to support multiple artifact stores (optional). 
+ **/ + public Artifact uri(String uri) { + this.uri = uri; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact location to support multiple artifact stores (optional).") + @JsonProperty("uri") + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Artifact artifact = (Artifact) o; + return Objects.equals(this.id, artifact.id) + && Objects.equals(this.type, artifact.type) + && Objects.equals(this.uri, artifact.uri); + } + + @Override + public int hashCode() { + return Objects.hash(id, type, uri); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class Artifact {\n"); + + sb.append(" id: ").append(toIndentedString(id)).append("\n"); + sb.append(" type: ").append(toIndentedString(type)).append("\n"); + sb.append(" uri: ").append(toIndentedString(uri)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). 
+ */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java new file mode 100644 index 00000000000..7ac86d4c66f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api.records; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.io.Serializable; +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class BaseResource implements Serializable { + private static final long serialVersionUID = 1492603053176889431L; + + private String uri; + + /** + * Resource location for a service, e.g. + * /ws/v1/services/helloworld + * + **/ + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("BaseResource [uri="); + builder.append(uri); + builder.append("]"); + return builder.toString(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java new file mode 100644 index 00000000000..fe9c043106c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java @@ -0,0 +1,430 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * One or more components of the service. If the service is HBase say, + * then the component can be a simple role like master or regionserver. If the + * service is a complex business webapp then a component can be other + * services say Kafka or Storm. Thereby it opens up the support for complex + * and nested services. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "One or more components of the service. If the service is HBase say, then the component can be a simple role like master or regionserver. If the service is a complex business webapp then a component can be other services say Kafka or Storm. 
Thereby it opens up the support for complex and nested services.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@XmlRootElement +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Component implements Serializable { + private static final long serialVersionUID = -8430058381509087805L; + + private String name = null; + private List dependencies = new ArrayList(); + private ReadinessCheck readinessCheck = null; + private Artifact artifact = null; + private String launchCommand = null; + private Resource resource = null; + private Long numberOfContainers = null; + private Boolean runPrivilegedContainer = false; + private PlacementPolicy placementPolicy = null; + private ComponentState state = ComponentState.FLEXING; + private Configuration configuration = new Configuration(); + private List quicklinks = new ArrayList(); + private List containers = + Collections.synchronizedList(new ArrayList()); + + /** + * Name of the service component (mandatory). + **/ + public Component name(String name) { + this.name = name; + return this; + } + + @ApiModelProperty(example = "null", required = true, value = "Name of the service component (mandatory).") + @JsonProperty("name") + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + /** + * An array of service components which should be in READY state (as + * defined by readiness check), before this component can be started. The + * dependencies across all components of a service should be represented + * as a DAG. + **/ + public Component dependencies(List dependencies) { + this.dependencies = dependencies; + return this; + } + + @ApiModelProperty(example = "null", value = "An array of service components which should be in READY state (as defined by readiness check), before this component can be started. 
The dependencies across all components of an service should be represented as a DAG.") + @JsonProperty("dependencies") + public List getDependencies() { + return dependencies; + } + + public void setDependencies(List dependencies) { + this.dependencies = dependencies; + } + + /** + * Readiness check for this component. + **/ + public Component readinessCheck(ReadinessCheck readinessCheck) { + this.readinessCheck = readinessCheck; + return this; + } + + @ApiModelProperty(example = "null", value = "Readiness check for this component.") + @JsonProperty("readiness_check") + public ReadinessCheck getReadinessCheck() { + return readinessCheck; + } + + @XmlElement(name = "readiness_check") + public void setReadinessCheck(ReadinessCheck readinessCheck) { + this.readinessCheck = readinessCheck; + } + + /** + * Artifact of the component (optional). If not specified, the service + * level global artifact takes effect. + **/ + public Component artifact(Artifact artifact) { + this.artifact = artifact; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact of the component (optional). If not specified, the service level global artifact takes effect.") + @JsonProperty("artifact") + public Artifact getArtifact() { + return artifact; + } + + public void setArtifact(Artifact artifact) { + this.artifact = artifact; + } + + /** + * The custom launch command of this component (optional). When specified at + * the component level, it overrides the value specified at the global level + * (if any). + **/ + public Component launchCommand(String launchCommand) { + this.launchCommand = launchCommand; + return this; + } + + @ApiModelProperty(example = "null", value = "The custom launch command of this component (optional). 
When specified at the component level, it overrides the value specified at the global level (if any).") + @JsonProperty("launch_command") + public String getLaunchCommand() { + return launchCommand; + } + + @XmlElement(name = "launch_command") + public void setLaunchCommand(String launchCommand) { + this.launchCommand = launchCommand; + } + + /** + * Resource of this component (optional). If not specified, the service + * level global resource takes effect. + **/ + public Component resource(Resource resource) { + this.resource = resource; + return this; + } + + @ApiModelProperty(example = "null", value = "Resource of this component (optional). If not specified, the service level global resource takes effect.") + @JsonProperty("resource") + public Resource getResource() { + return resource; + } + + public void setResource(Resource resource) { + this.resource = resource; + } + + /** + * Number of containers for this component (optional). If not specified, + * the service level global number_of_containers takes effect. + **/ + public Component numberOfContainers(Long numberOfContainers) { + this.numberOfContainers = numberOfContainers; + return this; + } + + @ApiModelProperty(example = "null", value = "Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect.") + @JsonProperty("number_of_containers") + public Long getNumberOfContainers() { + return numberOfContainers; + } + + @XmlElement(name = "number_of_containers") + public void setNumberOfContainers(Long numberOfContainers) { + this.numberOfContainers = numberOfContainers; + } + + @ApiModelProperty(example = "null", value = "Containers of a started component. Specifying a value for this attribute for the POST payload raises a validation error. 
This blob is available only in the GET response of a started service.") + @JsonProperty("containers") + public List getContainers() { + return containers; + } + + public void setContainers(List containers) { + this.containers = containers; + } + + public void addContainer(Container container) { + this.containers.add(container); + } + + public void removeContainer(Container container) { + containers.remove(container); + } + public Container getContainer(String id) { + for (Container container : containers) { + if (container.getId().equals(id)) { + return container; + } + } + return null; + } + + /** + * Run all containers of this component in privileged mode (YARN-4262). + **/ + public Component runPrivilegedContainer(Boolean runPrivilegedContainer) { + this.runPrivilegedContainer = runPrivilegedContainer; + return this; + } + + @ApiModelProperty(example = "null", value = "Run all containers of this component in privileged mode (YARN-4262).") + @JsonProperty("run_privileged_container") + public Boolean getRunPrivilegedContainer() { + return runPrivilegedContainer; + } + + @XmlElement(name = "run_privileged_container") + public void setRunPrivilegedContainer(Boolean runPrivilegedContainer) { + this.runPrivilegedContainer = runPrivilegedContainer; + } + + /** + * Advanced scheduling and placement policies for all containers of this + * component (optional). If not specified, the service level placement_policy + * takes effect. Refer to the description at the global level for more + * details. + **/ + public Component placementPolicy(PlacementPolicy placementPolicy) { + this.placementPolicy = placementPolicy; + return this; + } + + @ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies for all containers of this component (optional). If not specified, the service level placement_policy takes effect. 
Refer to the description at the global level for more details.") + @JsonProperty("placement_policy") + public PlacementPolicy getPlacementPolicy() { + return placementPolicy; + } + + @XmlElement(name = "placement_policy") + public void setPlacementPolicy(PlacementPolicy placementPolicy) { + this.placementPolicy = placementPolicy; + } + + /** + * Config properties for this component. + **/ + public Component configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } + + @ApiModelProperty(example = "null", value = "Config properties for this component.") + @JsonProperty("configuration") + public Configuration getConfiguration() { + return configuration; + } + + public void setConfiguration(Configuration configuration) { + this.configuration = configuration; + } + + /** + * A list of quicklink keys defined at the service level, and to be + * resolved by this component. + **/ + public Component quicklinks(List quicklinks) { + this.quicklinks = quicklinks; + return this; + } + + @ApiModelProperty(example = "null", value = "A list of quicklink keys defined at the service level, and to be resolved by this component.") + @JsonProperty("quicklinks") + public List getQuicklinks() { + return quicklinks; + } + + public void setQuicklinks(List quicklinks) { + this.quicklinks = quicklinks; + } + + public Component state(ComponentState state) { + this.state = state; + return this; + } + + @ApiModelProperty(example = "null", value = "State of the component.") + @JsonProperty("state") + public ComponentState getState() { + return state; + } + + public void setState(ComponentState state) { + this.state = state; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Component component = (Component) o; + return Objects.equals(this.name, component.name) + && Objects.equals(this.dependencies, component.dependencies) + && 
Objects.equals(this.readinessCheck, component.readinessCheck) + && Objects.equals(this.artifact, component.artifact) + && Objects.equals(this.launchCommand, component.launchCommand) + && Objects.equals(this.resource, component.resource) + && Objects.equals(this.numberOfContainers, component.numberOfContainers) + && Objects.equals(this.runPrivilegedContainer, + component.runPrivilegedContainer) + && Objects.equals(this.placementPolicy, component.placementPolicy) + && Objects.equals(this.configuration, component.configuration) + && Objects.equals(this.quicklinks, component.quicklinks) + && Objects.equals(this.state, component.state); + } + + @Override + public int hashCode() { + return Objects.hash(name, dependencies, readinessCheck, artifact, + launchCommand, resource, numberOfContainers, + runPrivilegedContainer, placementPolicy, configuration, quicklinks, state); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class Component {\n"); + + sb.append(" name: ").append(toIndentedString(name)).append("\n"); + sb.append(" state: ").append(toIndentedString(state)).append("\n"); + sb.append(" dependencies: ").append(toIndentedString(dependencies)) + .append("\n"); + sb.append(" readinessCheck: ").append(toIndentedString(readinessCheck)) + .append("\n"); + sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n"); + sb.append(" launchCommand: ").append(toIndentedString(launchCommand)) + .append("\n"); + sb.append(" resource: ").append(toIndentedString(resource)).append("\n"); + sb.append(" numberOfContainers: ") + .append(toIndentedString(numberOfContainers)).append("\n"); + sb.append(" containers: ").append(toIndentedString(containers)) + .append("\n"); + sb.append(" runPrivilegedContainer: ") + .append(toIndentedString(runPrivilegedContainer)).append("\n"); + sb.append(" placementPolicy: ").append(toIndentedString(placementPolicy)) + .append("\n"); + sb.append(" configuration: 
").append(toIndentedString(configuration)) + .append("\n"); + sb.append(" quicklinks: ").append(toIndentedString(quicklinks)) + .append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } + + /** + * Merge from another component into this component without overwriting. + */ + public void mergeFrom(Component that) { + if (this.getArtifact() == null) { + this.setArtifact(that.getArtifact()); + } + if (this.getResource() == null) { + this.setResource(that.getResource()); + } + if (this.getNumberOfContainers() == null) { + this.setNumberOfContainers(that.getNumberOfContainers()); + } + if (this.getLaunchCommand() == null) { + this.setLaunchCommand(that.getLaunchCommand()); + } + this.getConfiguration().mergeFrom(that.getConfiguration()); + if (this.getQuicklinks() == null) { + this.setQuicklinks(that.getQuicklinks()); + } + if (this.getRunPrivilegedContainer() == null) { + this.setRunPrivilegedContainer(that.getRunPrivilegedContainer()); + } + if (this.getDependencies() == null) { + this.setDependencies(that.getDependencies()); + } + if (this.getPlacementPolicy() == null) { + this.setPlacementPolicy(that.getPlacementPolicy()); + } + if (this.getReadinessCheck() == null) { + this.setReadinessCheck(that.getReadinessCheck()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ComponentState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ComponentState.java new file mode 100644 index 00000000000..702a9ae2c31 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ComponentState.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "The current state of a component.") +public enum ComponentState { + FLEXING, STABLE +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java new file mode 100644 index 00000000000..984e6f7cfb4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java @@ -0,0 +1,233 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api.records; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlEnum; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * A config file that needs to be created and made available as a volume in an + * service component container. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "A config file that needs to be created and made available as a volume in an service component container.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@XmlRootElement +@JsonInclude(JsonInclude.Include.NON_NULL) +public class ConfigFile implements Serializable { + private static final long serialVersionUID = -7009402089417704612L; + + /** + * Config Type. XML, JSON, YAML, TEMPLATE and HADOOP_XML are supported. 
+ **/ + @XmlType(name = "config_type") + @XmlEnum + public enum TypeEnum { + XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE( + "TEMPLATE"), HADOOP_XML("HADOOP_XML"),; + + private String value; + + TypeEnum(String value) { + this.value = value; + } + + @Override + @JsonValue + public String toString() { + return value; + } + } + + private TypeEnum type = null; + private String destFile = null; + private String srcFile = null; + private Map properties = new HashMap<>(); + + public ConfigFile copy() { + ConfigFile copy = new ConfigFile(); + copy.setType(this.getType()); + copy.setSrcFile(this.getSrcFile()); + copy.setDestFile(this.getDestFile()); + if (this.getProperties() != null && !this.getProperties().isEmpty()) { + copy.getProperties().putAll(this.getProperties()); + } + return copy; + } + + /** + * Config file in the standard format like xml, properties, json, yaml, + * template. + **/ + public ConfigFile type(TypeEnum type) { + this.type = type; + return this; + } + + @ApiModelProperty(example = "null", value = "Config file in the standard format like xml, properties, json, yaml, template.") + @JsonProperty("type") + public TypeEnum getType() { + return type; + } + + public void setType(TypeEnum type) { + this.type = type; + } + + /** + * The absolute path that this configuration file should be mounted as, in the + * service container. 
+ **/ + public ConfigFile destFile(String destFile) { + this.destFile = destFile; + return this; + } + + @ApiModelProperty(example = "null", value = "The absolute path that this configuration file should be mounted as, in the service container.") + @JsonProperty("dest_file") + public String getDestFile() { + return destFile; + } + + @XmlElement(name = "dest_file") + public void setDestFile(String destFile) { + this.destFile = destFile; + } + + /** + * This provides the source location of the configuration file, the content + * of which is dumped to dest_file post property substitutions, in the format + * as specified in type. Typically the src_file would point to a source + * controlled network accessible file maintained by tools like puppet, chef, + * or hdfs etc. Currently, only hdfs is supported. + **/ + public ConfigFile srcFile(String srcFile) { + this.srcFile = srcFile; + return this; + } + + @ApiModelProperty(example = "null", value = "This provides the source location of the configuration file, " + + "the content of which is dumped to dest_file post property substitutions, in the format as specified in type. " + + "Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.") + @JsonProperty("src_file") + public String getSrcFile() { + return srcFile; + } + + @XmlElement(name = "src_file") + public void setSrcFile(String srcFile) { + this.srcFile = srcFile; + } + + /** + A blob of key value pairs that will be dumped in the dest_file in the format + as specified in type. If src_file is specified, src_file content are dumped + in the dest_file and these properties will overwrite, if any, existing + properties in src_file or be added as new properties in src_file. 
+ **/ + public ConfigFile properties(Map properties) { + this.properties = properties; + return this; + } + + @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type." + + " If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any," + + " existing properties in src_file or be added as new properties in src_file.") + @JsonProperty("properties") + public Map getProperties() { + return properties; + } + + public void setProperties(Map properties) { + this.properties = properties; + } + + public long getLong(String name, long defaultValue) { + if (name == null) { + return defaultValue; + } + String value = properties.get(name.trim()); + return Long.parseLong(value); + } + + public boolean getBoolean(String name, boolean defaultValue) { + if (name == null) { + return defaultValue; + } + return Boolean.valueOf(properties.get(name.trim())); + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ConfigFile configFile = (ConfigFile) o; + return Objects.equals(this.type, configFile.type) + && Objects.equals(this.destFile, configFile.destFile) + && Objects.equals(this.srcFile, configFile.srcFile); + } + + @Override + public int hashCode() { + return Objects.hash(type, destFile, srcFile, properties); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class ConfigFile {\n"); + + sb.append(" type: ").append(toIndentedString(type)).append("\n"); + sb.append(" destFile: ").append(toIndentedString(destFile)).append("\n"); + sb.append(" srcFile: ").append(toIndentedString(srcFile)).append("\n"); + sb.append(" properties: ").append(toIndentedString(properties)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each 
line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFormat.java new file mode 100644 index 00000000000..e10305acf4f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFormat.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api.records; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.util.Locale; + +@InterfaceAudience.Public +@InterfaceStability.Unstable +public enum ConfigFormat { + + JSON("json"), + PROPERTIES("properties"), + XML("xml"), + HADOOP_XML("hadoop_xml"), + ENV("env"), + TEMPLATE("template"), + YAML("yaml"), + ; + ConfigFormat(String suffix) { + this.suffix = suffix; + } + + private final String suffix; + + public String getSuffix() { + return suffix; + } + + + @Override + public String toString() { + return suffix; + } + + /** + * Get a matching format or null + * @param type + * @return the format + */ + public static ConfigFormat resolve(String type) { + for (ConfigFormat format: values()) { + if (format.getSuffix().equals(type.toLowerCase(Locale.ENGLISH))) { + return format; + } + } + return null; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Configuration.java new file mode 100644 index 00000000000..859c1ea7cd0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Configuration.java @@ -0,0 +1,225 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Set of configuration properties that can be injected into the service + * components via envs, files and custom pluggable helper docker containers. + * Files of several standard formats like xml, properties, json, yaml and + * templates will be supported. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "Set of configuration properties that can be injected into the service components via envs, files and custom pluggable helper docker containers. 
Files of several standard formats like xml, properties, json, yaml and templates will be supported.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Configuration implements Serializable { + private static final long serialVersionUID = -4330788704981074466L; + + private Map properties = new HashMap(); + private Map env = new HashMap(); + private List files = new ArrayList(); + + /** + * A blob of key-value pairs of common service properties. + **/ + public Configuration properties(Map properties) { + this.properties = properties; + return this; + } + + @ApiModelProperty(example = "null", value = "A blob of key-value pairs of common service properties.") + @JsonProperty("properties") + public Map getProperties() { + return properties; + } + + public void setProperties(Map properties) { + this.properties = properties; + } + + /** + * A blob of key-value pairs which will be appended to the default system + * properties and handed off to the service at start time. All placeholder + * references to properties will be substituted before injection. + **/ + public Configuration env(Map env) { + this.env = env; + return this; + } + + @ApiModelProperty(example = "null", value = "A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.") + @JsonProperty("env") + public Map getEnv() { + return env; + } + + public void setEnv(Map env) { + this.env = env; + } + + /** + * Array of list of files that needs to be created and made available as + * volumes in the service component containers. 
+ **/ + public Configuration files(List files) { + this.files = files; + return this; + } + + @ApiModelProperty(example = "null", value = "Array of list of files that needs to be created and made available as volumes in the service component containers.") + @JsonProperty("files") + public List getFiles() { + return files; + } + + public void setFiles(List files) { + this.files = files; + } + + public long getPropertyLong(String name, long defaultValue) { + String value = getProperty(name); + if (StringUtils.isEmpty(value)) { + return defaultValue; + } + return Long.parseLong(value); + } + + public int getPropertyInt(String name, int defaultValue) { + String value = getProperty(name); + if (StringUtils.isEmpty(value)) { + return defaultValue; + } + return Integer.parseInt(value); + } + + public boolean getPropertyBool(String name, boolean defaultValue) { + String value = getProperty(name); + if (StringUtils.isEmpty(value)) { + return defaultValue; + } + return Boolean.parseBoolean(value); + } + + public String getProperty(String name, String defaultValue) { + String value = getProperty(name); + if (StringUtils.isEmpty(value)) { + return defaultValue; + } + return value; + } + + public void setProperty(String name, String value) { + properties.put(name, value); + } + + public String getProperty(String name) { + return properties.get(name.trim()); + } + + public String getEnv(String name) { + return env.get(name.trim()); + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Configuration configuration = (Configuration) o; + return Objects.equals(this.properties, configuration.properties) + && Objects.equals(this.env, configuration.env) + && Objects.equals(this.files, configuration.files); + } + + @Override + public int hashCode() { + return Objects.hash(properties, env, files); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder(); + sb.append("class Configuration {\n"); + + sb.append(" properties: ").append(toIndentedString(properties)) + .append("\n"); + sb.append(" env: ").append(toIndentedString(env)).append("\n"); + sb.append(" files: ").append(toIndentedString(files)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } + + /** + * Merge all properties and envs from that configuration to this configration. + * For ConfigFiles, all properties and envs of that ConfigFile are merged into + * this ConfigFile. + */ + public synchronized void mergeFrom(Configuration that) { + ServiceUtils.mergeMapsIgnoreDuplicateKeys(this.properties, that + .getProperties()); + ServiceUtils.mergeMapsIgnoreDuplicateKeys(this.env, that.getEnv()); + + Map thatMap = new HashMap<>(); + for (ConfigFile file : that.getFiles()) { + thatMap.put(file.getDestFile(), file.copy()); + } + for (ConfigFile thisFile : files) { + if(thatMap.containsKey(thisFile.getDestFile())) { + ConfigFile thatFile = thatMap.get(thisFile.getDestFile()); + ServiceUtils.mergeMapsIgnoreDuplicateKeys(thisFile.getProperties(), + thatFile.getProperties()); + thatMap.remove(thisFile.getDestFile()); + } + } + // add remaining new files from that Configration + for (ConfigFile thatFile : thatMap.values()) { + files.add(thatFile.copy()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java new file mode 100644 index 00000000000..af065424ed6 --- 
/dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java @@ -0,0 +1,298 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.util.Date; +import java.util.Objects; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * An instance of a running service container. 
+ **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "An instance of a running service container") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@XmlRootElement +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Container extends BaseResource { + private static final long serialVersionUID = -8955788064529288L; + + private String id = null; + private Date launchTime = null; + private String ip = null; + private String hostname = null; + private String bareHost = null; + private ContainerState state = null; + private String componentInstanceName = null; + private Resource resource = null; + private Artifact artifact = null; + private Boolean privilegedContainer = null; + + /** + * Unique container id of a running service, e.g. + * container_e3751_1458061340047_0008_01_000002. + **/ + public Container id(String id) { + this.id = id; + return this; + } + + @ApiModelProperty(example = "null", value = "Unique container id of a running service, e.g. container_e3751_1458061340047_0008_01_000002.") + @JsonProperty("id") + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + /** + * The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. + * This will most likely be different from cluster launch time. + **/ + public Container launchTime(Date launchTime) { + this.launchTime = launchTime == null ? null : (Date) launchTime.clone(); + return this; + } + + @ApiModelProperty(example = "null", value = "The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time.") + @JsonProperty("launch_time") + public Date getLaunchTime() { + return launchTime == null ? null : (Date) launchTime.clone(); + } + + @XmlElement(name = "launch_time") + public void setLaunchTime(Date launchTime) { + this.launchTime = launchTime == null ? 
null : (Date) launchTime.clone(); + } + + /** + * IP address of a running container, e.g. 172.31.42.141. The IP address and + * hostname attribute values are dependent on the cluster/docker network setup + * as per YARN-4007. + **/ + public Container ip(String ip) { + this.ip = ip; + return this; + } + + @ApiModelProperty(example = "null", value = "IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.") + @JsonProperty("ip") + public String getIp() { + return ip; + } + + public void setIp(String ip) { + this.ip = ip; + } + + /** + * Fully qualified hostname of a running container, e.g. + * ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and + * hostname attribute values are dependent on the cluster/docker network setup + * as per YARN-4007. + **/ + public Container hostname(String hostname) { + this.hostname = hostname; + return this; + } + + @ApiModelProperty(example = "null", value = "Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.") + @JsonProperty("hostname") + public String getHostname() { + return hostname; + } + + public void setHostname(String hostname) { + this.hostname = hostname; + } + + /** + * The bare node or host in which the container is running, e.g. + * cn008.example.com. + **/ + public Container bareHost(String bareHost) { + this.bareHost = bareHost; + return this; + } + + @ApiModelProperty(example = "null", value = "The bare node or host in which the container is running, e.g. cn008.example.com.") + @JsonProperty("bare_host") + public String getBareHost() { + return bareHost; + } + + @XmlElement(name = "bare_host") + public void setBareHost(String bareHost) { + this.bareHost = bareHost; + } + + /** + * State of the container of an service. 
+ **/ + public Container state(ContainerState state) { + this.state = state; + return this; + } + + @ApiModelProperty(example = "null", value = "State of the container of an service.") + @JsonProperty("state") + public ContainerState getState() { + return state; + } + + public void setState(ContainerState state) { + this.state = state; + } + + /** + * Name of the component that this container instance belongs to. + **/ + public Container componentName(String componentName) { + this.componentInstanceName = componentName; + return this; + } + + @ApiModelProperty(example = "null", value = "Name of the component that this container instance belongs to.") + @JsonProperty("component_name") + public String getComponentInstanceName() { + return componentInstanceName; + } + + @XmlElement(name = "component_name") + public void setComponentInstanceName(String componentInstanceName) { + this.componentInstanceName = componentInstanceName; + } + + /** + * Resource used for this container. + **/ + public Container resource(Resource resource) { + this.resource = resource; + return this; + } + + @ApiModelProperty(example = "null", value = "Resource used for this container.") + @JsonProperty("resource") + public Resource getResource() { + return resource; + } + + public void setResource(Resource resource) { + this.resource = resource; + } + + /** + * Artifact used for this container. + **/ + public Container artifact(Artifact artifact) { + this.artifact = artifact; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact used for this container.") + @JsonProperty("artifact") + public Artifact getArtifact() { + return artifact; + } + + public void setArtifact(Artifact artifact) { + this.artifact = artifact; + } + + /** + * Container running in privileged mode or not. 
+ **/ + public Container privilegedContainer(Boolean privilegedContainer) { + this.privilegedContainer = privilegedContainer; + return this; + } + + @ApiModelProperty(example = "null", value = "Container running in privileged mode or not.") + @JsonProperty("privileged_container") + public Boolean getPrivilegedContainer() { + return privilegedContainer; + } + + public void setPrivilegedContainer(Boolean privilegedContainer) { + this.privilegedContainer = privilegedContainer; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Container container = (Container) o; + return Objects.equals(this.id, container.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class Container {\n"); + + sb.append(" id: ").append(toIndentedString(id)).append("\n"); + sb.append(" launchTime: ").append(toIndentedString(launchTime)) + .append("\n"); + sb.append(" ip: ").append(toIndentedString(ip)).append("\n"); + sb.append(" hostname: ").append(toIndentedString(hostname)).append("\n"); + sb.append(" bareHost: ").append(toIndentedString(bareHost)).append("\n"); + sb.append(" state: ").append(toIndentedString(state)).append("\n"); + sb.append(" componentInstanceName: ").append(toIndentedString( + componentInstanceName)) + .append("\n"); + sb.append(" resource: ").append(toIndentedString(resource)).append("\n"); + sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n"); + sb.append(" privilegedContainer: ") + .append(toIndentedString(privilegedContainer)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). 
+ */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ContainerState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ContainerState.java new file mode 100644 index 00000000000..bf09ff2442f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ContainerState.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * The current state of the container of an application. 
+ **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public enum ContainerState { + RUNNING_BUT_UNREADY, READY, STOPPED +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Error.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Error.java new file mode 100644 index 00000000000..c64b1b579ce --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Error.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModelProperty; + +import java.util.Objects; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Unstable +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +public class Error { + + private Integer code = null; + private String message = null; + private String fields = null; + + /** + **/ + public Error code(Integer code) { + this.code = code; + return this; + } + + @ApiModelProperty(example = "null", value = "") + @JsonProperty("code") + public Integer getCode() { + return code; + } + + public void setCode(Integer code) { + this.code = code; + } + + /** + **/ + public Error message(String message) { + this.message = message; + return this; + } + + @ApiModelProperty(example = "null", value = "") + @JsonProperty("message") + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + /** + **/ + public Error fields(String fields) { + this.fields = fields; + return this; + } + + @ApiModelProperty(example = "null", value = "") + @JsonProperty("fields") + public String getFields() { + return fields; + } + + public void setFields(String fields) { + this.fields = fields; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Error error = (Error) o; + return Objects.equals(this.code, error.code) + && Objects.equals(this.message, error.message) + && Objects.equals(this.fields, error.fields); + } + + @Override + public int hashCode() { + return Objects.hash(code, message, fields); + } + + @Override + public String toString() { + 
StringBuilder sb = new StringBuilder(); + sb.append("class Error {\n"); + + sb.append(" code: ").append(toIndentedString(code)).append("\n"); + sb.append(" message: ").append(toIndentedString(message)).append("\n"); + sb.append(" fields: ").append(toIndentedString(fields)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/PlacementPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/PlacementPolicy.java new file mode 100644 index 00000000000..6f6fe6fc82d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/PlacementPolicy.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.io.Serializable; +import java.util.Objects; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Placement policy of an instance of an service. This feature is in the + * works in YARN-4902. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "Placement policy of an instance of an service. This feature is in the works in YARN-4902.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +public class PlacementPolicy implements Serializable { + private static final long serialVersionUID = 4341110649551172231L; + + private String label = null; + + /** + * Assigns a service to a named partition of the cluster where the service + * desires to run (optional). If not specified all services are submitted to + * a default label of the service owner. One or more labels can be setup for + * each service owner account with required constraints like no-preemption, + * sla-99999, preemption-ok, etc. + **/ + public PlacementPolicy label(String label) { + this.label = label; + return this; + } + + @ApiModelProperty(example = "null", value = "Assigns a service to a named partition of the cluster where the service desires to run (optional). If not specified all services are submitted to a default label of the service owner. 
One or more labels can be setup for each service owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc.") + @JsonProperty("label") + public String getLabel() { + return label; + } + + public void setLabel(String label) { + this.label = label; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PlacementPolicy placementPolicy = (PlacementPolicy) o; + return Objects.equals(this.label, placementPolicy.label); + } + + @Override + public int hashCode() { + return Objects.hash(label); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class PlacementPolicy {\n"); + + sb.append(" label: ").append(toIndentedString(label)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java new file mode 100644 index 00000000000..0a3713c6d0a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java @@ -0,0 +1,183 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import javax.xml.bind.annotation.XmlEnum; +import javax.xml.bind.annotation.XmlType; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A custom command or a pluggable helper container to determine the readiness + * of a container of a component. Readiness for every service is different. + * Hence the need for a simple interface, with scope to support advanced + * usecases. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. 
Hence the need for a simple interface, with scope to support advanced usecases.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +public class ReadinessCheck implements Serializable { + private static final long serialVersionUID = -3836839816887186801L; + + /** + * Type. HTTP and PORT + **/ + @XmlType(name = "type") + @XmlEnum + public enum TypeEnum { + HTTP("HTTP"), + PORT("PORT"); + + private String value; + + TypeEnum(String value) { + this.value = value; + } + + @Override + @JsonValue + public String toString() { + return value; + } + } + + private TypeEnum type = null; + private Map properties = new HashMap(); + private Artifact artifact = null; + + /** + * E.g. HTTP (YARN will perform a simple REST call at a regular interval and + * expect a 204 No content). + **/ + public ReadinessCheck type(TypeEnum type) { + this.type = type; + return this; + } + + @ApiModelProperty(example = "null", value = "E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).") + @JsonProperty("type") + public TypeEnum getType() { + return type; + } + + public void setType(TypeEnum type) { + this.type = type; + } + + public ReadinessCheck properties(Map properties) { + this.properties = properties; + return this; + } + + public ReadinessCheck putPropsItem(String key, String propsItem) { + this.properties.put(key, propsItem); + return this; + } + + /** + * A blob of key value pairs that will be used to configure the check. + * @return properties + **/ + @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be used to configure the check.") + public Map getProperties() { + return properties; + } + + public void setProperties(Map properties) { + this.properties = properties; + } + + /** + * Artifact of the pluggable readiness check helper container (optional). 
If + * specified, this helper container typically hosts the http uri and + * encapsulates the complex scripts required to perform actual container + * readiness check. At the end it is expected to respond a 204 No content just + * like the simplified use case. This pluggable framework benefits service + * owners who can run services without any packaging modifications. Note, + * artifacts of type docker only is supported for now. + **/ + public ReadinessCheck artifact(Artifact artifact) { + this.artifact = artifact; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. 
Note, artifacts of type docker only is supported for now.") + @JsonProperty("artifact") + public Artifact getArtifact() { + return artifact; + } + + public void setArtifact(Artifact artifact) { + this.artifact = artifact; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReadinessCheck readinessCheck = (ReadinessCheck) o; + return Objects.equals(this.type, readinessCheck.type) && + Objects.equals(this.properties, readinessCheck.properties) && + Objects.equals(this.artifact, readinessCheck.artifact); + } + + @Override + public int hashCode() { + return Objects.hash(type, properties, artifact); + } + + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class ReadinessCheck {\n"); + + sb.append(" type: ").append(toIndentedString(type)).append("\n"); + sb.append(" properties: ").append(toIndentedString(properties)).append("\n"); + sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). 
+ */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java new file mode 100644 index 00000000000..dfdf92a01c5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.util.Objects; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Resource determines the amount of resources (vcores, memory, network, etc.) + * usable by a container. This field determines the resource to be applied for + * all the containers of a component or service. The resource specified at + * the service (or global) level can be overriden at the component level. Only one + * of profile OR cpu & memory are expected. It raises a validation + * exception otherwise. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overriden at the component level. Only one of profile OR cpu & memory are expected. It raises a validation exception otherwise.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +public class Resource extends BaseResource implements Cloneable { + private static final long serialVersionUID = -6431667797380250037L; + + private String profile = null; + private Integer cpus = 1; + private String memory = null; + + /** + * Each resource profile has a unique id which is associated with a + * cluster-level predefined memory, cpus, etc. 
+ **/ + public Resource profile(String profile) { + this.profile = profile; + return this; + } + + @ApiModelProperty(example = "null", value = "Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.") + @JsonProperty("profile") + public String getProfile() { + return profile; + } + + public void setProfile(String profile) { + this.profile = profile; + } + + /** + * Amount of vcores allocated to each container (optional but overrides cpus + * in profile if specified). + **/ + public Resource cpus(Integer cpus) { + this.cpus = cpus; + return this; + } + + @ApiModelProperty(example = "null", value = "Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).") + @JsonProperty("cpus") + public Integer getCpus() { + return cpus; + } + + public void setCpus(Integer cpus) { + this.cpus = cpus; + } + + /** + * Amount of memory allocated to each container (optional but overrides memory + * in profile if specified). Currently accepts only an integer value and + * default unit is in MB. + **/ + public Resource memory(String memory) { + this.memory = memory; + return this; + } + + @ApiModelProperty(example = "null", value = "Amount of memory allocated to each container (optional but overrides memory in profile if specified). 
Currently accepts only an integer value and default unit is in MB.") + @JsonProperty("memory") + public String getMemory() { + return memory; + } + + public void setMemory(String memory) { + this.memory = memory; + } + + @JsonIgnore + public long getMemoryMB() { + if (this.memory == null) { + return 0; + } + return Long.parseLong(memory); + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Resource resource = (Resource) o; + return Objects.equals(this.profile, resource.profile) + && Objects.equals(this.cpus, resource.cpus) + && Objects.equals(this.memory, resource.memory); + } + + @Override + public int hashCode() { + return Objects.hash(profile, cpus, memory); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class Resource {\n"); + + sb.append(" profile: ").append(toIndentedString(profile)).append("\n"); + sb.append(" cpus: ").append(toIndentedString(cpus)).append("\n"); + sb.append(" memory: ").append(toIndentedString(memory)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). 
+ */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } + + @Override + public Object clone() throws CloneNotSupportedException { + return super.clone(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java new file mode 100644 index 00000000000..77a2610fe6c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java @@ -0,0 +1,390 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api.records; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * An Service resource has the following attributes. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "An Service resource has the following attributes.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@XmlRootElement +@JsonInclude(JsonInclude.Include.NON_NULL) +@JsonPropertyOrder({ "name", "state", "resource", "number_of_containers", + "lifetime", "containers" }) +public class Service extends BaseResource { + private static final long serialVersionUID = -4491694636566094885L; + + private String name = null; + private String id = null; + private Artifact artifact = null; + private Resource resource = null; + private Date launchTime = null; + private Long numberOfRunningContainers = null; + private Long lifetime = null; + private PlacementPolicy placementPolicy = null; + private List components = new ArrayList<>(); + private Configuration configuration = new Configuration(); + private ServiceState state = null; + private Map quicklinks = new HashMap<>(); + private String queue = null; + + /** + * A unique service name. 
+ **/ + public Service name(String name) { + this.name = name; + return this; + } + + @ApiModelProperty(example = "null", required = true, value = "A unique service name.") + @JsonProperty("name") + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + /** + * A unique service id. + **/ + public Service id(String id) { + this.id = id; + return this; + } + + @ApiModelProperty(example = "null", value = "A unique service id.") + @JsonProperty("id") + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + /** + * Artifact of single-component services. Mandatory if components + * attribute is not specified. + **/ + public Service artifact(Artifact artifact) { + this.artifact = artifact; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact of single-component services. Mandatory if components attribute is not specified.") + @JsonProperty("artifact") + public Artifact getArtifact() { + return artifact; + } + + public void setArtifact(Artifact artifact) { + this.artifact = artifact; + } + + /** + * Resource of single-component services or the global default for + * multi-component services. Mandatory if it is a single-component + * service and if cpus and memory are not specified at the Service + * level. + **/ + public Service resource(Resource resource) { + this.resource = resource; + return this; + } + + @ApiModelProperty(example = "null", value = "Resource of single-component services or the global default for multi-component services. Mandatory if it is a single-component service and if cpus and memory are not specified at the Service level.") + @JsonProperty("resource") + public Resource getResource() { + return resource; + } + + public void setResource(Resource resource) { + this.resource = resource; + } + + /** + * The time when the service was created, e.g. 2016-03-16T01:01:49.000Z. 
+ **/ + public Service launchTime(Date launchTime) { + this.launchTime = launchTime == null ? null : (Date) launchTime.clone(); + return this; + } + + @ApiModelProperty(example = "null", value = "The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.") + @JsonProperty("launch_time") + public Date getLaunchTime() { + return launchTime == null ? null : (Date) launchTime.clone(); + } + + @XmlElement(name = "launch_time") + public void setLaunchTime(Date launchTime) { + this.launchTime = launchTime == null ? null : (Date) launchTime.clone(); + } + + /** + * In get response this provides the total number of running containers for + * this service (across all components) at the time of request. Note, a + * subsequent request can return a different number as and when more + * containers get allocated until it reaches the total number of containers or + * if a flex request has been made between the two requests. + **/ + public Service numberOfRunningContainers(Long numberOfRunningContainers) { + this.numberOfRunningContainers = numberOfRunningContainers; + return this; + } + + @ApiModelProperty(example = "null", value = "In get response this provides the total number of running containers for this service (across all components) at the time of request. Note, a subsequent request can return a different number as and when more containers get allocated until it reaches the total number of containers or if a flex request has been made between the two requests.") + @JsonProperty("number_of_running_containers") + public Long getNumberOfRunningContainers() { + return numberOfRunningContainers; + } + + @XmlElement(name = "number_of_running_containers") + public void setNumberOfRunningContainers(Long numberOfRunningContainers) { + this.numberOfRunningContainers = numberOfRunningContainers; + } + + /** + * Life time (in seconds) of the service from the time it reaches the + * RUNNING_BUT_UNREADY state (after which it is automatically destroyed by YARN). 
For + * unlimited lifetime do not set a lifetime value. + **/ + public Service lifetime(Long lifetime) { + this.lifetime = lifetime; + return this; + } + + @ApiModelProperty(example = "null", value = "Life time (in seconds) of the service from the time it reaches the RUNNING_BUT_UNREADY state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.") + @JsonProperty("lifetime") + public Long getLifetime() { + return lifetime; + } + + public void setLifetime(Long lifetime) { + this.lifetime = lifetime; + } + + /** + * Advanced scheduling and placement policies (optional). If not specified, it + * defaults to the default placement policy of the service owner. The design of + * placement policies are in the works. It is not very clear at this point, + * how policies in conjunction with labels be exposed to service owners. + * This is a placeholder for now. The advanced structure of this attribute + * will be determined by YARN-4902. + **/ + public Service placementPolicy(PlacementPolicy placementPolicy) { + this.placementPolicy = placementPolicy; + return this; + } + + @ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies (optional). If not specified, it defaults to the default placement policy of the service owner. The design of placement policies are in the works. It is not very clear at this point, how policies in conjunction with labels be exposed to service owners. This is a placeholder for now. The advanced structure of this attribute will be determined by YARN-4902.") + @JsonProperty("placement_policy") + public PlacementPolicy getPlacementPolicy() { + return placementPolicy; + } + + @XmlElement(name = "placement_policy") + public void setPlacementPolicy(PlacementPolicy placementPolicy) { + this.placementPolicy = placementPolicy; + } + + /** + * Components of an service. 
+ **/ + public Service components(List components) { + this.components = components; + return this; + } + + @ApiModelProperty(example = "null", value = "Components of an service.") + @JsonProperty("components") + public List getComponents() { + return components; + } + + public void setComponents(List components) { + this.components = components; + } + + public void addComponent(Component component) { + components.add(component); + } + + public Component getComponent(String name) { + for (Component component : components) { + if (component.getName().equals(name)) { + return component; + } + } + return null; + } + + /** + * Config properties of an service. Configurations provided at the + * service/global level are available to all the components. Specific + * properties can be overridden at the component level. + **/ + public Service configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } + + @ApiModelProperty(example = "null", value = "Config properties of an service. Configurations provided at the service/global level are available to all the components. Specific properties can be overridden at the component level.") + @JsonProperty("configuration") + public Configuration getConfiguration() { + return configuration; + } + + public void setConfiguration(Configuration configuration) { + this.configuration = configuration; + } + + /** + * State of the service. Specifying a value for this attribute for the + * POST payload raises a validation error. This attribute is available only in + * the GET response of a started service. + **/ + public Service state(ServiceState state) { + this.state = state; + return this; + } + + @ApiModelProperty(example = "null", value = "State of the service. Specifying a value for this attribute for the POST payload raises a validation error. 
This attribute is available only in the GET response of a started service.") + @JsonProperty("state") + public ServiceState getState() { + return state; + } + + public void setState(ServiceState state) { + this.state = state; + } + + /** + * A blob of key-value pairs of quicklinks to be exported for an service. + **/ + public Service quicklinks(Map quicklinks) { + this.quicklinks = quicklinks; + return this; + } + + @ApiModelProperty(example = "null", value = "A blob of key-value pairs of quicklinks to be exported for an service.") + @JsonProperty("quicklinks") + public Map getQuicklinks() { + return quicklinks; + } + + public void setQuicklinks(Map quicklinks) { + this.quicklinks = quicklinks; + } + + /** + * The YARN queue that this service should be submitted to. + **/ + public Service queue(String queue) { + this.queue = queue; + return this; + } + + @ApiModelProperty(example = "null", value = "The YARN queue that this service should be submitted to.") + @JsonProperty("queue") + public String getQueue() { + return queue; + } + + public void setQueue(String queue) { + this.queue = queue; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Service service = (Service) o; + return Objects.equals(this.name, service.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class Service {\n"); + + sb.append(" name: ").append(toIndentedString(name)).append("\n"); + sb.append(" id: ").append(toIndentedString(id)).append("\n"); + sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n"); + sb.append(" resource: ").append(toIndentedString(resource)).append("\n"); + sb.append(" launchTime: ").append(toIndentedString(launchTime)) + .append("\n"); + sb.append(" numberOfRunningContainers: ") + 
.append(toIndentedString(numberOfRunningContainers)).append("\n"); + sb.append(" lifetime: ").append(toIndentedString(lifetime)).append("\n"); + sb.append(" placementPolicy: ").append(toIndentedString(placementPolicy)) + .append("\n"); + sb.append(" components: ").append(toIndentedString(components)) + .append("\n"); + sb.append(" configuration: ").append(toIndentedString(configuration)) + .append("\n"); + sb.append(" state: ").append(toIndentedString(state)).append("\n"); + sb.append(" quicklinks: ").append(toIndentedString(quicklinks)) + .append("\n"); + sb.append(" queue: ").append(toIndentedString(queue)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceState.java new file mode 100644 index 00000000000..d2f5d060101 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceState.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * The current state of an service. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "The current state of an service.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +public enum ServiceState { + ACCEPTED, STARTED, STABLE, STOPPED, FAILED; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceStatus.java new file mode 100644 index 00000000000..2cee23c8e96 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceStatus.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.util.Objects; + +import javax.xml.bind.annotation.XmlRootElement; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * The current status of a submitted service, returned as a response to the + * GET API. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "The current status of a submitted service, returned as a response to the GET API.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@XmlRootElement +@JsonInclude(JsonInclude.Include.NON_NULL) +public class ServiceStatus extends BaseResource { + private static final long serialVersionUID = -3469885905347851034L; + + private String diagnostics = null; + private ServiceState state = null; + private Integer code = null; + + /** + * Diagnostic information (if any) for the reason of the current state of the + * service. It typically has a non-null value, if the service is in a + * non-running state. 
+ **/ + public ServiceStatus diagnostics(String diagnostics) { + this.diagnostics = diagnostics; + return this; + } + + @ApiModelProperty(example = "null", value = "Diagnostic information (if any) for the reason of the current state of the service. It typically has a non-null value, if the service is in a non-running state.") + @JsonProperty("diagnostics") + public String getDiagnostics() { + return diagnostics; + } + + public void setDiagnostics(String diagnostics) { + this.diagnostics = diagnostics; + } + + /** + * Service state. + **/ + public ServiceStatus state(ServiceState state) { + this.state = state; + return this; + } + + @ApiModelProperty(example = "null", value = "Service state.") + @JsonProperty("state") + public ServiceState getState() { + return state; + } + + public void setState(ServiceState state) { + this.state = state; + } + + /** + * An error code specific to a scenario which service owners should be able to use + * to understand the failure in addition to the diagnostic information. 
+ **/ + public ServiceStatus code(Integer code) { + this.code = code; + return this; + } + + @ApiModelProperty(example = "null", value = "An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information.") + @JsonProperty("code") + public Integer getCode() { + return code; + } + + public void setCode(Integer code) { + this.code = code; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ServiceStatus serviceStatus = (ServiceStatus) o; + return Objects.equals(this.diagnostics, serviceStatus.diagnostics) + && Objects.equals(this.state, serviceStatus.state) + && Objects.equals(this.code, serviceStatus.code); + } + + @Override + public int hashCode() { + return Objects.hash(diagnostics, state, code); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class ServiceStatus {\n"); + + sb.append(" diagnostics: ").append(toIndentedString(diagnostics)) + .append("\n"); + sb.append(" state: ").append(toIndentedString(state)).append("\n"); + sb.append(" code: ").append(toIndentedString(code)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). 
+ */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java new file mode 100644 index 00000000000..e17c0c4ac2d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.client.ServerProxy; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; + +import java.net.InetSocketAddress; + +import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_THEN_FAIL; + +public class ClientAMProxy extends ServerProxy{ + + public static T createProxy(final Configuration conf, + final Class protocol, final UserGroupInformation ugi, + final YarnRPC rpc, final InetSocketAddress serverAddress) { + Configuration confClone = new Configuration(conf); + confClone.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); + confClone.setInt(CommonConfigurationKeysPublic. + IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 0); + RetryPolicy retryPolicy; + + if (conf.getLong(YarnServiceConf.CLIENT_AM_RETRY_MAX_WAIT_MS, 0) == 0) { + // by default no retry + retryPolicy = TRY_ONCE_THEN_FAIL; + } else { + retryPolicy = + createRetryPolicy(conf, YarnServiceConf.CLIENT_AM_RETRY_MAX_WAIT_MS, + 15 * 60 * 1000, YarnServiceConf.CLIENT_AM_RETRY_MAX_INTERVAL_MS, + 2 * 1000); + } + return createRetriableProxy(confClone, protocol, ugi, rpc, serverAddress, + retryPolicy); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java new file mode 100644 index 00000000000..11cd30d633c --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -0,0 +1,960 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.client; + +import org.apache.commons.lang.StringUtils; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.retry.RetryNTimes; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.api.RegistryOperationsFactory; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; + +import org.apache.hadoop.yarn.api.records.*; +import org.apache.hadoop.yarn.client.api.AppAdminClient; +import org.apache.hadoop.yarn.client.api.YarnClient; +import org.apache.hadoop.yarn.client.api.YarnClientApplication; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto; +import 
org.apache.hadoop.yarn.service.ClientAMProtocol; +import org.apache.hadoop.yarn.service.ServiceMaster; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; +import org.apache.hadoop.yarn.service.conf.SliderExitCodes; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.containerlaunch.ClasspathConstructor; +import org.apache.hadoop.yarn.service.containerlaunch.JavaCommandLineBuilder; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; +import org.apache.hadoop.yarn.service.exceptions.BadConfigException; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.provider.ProviderUtils; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; +import org.apache.hadoop.yarn.service.utils.ZookeeperUtils; +import org.apache.hadoop.yarn.util.Records; +import org.apache.hadoop.yarn.util.Times; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.text.MessageFormat; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; + +import static org.apache.hadoop.yarn.api.records.YarnApplicationState.*; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*; +import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.jsonSerDeser; +import static org.apache.hadoop.yarn.service.utils.ServiceUtils.*; + +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class 
ServiceClient extends AppAdminClient implements SliderExitCodes, + YarnServiceConstants { + private static final Logger LOG = + LoggerFactory.getLogger(ServiceClient.class); + private SliderFileSystem fs; + //TODO disable retry so that client / rest API doesn't block? + protected YarnClient yarnClient; + // Avoid looking up applicationId from fs all the time. + private Map cachedAppIds = new ConcurrentHashMap<>(); + + private RegistryOperations registryClient; + private CuratorFramework curatorClient; + private YarnRPC rpc; + + private static EnumSet terminatedStates = + EnumSet.of(FINISHED, FAILED, KILLED); + private static EnumSet liveStates = + EnumSet.of(NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING); + private static EnumSet preRunningStates = + EnumSet.of(NEW, NEW_SAVING, SUBMITTED, ACCEPTED); + + @Override protected void serviceInit(Configuration configuration) + throws Exception { + fs = new SliderFileSystem(configuration); + yarnClient = YarnClient.createYarnClient(); + rpc = YarnRPC.create(configuration); + addService(yarnClient); + super.serviceInit(configuration); + } + + @Override + protected void serviceStop() throws Exception { + if (registryClient != null) { + registryClient.stop(); + } + super.serviceStop(); + } + + public Service loadAppJsonFromLocalFS(String fileName, String serviceName, + Long lifetime, String queue) throws IOException, YarnException { + File file = new File(fileName); + if (!file.exists() && fileName.equals(file.getName())) { + String examplesDirStr = System.getenv("YARN_SERVICE_EXAMPLES_DIR"); + String[] examplesDirs; + if (examplesDirStr == null) { + String yarnHome = System + .getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key()); + examplesDirs = new String[]{ + yarnHome + "/share/hadoop/yarn/yarn-service-examples", + yarnHome + "/yarn-service-examples" + }; + } else { + examplesDirs = StringUtils.split(examplesDirStr, ":"); + } + for (String dir : examplesDirs) { + file = new 
File(MessageFormat.format("{0}/{1}/{2}.json", + dir, fileName, fileName)); + if (file.exists()) { + break; + } + // Then look for secondary location. + file = new File(MessageFormat.format("{0}/{1}.json", + dir, fileName)); + if (file.exists()) { + break; + } + } + } + if (!file.exists()) { + throw new YarnException("File or example could not be found: " + + fileName); + } + Path filePath = new Path(file.getAbsolutePath()); + LOG.info("Loading service definition from local FS: " + filePath); + Service service = jsonSerDeser + .load(FileSystem.getLocal(getConfig()), filePath); + if (!StringUtils.isEmpty(serviceName)) { + service.setName(serviceName); + } + if (lifetime != null && lifetime > 0) { + service.setLifetime(lifetime); + } + if (!StringUtils.isEmpty(queue)) { + service.setQueue(queue); + } + return service; + } + + public int actionSave(String fileName, String serviceName, Long lifetime, + String queue) throws IOException, YarnException { + return actionBuild(loadAppJsonFromLocalFS(fileName, serviceName, + lifetime, queue)); + } + + public int actionBuild(Service service) + throws YarnException, IOException { + Path appDir = checkAppNotExistOnHdfs(service); + ServiceApiUtil.validateAndResolveService(service, fs, getConfig()); + createDirAndPersistApp(appDir, service); + return EXIT_SUCCESS; + } + + public int actionLaunch(String fileName, String serviceName, Long lifetime, + String queue) throws IOException, YarnException { + actionCreate(loadAppJsonFromLocalFS(fileName, serviceName, lifetime, + queue)); + return EXIT_SUCCESS; + } + + public ApplicationId actionCreate(Service service) + throws IOException, YarnException { + String serviceName = service.getName(); + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + ServiceApiUtil.validateAndResolveService(service, fs, getConfig()); + verifyNoLiveAppInRM(serviceName, "create"); + Path appDir = checkAppNotExistOnHdfs(service); + + // Write the definition first and then submit - AM will read the 
definition + createDirAndPersistApp(appDir, service); + ApplicationId appId = submitApp(service); + cachedAppIds.put(serviceName, appId); + service.setId(appId.toString()); + // update app definition with appId + persistAppDef(appDir, service); + return appId; + } + + public int actionFlex(String serviceName, Map + componentCountStrings) throws YarnException, IOException { + Map componentCounts = + new HashMap<>(componentCountStrings.size()); + Service persistedService = + ServiceApiUtil.loadService(fs, serviceName); + if (!StringUtils.isEmpty(persistedService.getId())) { + cachedAppIds.put(persistedService.getName(), + ApplicationId.fromString(persistedService.getId())); + } else { + throw new YarnException(persistedService.getName() + + " appId is null, may be not submitted to YARN yet"); + } + + for (Map.Entry entry : componentCountStrings.entrySet()) { + String compName = entry.getKey(); + ServiceApiUtil.validateNameFormat(compName, getConfig()); + Component component = persistedService.getComponent(compName); + if (component == null) { + throw new IllegalArgumentException(entry.getKey() + " does not exist !"); + } + long numberOfContainers = + parseNumberOfContainers(component, entry.getValue()); + componentCounts.put(compName, numberOfContainers); + } + flexComponents(serviceName, componentCounts, persistedService); + return EXIT_SUCCESS; + } + + // Parse the number of containers requested by user, e.g. + // +5 means add 5 additional containers + // -5 means reduce 5 containers, if it goes to negative, sets it to 0 + // 5 means sets it to 5 containers. 
+ private long parseNumberOfContainers(Component component, String newNumber) { + + long orig = component.getNumberOfContainers(); + if (newNumber.startsWith("+")) { + return orig + Long.parseLong(newNumber.substring(1)); + } else if (newNumber.startsWith("-")) { + long ret = orig - Long.parseLong(newNumber.substring(1)); + if (ret < 0) { + LOG.warn(MessageFormat.format( + "[COMPONENT {}]: component count goes to negative ({}{} = {}), reset it to 0.", + component.getName(), orig, newNumber, ret)); + ret = 0; + } + return ret; + } else { + return Long.parseLong(newNumber); + } + } + + // Called by Rest Service + public Map flexByRestService(String serviceName, + Map componentCounts) throws YarnException, IOException { + // load app definition + Service persistedService = ServiceApiUtil.loadService(fs, serviceName); + if (StringUtils.isEmpty(persistedService.getId())) { + throw new YarnException( + serviceName + " appId is null, may be not submitted to YARN yet"); + } + cachedAppIds.put(persistedService.getName(), + ApplicationId.fromString(persistedService.getId())); + return flexComponents(serviceName, componentCounts, persistedService); + } + + private Map flexComponents(String serviceName, + Map componentCounts, Service persistedService) + throws YarnException, IOException { + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + + Map original = new HashMap<>(componentCounts.size()); + + ComponentCountProto.Builder countBuilder = ComponentCountProto.newBuilder(); + FlexComponentsRequestProto.Builder requestBuilder = + FlexComponentsRequestProto.newBuilder(); + + for (Component persistedComp : persistedService.getComponents()) { + String name = persistedComp.getName(); + if (componentCounts.containsKey(persistedComp.getName())) { + original.put(name, persistedComp.getNumberOfContainers()); + persistedComp.setNumberOfContainers(componentCounts.get(name)); + + // build the request + countBuilder.setName(persistedComp.getName()) + 
.setNumberOfContainers(persistedComp.getNumberOfContainers()); + requestBuilder.addComponents(countBuilder.build()); + } + } + if (original.size() < componentCounts.size()) { + componentCounts.keySet().removeAll(original.keySet()); + throw new YarnException("Components " + componentCounts.keySet() + + " do not exist in app definition."); + } + jsonSerDeser + .save(fs.getFileSystem(), ServiceApiUtil.getServiceJsonPath(fs, serviceName), + persistedService, true); + + ApplicationReport appReport = + yarnClient.getApplicationReport(getAppId(serviceName)); + if (appReport.getYarnApplicationState() != RUNNING) { + String message = + serviceName + " is at " + appReport.getYarnApplicationState() + + " state, flex can only be invoked when service is running"; + LOG.error(message); + throw new YarnException(message); + } + if (StringUtils.isEmpty(appReport.getHost())) { + throw new YarnException(serviceName + " AM hostname is empty"); + } + ClientAMProtocol proxy = + createAMProxy(appReport.getHost(), appReport.getRpcPort()); + proxy.flexComponents(requestBuilder.build()); + for (Map.Entry entry : original.entrySet()) { + LOG.info("[COMPONENT {}]: number of containers changed from {} to {}", + entry.getKey(), entry.getValue(), + componentCounts.get(entry.getKey())); + } + return original; + } + + public int actionStop(String serviceName) + throws YarnException, IOException { + return actionStop(serviceName, true); + } + + public int actionStop(String serviceName, boolean waitForAppStopped) + throws YarnException, IOException { + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + ApplicationId currentAppId = getAppId(serviceName); + ApplicationReport report = yarnClient.getApplicationReport(currentAppId); + if (terminatedStates.contains(report.getYarnApplicationState())) { + LOG.info("Service {} is already in a terminated state {}", serviceName, + report.getYarnApplicationState()); + return EXIT_SUCCESS; + } + if 
(preRunningStates.contains(report.getYarnApplicationState())) { + String msg = serviceName + " is at " + report.getYarnApplicationState() + + ", forcefully killed by user!"; + yarnClient.killApplication(currentAppId, msg); + LOG.info(msg); + return EXIT_SUCCESS; + } + if (StringUtils.isEmpty(report.getHost())) { + throw new YarnException(serviceName + " AM hostname is empty"); + } + LOG.info("Stopping service {}, with appId = {}", serviceName, currentAppId); + try { + ClientAMProtocol proxy = + createAMProxy(report.getHost(), report.getRpcPort()); + cachedAppIds.remove(serviceName); + if (proxy != null) { + // try to stop the app gracefully. + StopRequestProto request = StopRequestProto.newBuilder().build(); + proxy.stop(request); + LOG.info("Service " + serviceName + " is being gracefully stopped..."); + } else { + yarnClient.killApplication(currentAppId, + serviceName + " is forcefully killed by user!"); + LOG.info("Forcefully kill the service: " + serviceName); + return EXIT_SUCCESS; + } + + if (!waitForAppStopped) { + return EXIT_SUCCESS; + } + // Wait until the app is killed. + long startTime = System.currentTimeMillis(); + int pollCount = 0; + while (true) { + Thread.sleep(2000); + report = yarnClient.getApplicationReport(currentAppId); + if (terminatedStates.contains(report.getYarnApplicationState())) { + LOG.info("Service " + serviceName + " is stopped."); + break; + } + // Forcefully kill after 10 seconds. 
+ if ((System.currentTimeMillis() - startTime) > 10000) { + LOG.info("Stop operation timeout stopping, forcefully kill the app " + + serviceName); + yarnClient.killApplication(currentAppId, + "Forcefully kill the app by user"); + break; + } + if (++pollCount % 10 == 0) { + LOG.info("Waiting for service " + serviceName + " to be stopped."); + } + } + } catch (IOException | YarnException | InterruptedException e) { + LOG.info("Failed to stop " + serviceName + + " gracefully, forcefully kill the app."); + yarnClient.killApplication(currentAppId, "Forcefully kill the app"); + } + return EXIT_SUCCESS; + } + + public int actionDestroy(String serviceName) throws YarnException, + IOException { + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + verifyNoLiveAppInRM(serviceName, "destroy"); + + Path appDir = fs.buildClusterDirPath(serviceName); + FileSystem fileSystem = fs.getFileSystem(); + // remove from the appId cache + cachedAppIds.remove(serviceName); + if (fileSystem.exists(appDir)) { + if (fileSystem.delete(appDir, true)) { + LOG.info("Successfully deleted service dir for " + serviceName + ": " + + appDir); + } else { + String message = + "Failed to delete service + " + serviceName + " at: " + appDir; + LOG.info(message); + throw new YarnException(message); + } + } + try { + deleteZKNode(serviceName); + } catch (Exception e) { + throw new IOException("Could not delete zk node for " + serviceName, e); + } + String registryPath = ServiceRegistryUtils.registryPathForInstance(serviceName); + try { + getRegistryClient().delete(registryPath, true); + } catch (IOException e) { + LOG.warn("Error deleting registry entry {}", registryPath, e); + } + LOG.info("Destroyed cluster {}", serviceName); + return EXIT_SUCCESS; + } + + private synchronized RegistryOperations getRegistryClient() + throws SliderException, IOException { + + if (registryClient == null) { + registryClient = + RegistryOperationsFactory.createInstance("ServiceClient", getConfig()); + 
registryClient.init(getConfig()); + registryClient.start(); + } + return registryClient; + } + + private void deleteZKNode(String clusterName) throws Exception { + CuratorFramework curatorFramework = getCuratorClient(); + String user = RegistryUtils.currentUser(); + String zkPath = ServiceRegistryUtils.mkClusterPath(user, clusterName); + if (curatorFramework.checkExists().forPath(zkPath) != null) { + curatorFramework.delete().deletingChildrenIfNeeded().forPath(zkPath); + LOG.info("Deleted zookeeper path: " + zkPath); + } + } + + private synchronized CuratorFramework getCuratorClient() + throws BadConfigException { + String registryQuorum = + getConfig().get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM); + + // though if neither is set: trouble + if (ServiceUtils.isUnset(registryQuorum)) { + throw new BadConfigException( + "No Zookeeper quorum provided in the" + " configuration property " + + RegistryConstants.KEY_REGISTRY_ZK_QUORUM); + } + ZookeeperUtils.splitToHostsAndPortsStrictly(registryQuorum); + + if (curatorClient == null) { + curatorClient = + CuratorFrameworkFactory.builder().connectString(registryQuorum) + .sessionTimeoutMs(10000).retryPolicy(new RetryNTimes(5, 2000)) + .build(); + curatorClient.start(); + } + return curatorClient; + } + + private void verifyNoLiveAppInRM(String serviceName, String action) + throws IOException, YarnException { + Set types = new HashSet<>(1); + types.add(YarnServiceConstants.APP_TYPE); + Set tags = null; + if (serviceName != null) { + tags = Collections.singleton(ServiceUtils.createNameTag(serviceName)); + } + GetApplicationsRequest request = GetApplicationsRequest.newInstance(); + request.setApplicationTypes(types); + request.setApplicationTags(tags); + request.setApplicationStates(liveStates); + List reports = yarnClient.getApplications(request); + if (!reports.isEmpty()) { + String message = ""; + if (action.equals("destroy")) { + message = "Failed to destroy service " + serviceName + + ", because it is still running."; + } 
else { + message = "Failed to " + action + " service " + serviceName + + ", because it already exists."; + } + throw new YarnException(message); + } + } + + private ApplicationId submitApp(Service app) + throws IOException, YarnException { + String serviceName = app.getName(); + Configuration conf = getConfig(); + Path appRootDir = fs.buildClusterDirPath(app.getName()); + + YarnClientApplication yarnApp = yarnClient.createApplication(); + ApplicationSubmissionContext submissionContext = + yarnApp.getApplicationSubmissionContext(); + ServiceApiUtil.validateCompResourceSize( + yarnApp.getNewApplicationResponse().getMaximumResourceCapability(), + app); + + submissionContext.setKeepContainersAcrossApplicationAttempts(true); + if (app.getLifetime() > 0) { + Map appTimeout = new HashMap<>(); + appTimeout.put(ApplicationTimeoutType.LIFETIME, app.getLifetime()); + submissionContext.setApplicationTimeouts(appTimeout); + } + submissionContext.setMaxAppAttempts(YarnServiceConf + .getInt(YarnServiceConf.AM_RESTART_MAX, 20, app.getConfiguration(), + conf)); + + setLogAggregationContext(app, conf, submissionContext); + + Map localResources = new HashMap<>(); + + // copy local slideram-log4j.properties to hdfs and add to localResources + boolean hasAMLog4j = + addAMLog4jResource(serviceName, conf, localResources); + // copy jars to hdfs and add to localResources + addJarResource(serviceName, localResources); + // add keytab if in secure env + addKeytabResourceIfSecure(fs, localResources, conf, serviceName); + if (LOG.isDebugEnabled()) { + printLocalResources(localResources); + } + Map env = addAMEnv(); + + // create AM CLI + String cmdStr = buildCommandLine(serviceName, conf, appRootDir, hasAMLog4j); + submissionContext.setResource(Resource.newInstance(YarnServiceConf + .getLong(YarnServiceConf.AM_RESOURCE_MEM, + YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM, app.getConfiguration(), + conf), 1)); + String queue = app.getQueue(); + if (StringUtils.isEmpty(queue)) { + queue = 
conf.get(YARN_QUEUE, "default"); + } + submissionContext.setQueue(queue); + submissionContext.setApplicationName(serviceName); + submissionContext.setApplicationType(YarnServiceConstants.APP_TYPE); + Set appTags = + AbstractClientProvider.createApplicationTags(serviceName, null, null); + if (!appTags.isEmpty()) { + submissionContext.setApplicationTags(appTags); + } + ContainerLaunchContext amLaunchContext = + Records.newRecord(ContainerLaunchContext.class); + amLaunchContext.setCommands(Collections.singletonList(cmdStr)); + amLaunchContext.setEnvironment(env); + amLaunchContext.setLocalResources(localResources); + submissionContext.setAMContainerSpec(amLaunchContext); + yarnClient.submitApplication(submissionContext); + return submissionContext.getApplicationId(); + } + + private void setLogAggregationContext(Service app, Configuration conf, + ApplicationSubmissionContext submissionContext) { + LogAggregationContext context = Records.newRecord(LogAggregationContext + .class); + String finalLogInclude = YarnServiceConf.get + (FINAL_LOG_INCLUSION_PATTERN, null, app.getConfiguration(), conf); + if (!StringUtils.isEmpty(finalLogInclude)) { + context.setIncludePattern(finalLogInclude); + } + String finalLogExclude = YarnServiceConf.get + (FINAL_LOG_EXCLUSION_PATTERN, null, app.getConfiguration(), conf); + if (!StringUtils.isEmpty(finalLogExclude)) { + context.setExcludePattern(finalLogExclude); + } + String rollingLogInclude = YarnServiceConf.get + (ROLLING_LOG_INCLUSION_PATTERN, null, app.getConfiguration(), conf); + if (!StringUtils.isEmpty(rollingLogInclude)) { + context.setRolledLogsIncludePattern(rollingLogInclude); + } + String rollingLogExclude = YarnServiceConf.get + (ROLLING_LOG_EXCLUSION_PATTERN, null, app.getConfiguration(), conf); + if (!StringUtils.isEmpty(rollingLogExclude)) { + context.setRolledLogsExcludePattern(rollingLogExclude); + } + submissionContext.setLogAggregationContext(context); + } + + private void printLocalResources(Map map) { + 
LOG.debug("Added LocalResource for localization: "); + StringBuilder builder = new StringBuilder(); + for (Map.Entry entry : map.entrySet()) { + builder.append(entry.getKey()).append(" -> ") + .append(entry.getValue().getResource().getFile()) + .append(System.lineSeparator()); + } + LOG.debug(builder.toString()); + } + + private String buildCommandLine(String serviceName, Configuration conf, + Path appRootDir, boolean hasSliderAMLog4j) throws BadConfigException { + JavaCommandLineBuilder CLI = new JavaCommandLineBuilder(); + CLI.forceIPv4().headless(); + //TODO CLI.setJVMHeap + //TODO CLI.addJVMOPTS + if (hasSliderAMLog4j) { + CLI.sysprop(SYSPROP_LOG4J_CONFIGURATION, YARN_SERVICE_LOG4J_FILENAME); + CLI.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR); + } + CLI.add(ServiceMaster.class.getCanonicalName()); + //TODO debugAM CLI.add(Arguments.ARG_DEBUG) + CLI.add("-" + ServiceMaster.YARNFILE_OPTION, new Path(appRootDir, + serviceName + ".json")); + // pass the registry binding + CLI.addConfOptionToCLI(conf, RegistryConstants.KEY_REGISTRY_ZK_ROOT, + RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT); + CLI.addMandatoryConfOption(conf, RegistryConstants.KEY_REGISTRY_ZK_QUORUM); + + // write out the path output + CLI.addOutAndErrFiles(STDOUT_AM, STDERR_AM); + String cmdStr = CLI.build(); + LOG.debug("AM launch command: {}", cmdStr); + return cmdStr; + } + + private Map addAMEnv() throws IOException { + Map env = new HashMap<>(); + ClasspathConstructor classpath = + buildClasspath(YarnServiceConstants.SUBMITTED_CONF_DIR, "lib", fs, getConfig() + .getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)); + env.put("CLASSPATH", classpath.buildClasspath()); + env.put("LANG", "en_US.UTF-8"); + env.put("LC_ALL", "en_US.UTF-8"); + env.put("LANGUAGE", "en_US.UTF-8"); + String jaas = System.getenv("HADOOP_JAAS_DEBUG"); + if (jaas != null) { + env.put("HADOOP_JAAS_DEBUG", jaas); + } + if (!UserGroupInformation.isSecurityEnabled()) { + String userName = 
UserGroupInformation.getCurrentUser().getUserName(); + LOG.debug("Run as user " + userName); + // HADOOP_USER_NAME env is used by UserGroupInformation when log in + // This env makes AM run as this user + env.put("HADOOP_USER_NAME", userName); + } + LOG.debug("AM env: \n{}", stringifyMap(env)); + return env; + } + + protected Path addJarResource(String serviceName, + Map localResources) + throws IOException, SliderException { + Path libPath = fs.buildClusterDirPath(serviceName); + ProviderUtils + .addProviderJar(localResources, ServiceMaster.class, SERVICE_CORE_JAR, fs, + libPath, "lib", false); + Path dependencyLibTarGzip = fs.getDependencyTarGzip(); + if (fs.isFile(dependencyLibTarGzip)) { + LOG.debug("Loading lib tar from " + fs.getFileSystem().getScheme() + ":/" + + dependencyLibTarGzip); + fs.submitTarGzipAndUpdate(localResources); + } else { + String[] libs = ServiceUtils.getLibDirs(); + LOG.info("Uploading all dependency jars to HDFS. For faster submission of" + + " apps, pre-upload dependency jars to HDFS " + + "using command: yarn app -enableFastLaunch"); + for (String libDirProp : libs) { + ProviderUtils.addAllDependencyJars(localResources, fs, libPath, "lib", + libDirProp); + } + } + return libPath; + } + + private boolean addAMLog4jResource(String serviceName, Configuration conf, + Map localResources) + throws IOException, BadClusterStateException { + boolean hasAMLog4j = false; + String hadoopConfDir = + System.getenv(ApplicationConstants.Environment.HADOOP_CONF_DIR.name()); + if (hadoopConfDir != null) { + File localFile = + new File(hadoopConfDir, YarnServiceConstants.YARN_SERVICE_LOG4J_FILENAME); + if (localFile.exists()) { + Path localFilePath = createLocalPath(localFile); + Path appDirPath = fs.buildClusterDirPath(serviceName); + Path remoteConfPath = + new Path(appDirPath, YarnServiceConstants.SUBMITTED_CONF_DIR); + Path remoteFilePath = + new Path(remoteConfPath, YarnServiceConstants.YARN_SERVICE_LOG4J_FILENAME); + copy(conf, localFilePath, 
remoteFilePath); + LocalResource localResource = + fs.createAmResource(remoteConfPath, LocalResourceType.FILE); + localResources.put(localFilePath.getName(), localResource); + hasAMLog4j = true; + } else { + LOG.warn("AM log4j property file doesn't exist: " + localFile); + } + } + return hasAMLog4j; + } + + public int actionStart(String serviceName) throws YarnException, IOException { + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + Path appDir = checkAppExistOnHdfs(serviceName); + Service service = ServiceApiUtil.loadService(fs, serviceName); + ServiceApiUtil.validateAndResolveService(service, fs, getConfig()); + // see if it is actually running and bail out; + verifyNoLiveAppInRM(serviceName, "thaw"); + ApplicationId appId = submitApp(service); + service.setId(appId.toString()); + // write app definition on to hdfs + Path appJson = persistAppDef(appDir, service); + LOG.info("Persisted service " + service.getName() + " at " + appJson); + return 0; + } + + private Path checkAppNotExistOnHdfs(Service service) + throws IOException, SliderException { + Path appDir = fs.buildClusterDirPath(service.getName()); + fs.verifyDirectoryNonexistent( + new Path(appDir, service.getName() + ".json")); + return appDir; + } + + private Path checkAppExistOnHdfs(String serviceName) + throws IOException, SliderException { + Path appDir = fs.buildClusterDirPath(serviceName); + fs.verifyPathExists(new Path(appDir, serviceName + ".json")); + return appDir; + } + + private void createDirAndPersistApp(Path appDir, Service service) + throws IOException, SliderException { + FsPermission appDirPermission = new FsPermission("750"); + fs.createWithPermissions(appDir, appDirPermission); + Path appJson = persistAppDef(appDir, service); + LOG.info("Persisted service " + service.getName() + " at " + appJson); + } + + private Path persistAppDef(Path appDir, Service service) throws IOException { + Path appJson = new Path(appDir, service.getName() + ".json"); + 
jsonSerDeser.save(fs.getFileSystem(), appJson, service, true); + return appJson; + } + + private void addKeytabResourceIfSecure(SliderFileSystem fileSystem, + Map localResource, Configuration conf, + String serviceName) throws IOException, BadConfigException { + if (!UserGroupInformation.isSecurityEnabled()) { + return; + } + String keytabPreInstalledOnHost = + conf.get(YarnServiceConf.KEY_AM_KEYTAB_LOCAL_PATH); + if (StringUtils.isEmpty(keytabPreInstalledOnHost)) { + String amKeytabName = + conf.get(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_NAME); + String keytabDir = conf.get(YarnServiceConf.KEY_HDFS_KEYTAB_DIR); + Path keytabPath = + fileSystem.buildKeytabPath(keytabDir, amKeytabName, serviceName); + if (fileSystem.getFileSystem().exists(keytabPath)) { + LocalResource keytabRes = + fileSystem.createAmResource(keytabPath, LocalResourceType.FILE); + localResource + .put(YarnServiceConstants.KEYTAB_DIR + "/" + amKeytabName, keytabRes); + LOG.info("Adding AM keytab on hdfs: " + keytabPath); + } else { + LOG.warn("No keytab file was found at {}.", keytabPath); + if (conf.getBoolean(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_REQUIRED, false)) { + throw new BadConfigException("No keytab file was found at %s.", + keytabPath); + } else { + LOG.warn("The AM will be " + + "started without a kerberos authenticated identity. 
" + + "The service is therefore not guaranteed to remain " + + "operational beyond 24 hours."); + } + } + } + } + + public String updateLifetime(String serviceName, long lifetime) + throws YarnException, IOException { + ApplicationId currentAppId = getAppId(serviceName); + ApplicationReport report = yarnClient.getApplicationReport(currentAppId); + if (report == null) { + throw new YarnException("Service not found for " + serviceName); + } + ApplicationId appId = report.getApplicationId(); + LOG.info("Updating lifetime of an service: serviceName = " + serviceName + + ", appId = " + appId + ", lifetime = " + lifetime); + Map map = new HashMap<>(); + String newTimeout = + Times.formatISO8601(System.currentTimeMillis() + lifetime * 1000); + map.put(ApplicationTimeoutType.LIFETIME, newTimeout); + UpdateApplicationTimeoutsRequest request = + UpdateApplicationTimeoutsRequest.newInstance(appId, map); + yarnClient.updateApplicationTimeouts(request); + LOG.info( + "Successfully updated lifetime for an service: serviceName = " + serviceName + + ", appId = " + appId + ". 
New expiry time in ISO8601 format is " + + newTimeout); + return newTimeout; + } + + public ServiceState convertState(FinalApplicationStatus status) { + switch (status) { + case UNDEFINED: + return ServiceState.ACCEPTED; + case FAILED: + case KILLED: + return ServiceState.FAILED; + case ENDED: + case SUCCEEDED: + return ServiceState.STOPPED; + } + return ServiceState.ACCEPTED; + } + + public String getStatusString(String appId) + throws IOException, YarnException { + ApplicationReport appReport = + yarnClient.getApplicationReport(ApplicationId.fromString(appId)); + + if (appReport.getYarnApplicationState() != RUNNING) { + return ""; + } + if (StringUtils.isEmpty(appReport.getHost())) { + return ""; + } + ClientAMProtocol amProxy = + createAMProxy(appReport.getHost(), appReport.getRpcPort()); + GetStatusResponseProto response = + amProxy.getStatus(GetStatusRequestProto.newBuilder().build()); + return response.getStatus(); + } + + public Service getStatus(String serviceName) + throws IOException, YarnException { + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + ApplicationId currentAppId = getAppId(serviceName); + ApplicationReport appReport = yarnClient.getApplicationReport(currentAppId); + Service appSpec = new Service(); + appSpec.setName(serviceName); + appSpec.setState(convertState(appReport.getFinalApplicationStatus())); + ApplicationTimeout lifetime = + appReport.getApplicationTimeouts().get(ApplicationTimeoutType.LIFETIME); + if (lifetime != null) { + appSpec.setLifetime(lifetime.getRemainingTime()); + } + + if (appReport.getYarnApplicationState() != RUNNING) { + LOG.info("Service {} is at {} state", serviceName, + appReport.getYarnApplicationState()); + return appSpec; + } + if (StringUtils.isEmpty(appReport.getHost())) { + LOG.warn(serviceName + " AM hostname is empty"); + return appSpec; + } + ClientAMProtocol amProxy = + createAMProxy(appReport.getHost(), appReport.getRpcPort()); + GetStatusResponseProto response = + 
amProxy.getStatus(GetStatusRequestProto.newBuilder().build()); + appSpec = jsonSerDeser.fromJson(response.getStatus()); + + return appSpec; + } + + public YarnClient getYarnClient() { + return this.yarnClient; + } + + public int enableFastLaunch() throws IOException, YarnException { + return actionDependency(true); + } + + public int actionDependency(boolean overwrite) + throws IOException, YarnException { + String currentUser = RegistryUtils.currentUser(); + LOG.info("Running command as user {}", currentUser); + + Path dependencyLibTarGzip = fs.getDependencyTarGzip(); + + // Check if dependency has already been uploaded, in which case log + // appropriately and exit success (unless overwrite has been requested) + if (fs.isFile(dependencyLibTarGzip) && !overwrite) { + System.out.println(String.format( + "Dependency libs are already uploaded to %s.", dependencyLibTarGzip + .toUri())); + return EXIT_SUCCESS; + } + + String[] libDirs = ServiceUtils.getLibDirs(); + if (libDirs.length > 0) { + File tempLibTarGzipFile = File.createTempFile( + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME + "_", + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT); + // copy all jars + tarGzipFolder(libDirs, tempLibTarGzipFile, createJarFilter()); + + LOG.info("Version Info: " + VersionInfo.getBuildVersion()); + fs.copyLocalFileToHdfs(tempLibTarGzipFile, dependencyLibTarGzip, + new FsPermission(YarnServiceConstants.DEPENDENCY_DIR_PERMISSIONS)); + return EXIT_SUCCESS; + } else { + return EXIT_FALSE; + } + } + + protected ClientAMProtocol createAMProxy(String host, int port) + throws IOException { + InetSocketAddress address = + NetUtils.createSocketAddrForHost(host, port); + return ClientAMProxy.createProxy(getConfig(), ClientAMProtocol.class, + UserGroupInformation.getCurrentUser(), rpc, address); + } + + private synchronized ApplicationId getAppId(String serviceName) + throws IOException, YarnException { + if (cachedAppIds.containsKey(serviceName)) { + return 
cachedAppIds.get(serviceName); + } + Service persistedService = ServiceApiUtil.loadService(fs, serviceName); + if (persistedService == null) { + throw new YarnException("Service " + serviceName + + " doesn't exist on hdfs. Please check if the app exists in RM"); + } + ApplicationId currentAppId = ApplicationId.fromString(persistedService.getId()); + cachedAppIds.put(serviceName, currentAppId); + return currentAppId; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java new file mode 100644 index 00000000000..7208f391a3a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java @@ -0,0 +1,584 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.component; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; +import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId; +import org.apache.hadoop.yarn.service.ContainerFailureTracker; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.apache.hadoop.yarn.service.ServiceScheduler; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent; +import org.apache.hadoop.yarn.service.ServiceMetrics; +import org.apache.hadoop.yarn.service.provider.ProviderUtils; +import org.apache.hadoop.yarn.state.InvalidStateTransitionException; +import org.apache.hadoop.yarn.state.MultipleArcTransition; +import org.apache.hadoop.yarn.state.SingleArcTransition; +import org.apache.hadoop.yarn.state.StateMachine; +import org.apache.hadoop.yarn.state.StateMachineFactory; +import org.apache.hadoop.yarn.util.Apps; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; +import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils; +import org.apache.hadoop.yarn.service.monitor.probe.Probe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; +import 
java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import static org.apache.hadoop.yarn.api.records.ContainerExitStatus.*; +import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*; +import static org.apache.hadoop.yarn.service.component.ComponentEventType.*; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.START; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.STOP; +import static org.apache.hadoop.yarn.service.component.ComponentState.*; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_THRESHOLD; + +public class Component implements EventHandler { + private static final Logger LOG = LoggerFactory.getLogger(Component.class); + + private org.apache.hadoop.yarn.service.api.records.Component componentSpec; + private long allocateId; + private Priority priority; + private ServiceMetrics componentMetrics; + private ServiceScheduler scheduler; + private ServiceContext context; + private AMRMClientAsync amrmClient; + private AtomicLong instanceIdCounter = new AtomicLong(); + private Map compInstances = + new ConcurrentHashMap<>(); + // component instances to be assigned with a container + private List pendingInstances = new LinkedList<>(); + private ContainerFailureTracker failureTracker; + private Probe probe; + private final ReentrantReadWriteLock.ReadLock readLock; + private final ReentrantReadWriteLock.WriteLock writeLock; + public int maxContainerFailurePerComp; + // The number of containers failed since last reset. This excludes preempted, + // disk_failed containers etc. This will be reset to 0 periodically. 
+ public AtomicInteger currentContainerFailure = new AtomicInteger(0); + + private StateMachine + stateMachine; + private AsyncDispatcher compInstanceDispatcher; + private static final StateMachineFactory + stateMachineFactory = + new StateMachineFactory( + INIT) + // INIT will only go to FLEXING + .addTransition(INIT, EnumSet.of(STABLE, FLEXING), + FLEX, new FlexComponentTransition()) + // container recovered on AM restart + .addTransition(INIT, INIT, CONTAINER_RECOVERED, + new ContainerRecoveredTransition()) + + // container allocated by RM + .addTransition(FLEXING, FLEXING, CONTAINER_ALLOCATED, + new ContainerAllocatedTransition()) + // container launched on NM + .addTransition(FLEXING, EnumSet.of(STABLE, FLEXING), + CONTAINER_STARTED, new ContainerStartedTransition()) + // container failed while flexing + .addTransition(FLEXING, FLEXING, CONTAINER_COMPLETED, + new ContainerCompletedTransition()) + // Flex while previous flex is still in progress + .addTransition(FLEXING, EnumSet.of(FLEXING, STABLE), FLEX, + new FlexComponentTransition()) + + // container failed while stable + .addTransition(STABLE, FLEXING, CONTAINER_COMPLETED, + new ContainerCompletedTransition()) + // Ignore surplus container + .addTransition(STABLE, STABLE, CONTAINER_ALLOCATED, + new ContainerAllocatedTransition()) + // Flex by user + // For flex up, go to FLEXING state + // For flex down, go to STABLE state + .addTransition(STABLE, EnumSet.of(STABLE, FLEXING), + FLEX, new FlexComponentTransition()) + .installTopology(); + + public Component( + org.apache.hadoop.yarn.service.api.records.Component component, + long allocateId, ServiceContext context) { + this.allocateId = allocateId; + this.priority = Priority.newInstance((int) allocateId); + this.componentSpec = component; + componentMetrics = ServiceMetrics.register(component.getName(), + "Metrics for component " + component.getName()); + componentMetrics + .tag("type", "Metrics type [component or service]", "component"); + this.scheduler 
= context.scheduler; + this.context = context; + amrmClient = scheduler.getAmRMClient(); + ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + this.readLock = lock.readLock(); + this.writeLock = lock.writeLock(); + this.stateMachine = stateMachineFactory.make(this); + compInstanceDispatcher = scheduler.getCompInstanceDispatcher(); + failureTracker = + new ContainerFailureTracker(context, this); + probe = MonitorUtils.getProbe(componentSpec.getReadinessCheck()); + maxContainerFailurePerComp = componentSpec.getConfiguration() + .getPropertyInt(CONTAINER_FAILURE_THRESHOLD, 10); + createNumCompInstances(component.getNumberOfContainers()); + } + + private void createNumCompInstances(long count) { + for (int i = 0; i < count; i++) { + createOneCompInstance(); + } + } + + private void createOneCompInstance() { + ComponentInstanceId id = + new ComponentInstanceId(instanceIdCounter.getAndIncrement(), + componentSpec.getName()); + ComponentInstance instance = new ComponentInstance(this, id); + compInstances.put(instance.getCompInstanceName(), instance); + pendingInstances.add(instance); + } + + private static class FlexComponentTransition implements + MultipleArcTransition { + // For flex up, go to FLEXING state + // For flex down, go to STABLE state + @Override + public ComponentState transition(Component component, + ComponentEvent event) { + component.setDesiredContainers((int)event.getDesired()); + if (!component.areDependenciesReady()) { + LOG.info("[FLEX COMPONENT {}]: Flex deferred because dependencies not" + + " satisfied.", component.getName()); + return component.getState(); + } + if (component.getState() == INIT) { + // This happens on init + LOG.info("[INIT COMPONENT " + component.getName() + "]: " + event + .getDesired() + " instances."); + component.requestContainers(component.pendingInstances.size()); + return checkIfStable(component); + } + long before = component.getComponentSpec().getNumberOfContainers(); + long delta = event.getDesired() - 
before; + component.getComponentSpec().setNumberOfContainers(event.getDesired()); + if (delta > 0) { + // Scale up + LOG.info("[FLEX UP COMPONENT " + component.getName() + "]: scaling up from " + + before + " to " + event.getDesired()); + component.requestContainers(delta); + component.createNumCompInstances(delta); + component.componentSpec.setState( + org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING); + return FLEXING; + } else if (delta < 0){ + delta = 0 - delta; + // scale down + LOG.info("[FLEX DOWN COMPONENT " + component.getName() + + "]: scaling down from " + before + " to " + event.getDesired()); + List list = + new ArrayList<>(component.getAllComponentInstances()); + + // sort in Most recent -> oldest order, destroy most recent ones. + list.sort(Collections.reverseOrder()); + for (int i = 0; i < delta; i++) { + ComponentInstance instance = list.get(i); + // remove the instance + component.compInstances.remove(instance.getCompInstanceName()); + component.pendingInstances.remove(instance); + // decrement id counter + component.instanceIdCounter.decrementAndGet(); + instance.destroy(); + } + component.componentSpec.setState( + org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE); + return STABLE; + } else { + LOG.info("[FLEX COMPONENT " + component.getName() + "]: already has " + + event.getDesired() + " instances, ignoring"); + component.componentSpec.setState( + org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE); + return STABLE; + } + } + } + + private static class ContainerAllocatedTransition extends BaseTransition { + @Override + public void transition(Component component, ComponentEvent event) { + component.assignContainerToCompInstance(event.getContainer()); + } + } + + private static class ContainerRecoveredTransition extends BaseTransition { + @Override + public void transition(Component component, ComponentEvent event) { + ComponentInstance instance = event.getInstance(); + Container container = 
event.getContainer(); + if (instance == null) { + LOG.info("[COMPONENT {}]: Trying to recover {} but event did not " + + "specify component instance", + component.getName(), container.getId()); + component.releaseContainer(container); + return; + } + if (instance.hasContainer()) { + LOG.info( + "[COMPONENT {}]: Instance {} already has container, release " + + "surplus container {}", + instance.getCompName(), instance.getCompInstanceId(), container + .getId()); + component.releaseContainer(container); + return; + } + component.pendingInstances.remove(instance); + LOG.info("[COMPONENT {}]: Recovered {} for component instance {} on " + + "host {}, num pending component instances reduced to {} ", + component.getName(), container.getId(), instance + .getCompInstanceName(), container.getNodeId(), component + .pendingInstances.size()); + instance.setContainer(container); + ProviderUtils.initCompInstanceDir(component.getContext().fs, instance); + component.getScheduler().addLiveCompInstance(container.getId(), instance); + LOG.info("[COMPONENT {}]: Marking {} as started for component " + + "instance {}", component.getName(), event.getContainer().getId(), + instance.getCompInstanceId()); + component.compInstanceDispatcher.getEventHandler().handle( + new ComponentInstanceEvent(instance.getContainerId(), + START)); + } + } + + private static class ContainerStartedTransition implements + MultipleArcTransition { + + @Override public ComponentState transition(Component component, + ComponentEvent event) { + component.compInstanceDispatcher.getEventHandler().handle( + new ComponentInstanceEvent(event.getInstance().getContainerId(), + START)); + return checkIfStable(component); + } + } + + private static ComponentState checkIfStable(Component component) { + // if desired == running + if (component.componentMetrics.containersRunning.value() == component + .getComponentSpec().getNumberOfContainers()) { + component.componentSpec.setState( + 
org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE); + return STABLE; + } else { + component.componentSpec.setState( + org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING); + return FLEXING; + } + } + + private static class ContainerCompletedTransition extends BaseTransition { + @Override + public void transition(Component component, ComponentEvent event) { + component.updateMetrics(event.getStatus()); + + // add back to pending list + component.pendingInstances.add(event.getInstance()); + LOG.info( + "[COMPONENT {}]: {} completed, num pending comp instances increased to {}.", + component.getName(), event.getStatus().getContainerId(), + component.pendingInstances.size()); + component.compInstanceDispatcher.getEventHandler().handle( + new ComponentInstanceEvent(event.getStatus().getContainerId(), + STOP).setStatus(event.getStatus())); + component.componentSpec.setState( + org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING); + } + } + + public ServiceMetrics getCompMetrics () { + return componentMetrics; + } + + private void releaseContainer(Container container) { + scheduler.getAmRMClient().releaseAssignedContainer(container.getId()); + componentMetrics.surplusContainers.incr(); + scheduler.getServiceMetrics().surplusContainers.incr(); + } + + private void assignContainerToCompInstance(Container container) { + if (pendingInstances.size() == 0) { + LOG.info( + "[COMPONENT {}]: No pending component instance left, release surplus container {}", + getName(), container.getId()); + releaseContainer(container); + return; + } + ComponentInstance instance = pendingInstances.remove(0); + LOG.info( + "[COMPONENT {}]: {} allocated, num pending component instances reduced to {}", + getName(), container.getId(), pendingInstances.size()); + instance.setContainer(container); + scheduler.addLiveCompInstance(container.getId(), instance); + LOG.info( + "[COMPONENT {}]: Assigned {} to component instance {} and launch on host {} ", + getName(), 
container.getId(), instance.getCompInstanceName(), + container.getNodeId()); + scheduler.getContainerLaunchService() + .launchCompInstance(scheduler.getApp(), instance, container); + } + + @SuppressWarnings({ "unchecked" }) + public void requestContainers(long count) { + Resource resource = Resource + .newInstance(componentSpec.getResource().getMemoryMB(), + componentSpec.getResource().getCpus()); + + for (int i = 0; i < count; i++) { + //TODO Once YARN-5468 is done, use that for anti-affinity + ContainerRequest request = + ContainerRequest.newBuilder().capability(resource).priority(priority) + .allocationRequestId(allocateId).relaxLocality(true).build(); + amrmClient.addContainerRequest(request); + } + } + + private void setDesiredContainers(int n) { + int delta = n - scheduler.getServiceMetrics().containersDesired.value(); + if (delta > 0) { + scheduler.getServiceMetrics().containersDesired.incr(delta); + } else { + scheduler.getServiceMetrics().containersDesired.decr(delta); + } + componentMetrics.containersDesired.set(n); + } + + + + private void updateMetrics(ContainerStatus status) { + switch (status.getExitStatus()) { + case SUCCESS: + componentMetrics.containersSucceeded.incr(); + scheduler.getServiceMetrics().containersSucceeded.incr(); + return; + case PREEMPTED: + componentMetrics.containersPreempted.incr(); + scheduler.getServiceMetrics().containersPreempted.incr(); + break; + case DISKS_FAILED: + componentMetrics.containersDiskFailure.incr(); + scheduler.getServiceMetrics().containersDiskFailure.incr(); + break; + default: + break; + } + + // containersFailed include preempted, disks_failed etc. 
+ componentMetrics.containersFailed.incr(); + scheduler.getServiceMetrics().containersFailed.incr(); + + if (Apps.shouldCountTowardsNodeBlacklisting(status.getExitStatus())) { + String host = scheduler.getLiveInstances().get(status.getContainerId()) + .getNodeId().getHost(); + failureTracker.incNodeFailure(host); + currentContainerFailure.getAndIncrement() ; + } + } + + public boolean areDependenciesReady() { + List dependencies = componentSpec.getDependencies(); + if (ServiceUtils.isEmpty(dependencies)) { + return true; + } + for (String dependency : dependencies) { + Component dependentComponent = + scheduler.getAllComponents().get(dependency); + if (dependentComponent == null) { + LOG.error("Couldn't find dependency {} for {} (should never happen)", + dependency, getName()); + continue; + } + if (dependentComponent.getNumReadyInstances() < dependentComponent + .getNumDesiredInstances()) { + LOG.info("[COMPONENT {}]: Dependency {} not satisfied, only {} of {}" + + " instances are ready.", getName(), dependency, + dependentComponent.getNumReadyInstances(), + dependentComponent.getNumDesiredInstances()); + return false; + } + } + return true; + } + + public Map getDependencyHostIpTokens() { + Map tokens = new HashMap<>(); + List dependencies = componentSpec.getDependencies(); + if (ServiceUtils.isEmpty(dependencies)) { + return tokens; + } + for (String dependency : dependencies) { + Collection instances = scheduler.getAllComponents() + .get(dependency).getAllComponentInstances(); + for (ComponentInstance instance : instances) { + if (instance.getContainerStatus() == null) { + continue; + } + if (ServiceUtils.isEmpty(instance.getContainerStatus().getIPs()) || + ServiceUtils.isUnset(instance.getContainerStatus().getHost())) { + continue; + } + String ip = instance.getContainerStatus().getIPs().get(0); + String host = instance.getContainerStatus().getHost(); + tokens.put(String.format(COMPONENT_INSTANCE_IP, + instance.getCompInstanceName().toUpperCase()), ip); + 
tokens.put(String.format(COMPONENT_INSTANCE_HOST, + instance.getCompInstanceName().toUpperCase()), host); + } + } + return tokens; + } + + public void incRunningContainers() { + componentMetrics.containersRunning.incr(); + scheduler.getServiceMetrics().containersRunning.incr(); + } + + public void decRunningContainers() { + componentMetrics.containersRunning.decr(); + scheduler.getServiceMetrics().containersRunning.decr(); + } + + public void incContainersReady() { + componentMetrics.containersReady.incr(); + scheduler.getServiceMetrics().containersReady.incr(); + } + + public void decContainersReady() { + componentMetrics.containersReady.decr(); + scheduler.getServiceMetrics().containersReady.decr(); + } + + public int getNumReadyInstances() { + return componentMetrics.containersReady.value(); + } + + public int getNumRunningInstances() { + return componentMetrics.containersRunning.value(); + } + + public int getNumDesiredInstances() { + return componentMetrics.containersDesired.value(); + } + + public ComponentInstance getComponentInstance(String componentInstanceName) { + return compInstances.get(componentInstanceName); + } + + public Collection getAllComponentInstances() { + return compInstances.values(); + } + + public org.apache.hadoop.yarn.service.api.records.Component getComponentSpec() { + return this.componentSpec; + } + + public void resetCompFailureCount() { + LOG.info("[COMPONENT {}]: Reset container failure count from {} to 0.", + getName(), currentContainerFailure.get()); + currentContainerFailure.set(0); + failureTracker.resetContainerFailures(); + } + + public Probe getProbe() { + return probe; + } + + public Priority getPriority() { + return priority; + } + + public long getAllocateId() { + return allocateId; + } + + public String getName () { + return componentSpec.getName(); + } + + public ComponentState getState() { + this.readLock.lock(); + + try { + return this.stateMachine.getCurrentState(); + } finally { + this.readLock.unlock(); + } + } + 
public ServiceScheduler getScheduler() { + return scheduler; + } + + @Override + public void handle(ComponentEvent event) { + try { + writeLock.lock(); + ComponentState oldState = getState(); + try { + stateMachine.doTransition(event.getType(), event); + } catch (InvalidStateTransitionException e) { + LOG.error(MessageFormat.format("[COMPONENT {0}]: Invalid event {1} at {2}", + componentSpec.getName(), event.getType(), oldState), e); + } + if (oldState != getState()) { + LOG.info("[COMPONENT {}] Transitioned from {} to {} on {} event.", + componentSpec.getName(), oldState, getState(), event.getType()); + } + } finally { + writeLock.unlock(); + } + } + + private static class BaseTransition implements + SingleArcTransition { + + @Override public void transition(Component component, + ComponentEvent event) { + } + } + + public ServiceContext getContext() { + return context; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java new file mode 100644 index 00000000000..d93dcf153a1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.apache.hadoop.yarn.service.component;

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;

/**
 * Event delivered to a {@link Component}'s state machine. Carries the
 * component name and event type (immutable), plus optional payload fields
 * populated through fluent setters: desired instance count, the YARN
 * container, the component instance, and the container status.
 */
public class ComponentEvent extends AbstractEvent<ComponentEventType> {
  // Desired number of instances; presumably set for FLEX events — confirm
  // against the Component transitions that consume it.
  private long desired;
  private final String name;
  private final ComponentEventType type;
  private Container container;
  private ComponentInstance instance;
  private ContainerStatus status;

  public ComponentEvent(String name, ComponentEventType type) {
    super(type);
    this.name = name;
    this.type = type;
  }

  /** @return the component name this event targets. */
  public String getName() {
    return name;
  }

  /** @return the event type (also carried by the AbstractEvent superclass). */
  public ComponentEventType getType() {
    return type;
  }

  /** @return the desired instance count payload. */
  public long getDesired() {
    return desired;
  }

  /** Sets the desired instance count; returns {@code this} for chaining. */
  public ComponentEvent setDesired(long desired) {
    this.desired = desired;
    return this;
  }

  /** @return the YARN container payload, or null if not set. */
  public Container getContainer() {
    return container;
  }

  /** Sets the container payload; returns {@code this} for chaining. */
  public ComponentEvent setContainer(Container container) {
    this.container = container;
    return this;
  }

  /** @return the component-instance payload, or null if not set. */
  public ComponentInstance getInstance() {
    return instance;
  }

  /** Sets the component-instance payload; returns {@code this} for chaining. */
  public ComponentEvent setInstance(ComponentInstance instance) {
    this.instance = instance;
    return this;
  }

  /** @return the container-status payload, or null if not set. */
  public ContainerStatus getStatus() {
    return status;
  }

  /** Sets the container-status payload; returns {@code this} for chaining. */
  public ComponentEvent setStatus(ContainerStatus status) {
    this.status = status;
    return this;
  }
}
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEventType.java new file mode 100644 index 00000000000..067302de6a4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEventType.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package org.apache.hadoop.yarn.service.component;

/**
 * Event types consumed by the {@link Component} state machine. The
 * CONTAINER_* values track a container's lifecycle as reported to the AM;
 * FLEX reflects a change to the desired instance count.
 */
public enum ComponentEventType {
  FLEX,
  CONTAINER_ALLOCATED,
  CONTAINER_RECOVERED,
  CONTAINER_STARTED,
  CONTAINER_COMPLETED
}
package org.apache.hadoop.yarn.service.component;

/**
 * States of the {@link Component} state machine: INIT before any containers
 * are requested, FLEXING while the running count is being adjusted toward
 * the desired count, STABLE once they match.
 */
public enum ComponentState {
  INIT,
  FLEXING,
  STABLE
}
+ */ + +package org.apache.hadoop.yarn.service.component.instance; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies; +import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.client.api.NMClient; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.apache.hadoop.yarn.service.ServiceScheduler; +import org.apache.hadoop.yarn.service.api.records.ContainerState; +import org.apache.hadoop.yarn.service.component.Component; +import org.apache.hadoop.yarn.state.InvalidStateTransitionException; +import org.apache.hadoop.yarn.state.SingleArcTransition; +import org.apache.hadoop.yarn.state.StateMachine; +import org.apache.hadoop.yarn.state.StateMachineFactory; +import org.apache.hadoop.yarn.util.BoundedAppender; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; +import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher; +import org.apache.hadoop.yarn.service.monitor.probe.ProbeStatus; +import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Date; +import 
java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; + +import static org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes.*; +import static org.apache.hadoop.yarn.api.records.ContainerExitStatus.KILLED_BY_APPMASTER; +import static org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.*; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.*; + +public class ComponentInstance implements EventHandler, + Comparable { + private static final Logger LOG = + LoggerFactory.getLogger(ComponentInstance.class); + + private StateMachine stateMachine; + private Component component; + private final ReadLock readLock; + private final WriteLock writeLock; + + private ComponentInstanceId compInstanceId = null; + private Path compInstanceDir; + private Container container; + private YarnRegistryViewForProviders yarnRegistryOperations; + private FileSystem fs; + private boolean timelineServiceEnabled = false; + private ServiceTimelinePublisher serviceTimelinePublisher; + private ServiceScheduler scheduler; + private BoundedAppender diagnostics = new BoundedAppender(64 * 1024); + private volatile ScheduledFuture containerStatusFuture; + private volatile ContainerStatus status; + private long containerStartedTime = 0; + // This container object is used for rest API query + private org.apache.hadoop.yarn.service.api.records.Container containerSpec; + + private static final StateMachineFactory + stateMachineFactory = + new StateMachineFactory(INIT) + .addTransition(INIT, STARTED, START, + new ContainerStartedTransition()) + .addTransition(INIT, INIT, STOP, + // container failed before launching, nothing to cleanup from 
registry + // This could happen if NMClient#startContainerAsync failed, container + // will be completed, but COMP_INSTANCE is still at INIT. + new ContainerStoppedTransition(true)) + + //From Running + .addTransition(STARTED, INIT, STOP, + new ContainerStoppedTransition()) + .addTransition(STARTED, READY, BECOME_READY, + new ContainerBecomeReadyTransition()) + + // FROM READY + .addTransition(READY, STARTED, BECOME_NOT_READY, + new ContainerBecomeNotReadyTransition()) + .addTransition(READY, INIT, STOP, new ContainerStoppedTransition()) + .installTopology(); + + + + public ComponentInstance(Component component, + ComponentInstanceId compInstanceId) { + this.stateMachine = stateMachineFactory.make(this); + this.component = component; + this.compInstanceId = compInstanceId; + this.scheduler = component.getScheduler(); + this.yarnRegistryOperations = + component.getScheduler().getYarnRegistryOperations(); + this.serviceTimelinePublisher = + component.getScheduler().getServiceTimelinePublisher(); + if (YarnConfiguration + .timelineServiceV2Enabled(component.getScheduler().getConfig())) { + this.timelineServiceEnabled = true; + } + ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + this.readLock = lock.readLock(); + this.writeLock = lock.writeLock(); + this.fs = scheduler.getContext().fs.getFileSystem(); + } + + private static class ContainerStartedTransition extends BaseTransition { + @Override public void transition(ComponentInstance compInstance, + ComponentInstanceEvent event) { + // Query container status for ip and host + compInstance.containerStatusFuture = + compInstance.scheduler.executorService.scheduleAtFixedRate( + new ContainerStatusRetriever(compInstance.scheduler, + compInstance.getContainerId(), compInstance), 0, 1, + TimeUnit.SECONDS); + compInstance.component.incRunningContainers(); + long containerStartTime = System.currentTimeMillis(); + try { + ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils + 
.newContainerTokenIdentifier(compInstance.getContainer() + .getContainerToken()); + containerStartTime = containerTokenIdentifier.getCreationTime(); + } catch (Exception e) { + LOG.info("Could not get container creation time, using current time"); + } + org.apache.hadoop.yarn.service.api.records.Container container = + new org.apache.hadoop.yarn.service.api.records.Container(); + container.setId(compInstance.getContainerId().toString()); + container.setLaunchTime(new Date(containerStartTime)); + container.setState(ContainerState.RUNNING_BUT_UNREADY); + container.setBareHost(compInstance.container.getNodeId().getHost()); + container.setComponentInstanceName(compInstance.getCompInstanceName()); + if (compInstance.containerSpec != null) { + // remove the previous container. + compInstance.getCompSpec().removeContainer(compInstance.containerSpec); + } + compInstance.containerSpec = container; + compInstance.getCompSpec().addContainer(container); + compInstance.containerStartedTime = containerStartTime; + + if (compInstance.timelineServiceEnabled) { + compInstance.serviceTimelinePublisher + .componentInstanceStarted(container, compInstance); + } + } + } + + private static class ContainerBecomeReadyTransition extends BaseTransition { + @Override + public void transition(ComponentInstance compInstance, + ComponentInstanceEvent event) { + compInstance.component.incContainersReady(); + compInstance.containerSpec.setState(ContainerState.READY); + if (compInstance.timelineServiceEnabled) { + compInstance.serviceTimelinePublisher + .componentInstanceBecomeReady(compInstance.containerSpec); + } + } + } + + private static class ContainerBecomeNotReadyTransition extends BaseTransition { + @Override + public void transition(ComponentInstance compInstance, + ComponentInstanceEvent event) { + compInstance.component.decContainersReady(); + compInstance.containerSpec.setState(ContainerState.RUNNING_BUT_UNREADY); + } + } + + private static class ContainerStoppedTransition extends 
BaseTransition { + // whether the container failed before launched by AM or not. + boolean failedBeforeLaunching = false; + public ContainerStoppedTransition(boolean failedBeforeLaunching) { + this.failedBeforeLaunching = failedBeforeLaunching; + } + + public ContainerStoppedTransition() { + this(false); + } + + @Override + public void transition(ComponentInstance compInstance, + ComponentInstanceEvent event) { + // re-ask the failed container. + Component comp = compInstance.component; + comp.requestContainers(1); + LOG.info(compInstance.getCompInstanceId() + + ": Container completed. Requested a new container." + System + .lineSeparator() + " exitStatus={}, diagnostics={}.", + event.getStatus().getExitStatus(), + event.getStatus().getDiagnostics()); + String containerDiag = + compInstance.getCompInstanceId() + ": " + event.getStatus() + .getDiagnostics(); + compInstance.diagnostics.append(containerDiag + System.lineSeparator()); + + if (compInstance.getState().equals(READY)) { + compInstance.component.decContainersReady(); + } + compInstance.component.decRunningContainers(); + boolean shouldExit = false; + // check if it exceeds the failure threshold + if (comp.currentContainerFailure.get() > comp.maxContainerFailurePerComp) { + String exitDiag = MessageFormat.format( + "[COMPONENT {0}]: Failed {1} times, exceeded the limit - {2}. Shutting down now... " + + System.lineSeparator(), + comp.getName(), comp.currentContainerFailure.get(), comp.maxContainerFailurePerComp); + compInstance.diagnostics.append(exitDiag); + // append to global diagnostics that will be reported to RM. + comp.getScheduler().getDiagnostics().append(containerDiag); + comp.getScheduler().getDiagnostics().append(exitDiag); + LOG.warn(exitDiag); + shouldExit = true; + } + + if (!failedBeforeLaunching) { + // clean up registry + // If the container failed before launching, no need to cleanup registry, + // because it was not registered before. 
+ // hdfs dir content will be overwritten when a new container gets started, + // so no need remove. + compInstance.scheduler.executorService + .submit(compInstance::cleanupRegistry); + if (compInstance.timelineServiceEnabled) { + // record in ATS + compInstance.serviceTimelinePublisher.componentInstanceFinished + (compInstance, event.getStatus().getExitStatus(), containerDiag); + } + compInstance.containerSpec.setState(ContainerState.STOPPED); + } + + // remove the failed ContainerId -> CompInstance mapping + comp.getScheduler().removeLiveCompInstance(event.getContainerId()); + + if (shouldExit) { + // Sleep for 5 seconds in hope that the state can be recorded in ATS. + // in case there's a client polling the comp state, it can be notified. + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + LOG.error("Interrupted on sleep while exiting.", e); + } + ExitUtil.terminate(-1); + } + + compInstance.removeContainer(); + } + } + + public ComponentInstanceState getState() { + this.readLock.lock(); + + try { + return this.stateMachine.getCurrentState(); + } finally { + this.readLock.unlock(); + } + } + + @Override + public void handle(ComponentInstanceEvent event) { + try { + writeLock.lock(); + ComponentInstanceState oldState = getState(); + try { + stateMachine.doTransition(event.getType(), event); + } catch (InvalidStateTransitionException e) { + LOG.error(getCompInstanceId() + ": Invalid event " + event.getType() + + " at " + oldState, e); + } + if (oldState != getState()) { + LOG.info(getCompInstanceId() + " Transitioned from " + oldState + " to " + + getState() + " on " + event.getType() + " event"); + } + } finally { + writeLock.unlock(); + } + } + + public boolean hasContainer() { + return this.container != null; + } + + public void removeContainer() { + this.container = null; + this.compInstanceId.setContainerId(null); + } + + public void setContainer(Container container) { + this.container = container; + 
this.compInstanceId.setContainerId(container.getId()); + } + + public String getCompInstanceName() { + return compInstanceId.getCompInstanceName(); + } + + public ContainerStatus getContainerStatus() { + return status; + } + + public void updateContainerStatus(ContainerStatus status) { + this.status = status; + org.apache.hadoop.yarn.service.api.records.Container container = + getCompSpec().getContainer(getContainerId().toString()); + if (container != null) { + container.setIp(StringUtils.join(",", status.getIPs())); + container.setHostname(status.getHost()); + if (timelineServiceEnabled) { + serviceTimelinePublisher.componentInstanceIPHostUpdated(container); + } + } + updateServiceRecord(yarnRegistryOperations, status); + } + + public ContainerId getContainerId() { + return container.getId(); + } + + public String getCompName() { + return compInstanceId.getCompName(); + } + + public void setCompInstanceDir(Path dir) { + this.compInstanceDir = dir; + } + + public Component getComponent() { + return component; + } + + public Container getContainer() { + return container; + } + + public ComponentInstanceId getCompInstanceId() { + return compInstanceId; + } + + public NodeId getNodeId() { + return this.container.getNodeId(); + } + + public org.apache.hadoop.yarn.service.api.records.Component getCompSpec() { + return component.getComponentSpec(); + } + + private static class BaseTransition implements + SingleArcTransition { + + @Override public void transition(ComponentInstance compInstance, + ComponentInstanceEvent event) { + } + } + + public ProbeStatus ping() { + if (component.getProbe() == null) { + ProbeStatus status = new ProbeStatus(); + status.setSuccess(true); + return status; + } + return component.getProbe().ping(this); + } + + // Write service record into registry + private void updateServiceRecord( + YarnRegistryViewForProviders yarnRegistry, ContainerStatus status) { + ServiceRecord record = new ServiceRecord(); + String containerId = 
status.getContainerId().toString(); + record.set(YARN_ID, containerId); + record.description = getCompInstanceName(); + record.set(YARN_PERSISTENCE, PersistencePolicies.CONTAINER); + record.set(YARN_IP, status.getIPs().get(0)); + record.set(YARN_HOSTNAME, status.getHost()); + try { + yarnRegistry + .putComponent(RegistryPathUtils.encodeYarnID(containerId), record); + } catch (IOException e) { + LOG.error( + "Failed to update service record in registry: " + containerId + ""); + } + } + + // Called when user flexed down the container and ContainerStoppedTransition + // is not executed in this case. + // Release the container, dec running, + // cleanup registry, hdfs dir, and send record to ATS + public void destroy() { + LOG.info(getCompInstanceId() + ": Flexed down by user, destroying."); + diagnostics.append(getCompInstanceId() + ": Flexed down by user"); + if (container != null) { + scheduler.removeLiveCompInstance(container.getId()); + component.getScheduler().getAmRMClient() + .releaseAssignedContainer(container.getId()); + getCompSpec().removeContainer(containerSpec); + } + // update metrics + if (getState() == STARTED) { + component.decRunningContainers(); + } + if (getState() == READY) { + component.decContainersReady(); + component.decRunningContainers(); + } + + if (timelineServiceEnabled) { + serviceTimelinePublisher.componentInstanceFinished(this, + KILLED_BY_APPMASTER, diagnostics.toString()); + } + scheduler.executorService.submit(this::cleanupRegistryAndCompHdfsDir); + } + + private void cleanupRegistry() { + ContainerId containerId = getContainerId(); + String cid = RegistryPathUtils.encodeYarnID(containerId.toString()); + try { + yarnRegistryOperations.deleteComponent(getCompInstanceId(), cid); + } catch (IOException e) { + LOG.error(getCompInstanceId() + ": Failed to delete registry", e); + } + } + + //TODO Maybe have a dedicated cleanup service. 
+ public void cleanupRegistryAndCompHdfsDir() { + cleanupRegistry(); + try { + if (compInstanceDir != null && fs.exists(compInstanceDir)) { + boolean deleted = fs.delete(compInstanceDir, true); + if (!deleted) { + LOG.error(getCompInstanceId() + + ": Failed to delete component instance dir: " + + compInstanceDir); + } else { + LOG.info(getCompInstanceId() + ": Deleted component instance dir: " + + compInstanceDir); + } + } + } catch (IOException e) { + LOG.warn(getCompInstanceId() + ": Failed to delete directory", e); + } + } + + // Query container status until ip and hostname are available and update + // the service record into registry service + private static class ContainerStatusRetriever implements Runnable { + private ContainerId containerId; + private NodeId nodeId; + private NMClient nmClient; + private ComponentInstance instance; + ContainerStatusRetriever(ServiceScheduler scheduler, + ContainerId containerId, ComponentInstance instance) { + this.containerId = containerId; + this.nodeId = instance.getNodeId(); + this.nmClient = scheduler.getNmClient().getClient(); + this.instance = instance; + } + @Override public void run() { + ContainerStatus status = null; + try { + status = nmClient.getContainerStatus(containerId, nodeId); + } catch (Exception e) { + if (e instanceof YarnException) { + throw new YarnRuntimeException( + instance.compInstanceId + " Failed to get container status on " + + nodeId + " , cancelling.", e); + } + LOG.error(instance.compInstanceId + " Failed to get container status on " + + nodeId + ", will try again", e); + return; + } + if (ServiceUtils.isEmpty(status.getIPs()) || ServiceUtils + .isUnset(status.getHost())) { + return; + } + instance.updateContainerStatus(status); + LOG.info( + instance.compInstanceId + " IP = " + status.getIPs() + ", host = " + + status.getHost() + ", cancel container status retriever"); + instance.containerStatusFuture.cancel(false); + } + } + + @Override + public int compareTo(ComponentInstance to) { + 
long delta = containerStartedTime - to.containerStartedTime; + if (delta == 0) { + return getCompInstanceId().compareTo(to.getCompInstanceId()); + } else if (delta < 0) { + return -1; + } else { + return 1; + } + } + + @Override public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + ComponentInstance instance = (ComponentInstance) o; + + if (containerStartedTime != instance.containerStartedTime) + return false; + return compInstanceId.equals(instance.compInstanceId); + } + + @Override public int hashCode() { + int result = compInstanceId.hashCode(); + result = 31 * result + (int) (containerStartedTime ^ (containerStartedTime + >>> 32)); + return result; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java new file mode 100644 index 00000000000..707b0349655 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.apache.hadoop.yarn.service.component.instance;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.event.AbstractEvent;

/**
 * Event delivered to a {@link ComponentInstance}'s state machine. Carries
 * the container id the event refers to, an optional container status
 * payload, and a destroy flag (defaults to false, settable only to true).
 */
public class ComponentInstanceEvent
    extends AbstractEvent<ComponentInstanceEventType> {

  private ContainerId id;
  private ContainerStatus status;
  // One-way flag: once set via setShouldDestroy() it cannot be cleared.
  private boolean shouldDestroy = false;

  public ComponentInstanceEvent(ContainerId containerId,
      ComponentInstanceEventType componentInstanceEventType) {
    super(componentInstanceEventType);
    this.id = containerId;
  }

  /** @return the container this event refers to. */
  public ContainerId getContainerId() {
    return id;
  }

  /** @return the container-status payload, or null if not set. */
  public ContainerStatus getStatus() {
    return this.status;
  }

  /** Sets the container-status payload; returns {@code this} for chaining. */
  public ComponentInstanceEvent setStatus(ContainerStatus status) {
    this.status = status;
    return this;
  }

  /** Marks the instance for destruction (irreversible). */
  public void setShouldDestroy() {
    shouldDestroy = true;
  }

  /** @return true if the instance should be destroyed when handling this event. */
  public boolean shouldDestroy() {
    return shouldDestroy;
  }
}
package org.apache.hadoop.yarn.service.component.instance;

/**
 * Event types consumed by the {@link ComponentInstance} state machine:
 * START/STOP bracket the container's lifetime, BECOME_READY and
 * BECOME_NOT_READY reflect readiness-probe results.
 */
public enum ComponentInstanceEventType {
  START,
  STOP,
  BECOME_READY,
  BECOME_NOT_READY
}
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.component.instance; + +import org.apache.hadoop.yarn.api.records.ContainerId; + +public class ComponentInstanceId implements Comparable { + + private long Id; + private String name; + private ContainerId containerId; + + public ComponentInstanceId(long id, String name) { + Id = id; + this.name = name; + } + + public long getId() { + return Id; + } + + public String getCompName() { + return name; + } + + public String getCompInstanceName() { + return getCompName() + "-" + getId(); + } + + public void setContainerId(ContainerId containerId) { + this.containerId = containerId; + } + + @Override + public String toString() { + if (containerId == null) { + return "[COMPINSTANCE " + getCompInstanceName() + "]"; + } else { + return "[COMPINSTANCE " + getCompInstanceName() + " : " + containerId + "]"; + } + } + + @Override public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + ComponentInstanceId that = (ComponentInstanceId) o; + + if (getId() != that.getId()) + return false; + return getCompName() != null ? 
getCompName().equals(that.getCompName()) : + that.getCompName() == null; + + } + + @Override public int hashCode() { + int result = (int) (getId() ^ (getId() >>> 32)); + result = 31 * result + (getCompName() != null ? getCompName().hashCode() : 0); + return result; + } + + @Override + public int compareTo(ComponentInstanceId to) { + int delta = this.getCompName().compareTo(to.getCompName()); + if (delta == 0) { + return Long.compare(this.getId(), to.getId()); + } else if (delta < 0) { + return -1; + } else { + return 1; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceState.java new file mode 100644 index 00000000000..f5de5cb3016 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceState.java @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.component.instance; + +public enum ComponentInstanceState { + INIT, + STARTED, + READY, + UPGRADING +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/RestApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/RestApiConstants.java new file mode 100644 index 00000000000..35e19801702 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/RestApiConstants.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.conf; + +public interface RestApiConstants { + + // Rest endpoints + String CONTEXT_ROOT = "/ws/v1"; + String VERSION = "/services/version"; + String SERVICE_ROOT_PATH = "/services"; + String SERVICE_PATH = "/services/{service_name}"; + String COMPONENT_PATH = "/services/{service_name}/components/{component_name}"; + + // Query param + String SERVICE_NAME = "service_name"; + String COMPONENT_NAME = "component_name"; + + Long DEFAULT_UNLIMITED_LIFETIME = -1l; + + Integer ERROR_CODE_APP_DOES_NOT_EXIST = 404001; + Integer ERROR_CODE_APP_IS_NOT_RUNNING = 404002; + Integer ERROR_CODE_APP_SUBMITTED_BUT_NOT_RUNNING_YET = 404003; + Integer ERROR_CODE_APP_NAME_INVALID = 404004; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/SliderExitCodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/SliderExitCodes.java new file mode 100644 index 00000000000..ee270cb90dd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/SliderExitCodes.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes; + +public interface SliderExitCodes extends LauncherExitCodes { + + /** + * starting point for exit codes; not an exception itself + */ + int _EXIT_CODE_BASE = 64; + + /** + * service entered the failed state: {@value} + */ + int EXIT_YARN_SERVICE_FAILED = 65; + + /** + * service was killed: {@value} + */ + int EXIT_YARN_SERVICE_KILLED = 66; + + /** + * timeout on monitoring client: {@value} + */ + int EXIT_TIMED_OUT = 67; + + /** + * service finished with an error: {@value} + */ + int EXIT_YARN_SERVICE_FINISHED_WITH_ERROR = 68; + + /** + * the service instance is unknown: {@value} + */ + int EXIT_UNKNOWN_INSTANCE = 69; + + /** + * the service instance is in the wrong state for that operation: {@value} + */ + int EXIT_BAD_STATE = 70; + + /** + * A spawned master process failed + */ + int EXIT_PROCESS_FAILED = 71; + + /** + * The instance failed -too many containers were + * failing or some other threshold was reached + */ + int EXIT_DEPLOYMENT_FAILED = 72; + + /** + * The service is live -and the requested operation + * does not work if the cluster is running + */ + int EXIT_APPLICATION_IN_USE = 73; + + /** + * There already is an service instance of that name + * when an attempt is made to create a new instance + */ + int EXIT_INSTANCE_EXISTS = 75; + + /** + * Exit code when the configurations in valid/incomplete: {@value} + */ + int EXIT_BAD_CONFIGURATION = 77; + +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java new file mode 100644 index 00000000000..a7bd58d0a00 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.yarn.service.api.records.Configuration; + +public class YarnServiceConf { + + // Retry settings for the ServiceClient to talk to Service AppMaster + public static final String CLIENT_AM_RETRY_MAX_WAIT_MS = "yarn.service.client-am.retry.max-wait-ms"; + public static final String CLIENT_AM_RETRY_MAX_INTERVAL_MS = "yarn.service.client-am.retry-interval-ms"; + + // Retry settings for container failures + public static final String CONTAINER_RETRY_MAX = "yarn.service.container-failure.retry.max"; + public static final String CONTAINER_RETRY_INTERVAL = "yarn.service.container-failure.retry-interval-ms"; + + public static final String AM_RESTART_MAX = "yarn.service.am-restart.max-attempts"; + public static final String AM_RESOURCE_MEM = "yarn.service.am-resource.memory"; + public static final long DEFAULT_KEY_AM_RESOURCE_MEM = 1024; + + public static final String YARN_QUEUE = "yarn.service.queue"; + + public static final String API_SERVER_ADDRESS = "yarn.service.api-server.address"; + public static final String DEFAULT_API_SERVER_ADDRESS = "0.0.0.0:"; + public static final int DEFAULT_API_SERVER_PORT = 9191; + + public static final String FINAL_LOG_INCLUSION_PATTERN = "yarn.service.log.include-pattern"; + public static final String FINAL_LOG_EXCLUSION_PATTERN = "yarn.service.log.exclude-pattern"; + + public static final String ROLLING_LOG_INCLUSION_PATTERN = "yarn.service.rolling-log.include-pattern"; + public static final String ROLLING_LOG_EXCLUSION_PATTERN = "yarn.service.rolling-log.exclude-pattern"; + + + /** + * The yarn service base path: + * Defaults to HomeDir/.yarn/ + */ + public static final String YARN_SERVICE_BASE_PATH = "yarn.service.base.path"; + + //TODO rename + /** Declare that a keytab must be provided */ + public static final String KEY_AM_LOGIN_KEYTAB_REQUIRED = "slider.am.login.keytab.required"; + public static final String KEY_AM_LOGIN_KEYTAB_NAME = 
"slider.am.login.keytab.name"; + public static final String KEY_HDFS_KEYTAB_DIR = "slider.hdfs.keytab.dir"; + public static final String KEY_AM_KEYTAB_LOCAL_PATH = "slider.am.keytab.local.path"; + + /** + * maximum number of failed containers (in a single component) + * before the app exits + */ + public static final String CONTAINER_FAILURE_THRESHOLD = + "yarn.service.container-failure-per-component.threshold"; + /** + * Maximum number of container failures on a node before the node is blacklisted + */ + public static final String NODE_BLACKLIST_THRESHOLD = + "yarn.service.node-blacklist.threshold"; + + /** + * The failure count for CONTAINER_FAILURE_THRESHOLD and NODE_BLACKLIST_THRESHOLD + * gets reset periodically, the unit is seconds. + */ + public static final String CONTAINER_FAILURE_WINDOW = + "yarn.service.failure-count-reset.window"; + + /** + * interval between readiness checks. + */ + public static final String READINESS_CHECK_INTERVAL = "yarn.service.readiness-check-interval.seconds"; + public static final int DEFAULT_READINESS_CHECK_INTERVAL = 30; // seconds + + /** + * Get long value for the property. First get from the userConf, if not + * present, get from systemConf. + * + * @param name name of the property + * @param defaultValue default value of the property, if it is not defined in + * userConf and systemConf. + * @param userConf Configuration provided by client in the JSON definition + * @param systemConf The YarnConfiguration in the system. 
+ * @return long value for the property + */ + public static long getLong(String name, long defaultValue, + Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) { + return userConf.getPropertyLong(name, systemConf.getLong(name, defaultValue)); + } + + public static int getInt(String name, int defaultValue, + Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) { + return userConf.getPropertyInt(name, systemConf.getInt(name, defaultValue)); + } + + public static String get(String name, String defaultVal, + Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) { + return userConf.getProperty(name, systemConf.get(name, defaultVal)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java new file mode 100644 index 00000000000..3973759e3c7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + +public interface YarnServiceConstants { + + /** + * The path under which cluster and temp data are stored + */ + String SERVICE_BASE_DIRECTORY = ".yarn"; + + /** + * The paths under which Service AM dependency libraries are stored + */ + String DEPENDENCY_LOCALIZED_DIR_LINK = "service_dep"; + String DEPENDENCY_DIR = "/yarn-services/%s/"; + String DEPENDENCY_TAR_GZ_FILE_NAME = "service-dep"; + String DEPENDENCY_TAR_GZ_FILE_EXT = ".tar.gz"; + String DEPENDENCY_DIR_PERMISSIONS = "755"; + + /** + * Service type for YARN service + */ + String APP_TYPE = "yarn-service"; + + String KEYTAB_DIR = "keytabs"; + String RESOURCE_DIR = "resources"; + + + String SERVICES_DIRECTORY = "services"; + + /** + * JVM property to define the service lib directory; + * this is set by the yarn.sh script + */ + String PROPERTY_LIB_DIR = "service.libdir"; + + /** + * name of generated dir for this conf + */ + String SUBMITTED_CONF_DIR = "conf"; + + /** + * Service AM log4j file name + */ + String YARN_SERVICE_LOG4J_FILENAME = "yarnservice-log4j.properties"; + + /** + * Log4j sysprop to name the resource + */ + String SYSPROP_LOG4J_CONFIGURATION = "log4j.configuration"; + + /** + * sysprop for Service AM log4j directory + */ + String SYSPROP_LOG_DIR = "LOG_DIR"; + + String TMP_DIR_PREFIX = "tmp"; + + + String SERVICE_CORE_JAR = "yarn-service-core.jar"; + + String STDOUT_AM = "serviceam-out.txt"; + String STDERR_AM = "serviceam-err.txt"; + + String HADOOP_USER_NAME = "HADOOP_USER_NAME"; + + String APP_CONF_DIR = "conf"; + + 
String APP_LIB_DIR = "lib"; + + String OUT_FILE = "stdout.txt"; + String ERR_FILE = "stderr.txt"; + + String CONTENT = "content"; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java new file mode 100644 index 00000000000..2d7c3bbba84 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java @@ -0,0 +1,271 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.containerlaunch; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.ContainerRetryContext; +import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.util.Records; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.utils.CoreFileSystem; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import static org.apache.hadoop.yarn.service.provider.docker.DockerKeys.DEFAULT_DOCKER_NETWORK; + +/** + * Launcher of applications: base class + */ +public class AbstractLauncher { + private static final Logger log = + LoggerFactory.getLogger(AbstractLauncher.class); + public static final String CLASSPATH = "CLASSPATH"; + /** + * Filesystem to use for the launch + */ + protected final CoreFileSystem coreFileSystem; + /** + * Env vars; set up at final launch stage + */ + protected final Map envVars = new HashMap<>(); + protected final ContainerLaunchContext containerLaunchContext = + Records.newRecord(ContainerLaunchContext.class); + protected final List commands = new ArrayList<>(20); + protected final Map localResources = new HashMap<>(); + protected final Map mountPaths = new HashMap<>(); + private final Map serviceData = new HashMap<>(); + // security + protected final Credentials credentials; + protected boolean yarnDockerMode = false; + protected String dockerImage; + protected String dockerNetwork = 
DEFAULT_DOCKER_NETWORK; + protected String dockerHostname; + protected String runPrivilegedContainer; + + + /** + * Create instance. + * @param coreFileSystem filesystem + * @param credentials initial set of credentials -null is permitted + */ + public AbstractLauncher( + CoreFileSystem coreFileSystem, + Credentials credentials) { + this.coreFileSystem = coreFileSystem; + this.credentials = credentials != null ? credentials: new Credentials(); + } + + public void setYarnDockerMode(boolean yarnDockerMode){ + this.yarnDockerMode = yarnDockerMode; + } + + /** + * Get the env vars to work on + * @return env vars + */ + public Map getEnv() { + return envVars; + } + + /** + * Get the launch commands. + * @return the live list of commands + */ + public List getCommands() { + return commands; + } + + public void addLocalResource(String subPath, LocalResource resource) { + localResources.put(subPath, resource); + } + + public void addLocalResource(String subPath, LocalResource resource, String mountPath) { + localResources.put(subPath, resource); + mountPaths.put(subPath, mountPath); + } + + /** + * Accessor to the credentials + * @return the credentials associated with this launcher + */ + public Credentials getCredentials() { + return credentials; + } + + + public void addCommand(String cmd) { + commands.add(cmd); + } + + /** + * Complete the launch context (copy in env vars, etc). 
+ * @return the container to launch + */ + public ContainerLaunchContext completeContainerLaunch() throws IOException { + + String cmdStr = ServiceUtils.join(commands, " ", false); + log.debug("Completed setting up container command {}", cmdStr); + containerLaunchContext.setCommands(commands); + + //env variables + if (log.isDebugEnabled()) { + log.debug("Environment variables"); + for (Map.Entry envPair : envVars.entrySet()) { + log.debug(" \"{}\"=\"{}\"", envPair.getKey(), envPair.getValue()); + } + } + containerLaunchContext.setEnvironment(envVars); + + //service data + if (log.isDebugEnabled()) { + log.debug("Service Data size"); + for (Map.Entry entry : serviceData.entrySet()) { + log.debug("\"{}\"=> {} bytes of data", entry.getKey(), + entry.getValue().array().length); + } + } + containerLaunchContext.setServiceData(serviceData); + + // resources + dumpLocalResources(); + containerLaunchContext.setLocalResources(localResources); + + //tokens + log.debug("{} tokens", credentials.numberOfTokens()); + containerLaunchContext.setTokens(CredentialUtils.marshallCredentials( + credentials)); + + if(yarnDockerMode){ + Map env = containerLaunchContext.getEnvironment(); + env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker"); + env.put("YARN_CONTAINER_RUNTIME_DOCKER_IMAGE", dockerImage); + env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK", dockerNetwork); + env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME", + dockerHostname); + env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER", runPrivilegedContainer); + StringBuilder sb = new StringBuilder(); + for (Entry mount : mountPaths.entrySet()) { + if (sb.length() > 0) { + sb.append(","); + } + sb.append(mount.getKey()); + sb.append(":"); + sb.append(mount.getValue()); + } + env.put("YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS", sb.toString()); + log.info("yarn docker env var has been set {}", containerLaunchContext.getEnvironment().toString()); + } + + return containerLaunchContext; + } + + 
public void setRetryContext(int maxRetries, int retryInterval) { + ContainerRetryContext retryContext = ContainerRetryContext + .newInstance(ContainerRetryPolicy.RETRY_ON_ALL_ERRORS, null, maxRetries, + retryInterval); + containerLaunchContext.setContainerRetryContext(retryContext); + } + + /** + * Dump local resources at debug level + */ + private void dumpLocalResources() { + if (log.isDebugEnabled()) { + log.debug("{} resources: ", localResources.size()); + for (Map.Entry entry : localResources.entrySet()) { + + String key = entry.getKey(); + LocalResource val = entry.getValue(); + log.debug(key + "=" + ServiceUtils.stringify(val.getResource())); + } + } + } + + /** + * This is critical for an insecure cluster -it passes + * down the username to YARN, and so gives the code running + * in containers the rights it needs to work with + * data. + * @throws IOException problems working with current user + */ + protected void propagateUsernameInInsecureCluster() throws IOException { + //insecure cluster: propagate user name via env variable + String userName = UserGroupInformation.getCurrentUser().getUserName(); + envVars.put(YarnServiceConstants.HADOOP_USER_NAME, userName); + } + + /** + * Utility method to set up the classpath + * @param classpath classpath to use + */ + public void setClasspath(ClasspathConstructor classpath) { + setEnv(CLASSPATH, classpath.buildClasspath()); + } + + /** + * Set an environment variable in the launch context + * @param var variable name + * @param value value (must be non null) + */ + public void setEnv(String var, String value) { + Preconditions.checkArgument(var != null, "null variable name"); + Preconditions.checkArgument(value != null, "null value"); + envVars.put(var, value); + } + + + public void putEnv(Map map) { + envVars.putAll(map); + } + + + public void setDockerImage(String dockerImage) { + this.dockerImage = dockerImage; + } + + public void setDockerNetwork(String dockerNetwork) { + this.dockerNetwork = dockerNetwork; + 
} + + public void setDockerHostname(String dockerHostname) { + this.dockerHostname = dockerHostname; + } + + public void setRunPrivilegedContainer(boolean runPrivilegedContainer) { + if (runPrivilegedContainer) { + this.runPrivilegedContainer = Boolean.toString(true); + } else { + this.runPrivilegedContainer = Boolean.toString(false); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ClasspathConstructor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ClasspathConstructor.java new file mode 100644 index 00000000000..711abb25eac --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ClasspathConstructor.java @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.containerlaunch; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * build a classpath -allows for entries to be injected in front of + * YARN classpath as well as behind, adds appropriate separators, + * extraction of local classpath, etc. + */ +public class ClasspathConstructor { + + public static final String CLASS_PATH_SEPARATOR = ApplicationConstants.CLASS_PATH_SEPARATOR; + private final List pathElements = new ArrayList<>(); + + public ClasspathConstructor() { + } + + + /** + * Get the list of JARs from the YARN settings + * @param config configuration + */ + public List yarnApplicationClasspath(Configuration config) { + String[] cp = config.getTrimmedStrings( + YarnConfiguration.YARN_APPLICATION_CLASSPATH, + YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH); + return cp != null ? Arrays.asList(cp) : new ArrayList(0); + + } + + + @Override + public String toString() { + return buildClasspath(); + } + + public String buildClasspath() { + return ServiceUtils.join(pathElements, + CLASS_PATH_SEPARATOR, + false); + } + + /** + * Get a copy of the path list + * @return the JARs + */ + public List getPathElements() { + return Collections.unmodifiableList(pathElements); + } + + /** + * Append an entry + * @param path path + */ + public void append(String path) { + pathElements.add(path); + } + + /** + * Insert a path at the front of the list. This places it ahead of + * the standard YARN artifacts + * @param path path to the JAR. 
Absolute or relative -on the target + * system + */ + public void insert(String path) { + pathElements.add(0, path); + } + + public void appendAll(Collection paths) { + pathElements.addAll(paths); + } + + public void insertAll(Collection paths) { + pathElements.addAll(0, paths); + } + + + public void addLibDir(String pathToLibDir) { + append(buildLibDir(pathToLibDir)); + } + + public void insertLibDir(String pathToLibDir) { + insert(buildLibDir(pathToLibDir)); + } + + public void addClassDirectory(String pathToDir) { + append(appendDirectoryTerminator(pathToDir)); + } + + public void insertClassDirectory(String pathToDir) { + insert(buildLibDir(appendDirectoryTerminator(pathToDir))); + } + + + public void addRemoteClasspathEnvVar() { + append(ApplicationConstants.Environment.CLASSPATH.$$()); + } + + + public void insertRemoteClasspathEnvVar() { + append(ApplicationConstants.Environment.CLASSPATH.$$()); + } + + + /** + * Build a lib dir path + * @param pathToLibDir path to the directory; may or may not end with a + * trailing space + * @return a path to a lib dir that is compatible with the java classpath + */ + public String buildLibDir(String pathToLibDir) { + String dir = appendDirectoryTerminator(pathToLibDir); + dir += "*"; + return dir; + } + + private String appendDirectoryTerminator(String pathToLibDir) { + String dir = pathToLibDir.trim(); + if (!dir.endsWith("/")) { + dir += "/"; + } + return dir; + } + + /** + * Split a classpath. 
This uses the local path separator so MUST NOT + * be used to work with remote classpaths + * @param localpath local path + * @return a splite + */ + public Collection splitClasspath(String localpath) { + String separator = System.getProperty("path.separator"); + return StringUtils.getStringCollection(localpath, separator); + } + + /** + * Get the local JVM classpath split up + * @return the list of entries on the JVM classpath env var + */ + public Collection localJVMClasspath() { + return splitClasspath(System.getProperty("java.class.path")); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java new file mode 100644 index 00000000000..5ed56e31c46 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.service.containerlaunch;

import com.google.common.base.Preconditions;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;

import java.util.ArrayList;
import java.util.List;

/**
 * Build a single command line to include in the container commands;
 * Special support for JVM command buildup.
 */
public class CommandLineBuilder {
  // Accumulated arguments, joined with single spaces by build().
  // Typed List<String>: the raw type in the earlier revision defeated
  // compile-time checking for no benefit.
  protected final List<String> argumentList = new ArrayList<>(20);

  /**
   * Add an entry to the command list
   * @param args arguments -these will be converted to strings via
   * {@code toString()}; must not be null
   */
  public void add(Object... args) {
    for (Object arg : args) {
      argumentList.add(arg.toString());
    }
  }

  /**
   * Get the number of arguments accumulated so far.
   * @return the argument count
   */
  public int size() {
    return argumentList.size();
  }

  /**
   * Append the output and error files to the tail of the command
   * @param stdout out; must be a non-empty filename
   * @param stderr error. Set this to null to append stderr into stdout
   * (i.e. {@code 2>&1})
   */
  public void addOutAndErrFiles(String stdout, String stderr) {
    Preconditions.checkNotNull(stdout, "Null output file");
    Preconditions.checkState(!stdout.isEmpty(), "output filename invalid");
    // write out the path output
    argumentList.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/"
        + stdout);
    if (stderr != null) {
      argumentList.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/"
          + stderr);
    } else {
      argumentList.add("2>&1");
    }
  }

  /**
   * This just returns the command line
   * @see #build()
   * @return the command line
   */
  @Override
  public String toString() {
    return build();
  }

  /**
   * Build the command line
   * @return the command line: all arguments joined with single spaces
   */
  public String build() {
    return ServiceUtils.join(argumentList, " ");
  }
}
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.service.containerlaunch;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.provider.ProviderService;
import org.apache.hadoop.yarn.service.provider.ProviderFactory;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Service which launches component-instance containers asynchronously on a
 * cached thread pool. Each launch builds the container launch context via
 * the component's provider and hands the container to the NM client.
 */
public class ContainerLaunchService extends AbstractService {

  protected static final Logger LOG =
      LoggerFactory.getLogger(ContainerLaunchService.class);

  // Pool for the per-container launch runnables; created in serviceInit,
  // shut down (hard) in serviceStop.
  private ExecutorService executorService;
  private final SliderFileSystem fs;

  public ContainerLaunchService(SliderFileSystem fs) {
    super(ContainerLaunchService.class.getName());
    this.fs = fs;
  }

  @Override
  public void serviceInit(Configuration conf) throws Exception {
    executorService = Executors.newCachedThreadPool();
    super.serviceInit(conf);
  }

  @Override
  protected void serviceStop() throws Exception {
    if (executorService != null) {
      // shutdownNow: in-flight launches are interrupted rather than drained.
      executorService.shutdownNow();
    }
    super.serviceStop();
  }

  /**
   * Schedule an asynchronous launch of a component instance's container.
   * @param service service definition
   * @param instance component instance to launch
   * @param container the allocated container
   */
  public void launchCompInstance(Service service,
      ComponentInstance instance, Container container) {
    ContainerLauncher launcher =
        new ContainerLauncher(service, instance, container);
    executorService.execute(launcher);
  }

  /**
   * One-shot runnable that builds a launch context and starts the container.
   * Failures are logged and swallowed deliberately: a failed launch must not
   * kill the executor thread, and retry policy is handled elsewhere.
   */
  private class ContainerLauncher implements Runnable {
    private final Container container;
    private final Service service;
    private final ComponentInstance instance;

    ContainerLauncher(
        Service service,
        ComponentInstance instance, Container container) {
      this.container = container;
      this.service = service;
      this.instance = instance;
    }

    @Override public void run() {
      Component compSpec = instance.getCompSpec();
      ProviderService provider = ProviderFactory.getProviderService(
          compSpec.getArtifact());
      AbstractLauncher launcher = new AbstractLauncher(fs, null);
      try {
        provider.buildContainerLaunchContext(launcher, service,
            instance, fs, getConfig());
        instance.getComponent().getScheduler().getNmClient()
            .startContainerAsync(container,
                launcher.completeContainerLaunch());
      } catch (Exception e) {
        LOG.error(instance.getCompInstanceId()
            + ": Failed to launch container. ", e);
      }
    }
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.service.containerlaunch;

import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.text.DateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.Iterator;
import java.util.List;

import static org.apache.hadoop.yarn.conf.YarnConfiguration.*;

/**
 * Utils to work with credentials and tokens.
 *
 * Designed to be movable to Hadoop core
 */
public final class CredentialUtils {

  private CredentialUtils() {
  }

  private static final Logger LOG =
      LoggerFactory.getLogger(CredentialUtils.class);

  /**
   * Save credentials to a byte buffer. Returns null if there were no
   * credentials to save
   * @param credentials credential set
   * @return a byte buffer of serialized tokens
   * @throws IOException if the credentials could not be written to the stream
   */
  public static ByteBuffer marshallCredentials(Credentials credentials)
      throws IOException {
    ByteBuffer buffer = null;
    if (!credentials.getAllTokens().isEmpty()) {
      DataOutputBuffer dob = new DataOutputBuffer();
      try {
        credentials.writeTokenStorageToStream(dob);
      } finally {
        dob.close();
      }
      buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    }
    return buffer;
  }

  /**
   * Save credentials to a file
   * @param file file to save to (will be overwritten)
   * @param credentials credentials to write
   * @throws IOException on any write failure
   */
  public static void saveTokens(File file,
      Credentials credentials) throws IOException {
    try (DataOutputStream daos = new DataOutputStream(
        new FileOutputStream(file))) {
      credentials.writeTokenStorageToStream(daos);
    }
  }

  /**
   * Look up and return the resource manager's principal. This method
   * automatically does the _HOST replacement in the principal and
   * correctly handles HA resource manager configurations.
   *
   * From: YARN-4629
   * @param conf the {@link Configuration} file from which to read the
   * principal
   * @return the resource manager's principal string
   * @throws IOException thrown if there's an error replacing the host name
   */
  public static String getRMPrincipal(Configuration conf) throws IOException {
    String principal = conf.get(RM_PRINCIPAL, "");
    String hostname;
    Preconditions.checkState(!principal.isEmpty(), "Not set: " + RM_PRINCIPAL);

    if (HAUtil.isHAEnabled(conf)) {
      YarnConfiguration yarnConf = new YarnConfiguration(conf);
      if (yarnConf.get(RM_HA_ID) == null) {
        // If RM_HA_ID is not configured, use the first of RM_HA_IDS.
        // Any valid RM HA ID should work.
        String[] rmIds = yarnConf.getStrings(RM_HA_IDS);
        Preconditions.checkState((rmIds != null) && (rmIds.length > 0),
            "Not set " + RM_HA_IDS);
        yarnConf.set(RM_HA_ID, rmIds[0]);
      }

      hostname = yarnConf.getSocketAddr(
          RM_ADDRESS,
          DEFAULT_RM_ADDRESS,
          DEFAULT_RM_PORT).getHostName();
    } else {
      hostname = conf.getSocketAddr(
          RM_ADDRESS,
          DEFAULT_RM_ADDRESS,
          DEFAULT_RM_PORT).getHostName();
    }
    return SecurityUtil.getServerPrincipal(principal, hostname);
  }

  /**
   * Create and add any filesystem delegation tokens with
   * the RM(s) configured to be able to renew them. Returns null
   * on an insecure cluster (i.e. harmless)
   * @param conf configuration
   * @param fs filesystem
   * @param credentials credentials to update
   * @return a list of all added tokens, or null when security is disabled
   * @throws IOException on token-fetch failure
   */
  public static Token<?>[] addRMRenewableFSDelegationTokens(Configuration conf,
      FileSystem fs,
      Credentials credentials) throws IOException {
    Preconditions.checkArgument(conf != null);
    Preconditions.checkArgument(credentials != null);
    if (UserGroupInformation.isSecurityEnabled()) {
      return fs.addDelegationTokens(CredentialUtils.getRMPrincipal(conf),
          credentials);
    }
    return null;
  }

  /**
   * Add an FS delegation token which can be renewed by the current user
   * @param fs filesystem
   * @param credentials credentials to update
   * @throws IOException problems.
   */
  public static void addSelfRenewableFSDelegationTokens(
      FileSystem fs,
      Credentials credentials) throws IOException {
    Preconditions.checkArgument(fs != null);
    Preconditions.checkArgument(credentials != null);
    fs.addDelegationTokens(
        getSelfRenewer(),
        credentials);
  }

  /**
   * Get the short name of the logged-in user, for use as a token renewer.
   * @return the current user's short name
   * @throws IOException if the login user cannot be determined
   */
  public static String getSelfRenewer() throws IOException {
    return UserGroupInformation.getLoginUser().getShortUserName();
  }

  /**
   * Create and add an RM delegation token to the credentials
   * @param yarnClient Yarn Client
   * @param credentials to add token to
   * @return the token which was added
   * @throws IOException on serialization problems
   * @throws YarnException on RM-side failures
   */
  public static Token<TokenIdentifier> addRMDelegationToken(
      YarnClient yarnClient,
      Credentials credentials)
      throws IOException, YarnException {
    Configuration conf = yarnClient.getConfig();
    Text rmPrincipal = new Text(CredentialUtils.getRMPrincipal(conf));
    Text rmDTService = ClientRMProxy.getRMDelegationTokenService(conf);
    Token<TokenIdentifier> rmDelegationToken =
        ConverterUtils.convertFromYarn(
            yarnClient.getRMDelegationToken(rmPrincipal),
            rmDTService);
    credentials.addToken(rmDelegationToken.getService(), rmDelegationToken);
    return rmDelegationToken;
  }

  /**
   * If the timeline service is enabled, fetch a timeline delegation token
   * and add it to the credentials.
   * @param conf configuration
   * @param credentials credentials to update
   * @return the token added, or null if the timeline service is disabled
   * @throws IOException on token-fetch failure
   * @throws YarnException on service-side failure
   */
  public static Token<TimelineDelegationTokenIdentifier> maybeAddTimelineToken(
      Configuration conf,
      Credentials credentials)
      throws IOException, YarnException {
    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false)) {
      LOG.debug("Timeline service enabled -fetching token");

      try (TimelineClient timelineClient =
               TimelineClient.createTimelineClient()) {
        timelineClient.init(conf);
        timelineClient.start();
        Token<TimelineDelegationTokenIdentifier> token =
            timelineClient.getDelegationToken(
                CredentialUtils.getRMPrincipal(conf));
        credentials.addToken(token.getService(), token);
        return token;
      }
    } else {
      LOG.debug("Timeline service is disabled");
      return null;
    }
  }

  /**
   * Filter a list of tokens from a set of credentials
   * @param credentials credential source (a new credential set is built
   * from it; the source itself is not modified)
   * @param filter List of token kinds to strip out
   * @return a new, filtered, set of credentials
   */
  public static Credentials filterTokens(Credentials credentials,
      List<Text> filter) {
    Credentials result = new Credentials(credentials);
    Iterator<Token<? extends TokenIdentifier>> iter =
        result.getAllTokens().iterator();
    while (iter.hasNext()) {
      Token<? extends TokenIdentifier> token = iter.next();
      LOG.debug("Token {}", token.getKind());
      if (filter.contains(token.getKind())) {
        LOG.debug("Filtering token {}", token.getKind());
        // Iterator.remove is the only safe way to delete mid-iteration.
        iter.remove();
      }
    }
    return result;
  }

  /**
   * Dump all tokens in the credentials, sorted by kind, one per line.
   * @param credentials credentials to dump
   * @param separator separator appended after each token
   * @return the printable dump
   */
  public static String dumpTokens(Credentials credentials, String separator) {
    ArrayList<Token<? extends TokenIdentifier>> sorted =
        new ArrayList<>(credentials.getAllTokens());
    Collections.sort(sorted, new TokenComparator());
    StringBuilder buffer = new StringBuilder(sorted.size() * 128);
    for (Token<? extends TokenIdentifier> token : sorted) {
      buffer.append(tokenToString(token)).append(separator);
    }
    return buffer.toString();
  }

  /**
   * Create a string for people to look at
   * @param token token to convert to a string form
   * @return a printable view of the token
   */
  public static String tokenToString(Token<? extends TokenIdentifier> token) {
    DateFormat df = DateFormat.getDateTimeInstance(
        DateFormat.SHORT, DateFormat.SHORT);
    StringBuilder buffer = new StringBuilder(128);
    buffer.append(token.toString());
    try {
      TokenIdentifier ti = token.decodeIdentifier();
      buffer.append("; ").append(ti);
      if (ti instanceof AbstractDelegationTokenIdentifier) {
        // details in human readable form, and compensate for information
        // HDFS DT omits
        AbstractDelegationTokenIdentifier dt =
            (AbstractDelegationTokenIdentifier) ti;
        buffer.append("; Renewer: ").append(dt.getRenewer());
        buffer.append("; Issued: ")
            .append(df.format(new Date(dt.getIssueDate())));
        buffer.append("; Max Date: ")
            .append(df.format(new Date(dt.getMaxDate())));
      }
    } catch (IOException e) {
      // marshall problem; not ours — degrade to the basic toString() view
      LOG.debug("Failed to decode {}: {}", token, e, e);
    }
    return buffer.toString();
  }

  /**
   * Get the expiry time of a token.
   * Note: this returns the identifier's max date (renewal limit), not the
   * next-renewal expiry.
   * @param token token to examine
   * @return the time in milliseconds after which the token is invalid.
   * @throws IOException if the identifier cannot be decoded
   */
  public static long getTokenExpiryTime(Token<? extends TokenIdentifier> token)
      throws IOException {
    TokenIdentifier identifier = token.decodeIdentifier();
    Preconditions.checkState(
        identifier instanceof AbstractDelegationTokenIdentifier,
        "Token %s of type: %s has an identifier which cannot be examined: %s",
        token, token.getClass(), identifier);
    AbstractDelegationTokenIdentifier id =
        (AbstractDelegationTokenIdentifier) identifier;
    return id.getMaxDate();
  }

  /**
   * Orders tokens by the string form of their kind.
   * Serializable so sorted collections of tokens stay serializable.
   */
  private static class TokenComparator
      implements Comparator<Token<? extends TokenIdentifier>>, Serializable {
    @Override
    public int compare(Token<? extends TokenIdentifier> left,
        Token<? extends TokenIdentifier> right) {
      return left.getKind().toString().compareTo(right.getKind().toString());
    }
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.service.containerlaunch;


import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.service.utils.ServiceUtils;
import org.apache.hadoop.yarn.service.exceptions.BadConfigException;

import java.util.Map;

/**
 * Command line builder purely for the Java CLI.
 * Some of the define methods are designed to work with Hadoop tool and
 * Slider launcher applications.
 */
public class JavaCommandLineBuilder extends CommandLineBuilder {

  public JavaCommandLineBuilder() {
    add(getJavaBinary());
  }

  /**
   * Get the java binary. This is called in the constructor so don't try and
   * do anything other than return a constant.
   * @return the path to the Java binary
   */
  protected String getJavaBinary() {
    return ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java";
  }

  /**
   * Set the size of the heap if a non-empty heap is passed in.
   * @param heap empty string or something like "128M" ,"1G" etc. The value is
   * trimmed.
   */
  public void setJVMHeap(String heap) {
    if (ServiceUtils.isSet(heap)) {
      add("-Xmx" + heap.trim());
    }
  }

  /**
   * Turn Java assertions on
   */
  public void enableJavaAssertions() {
    add("-ea");
    add("-esa");
  }

  /**
   * Add a system property definition -must be used before setting the main
   * entry point
   * @param property property name; must not be null
   * @param value property value; must not be null
   */
  public void sysprop(String property, String value) {
    Preconditions.checkArgument(property != null, "null property name");
    Preconditions.checkArgument(value != null, "null value");
    add("-D" + property + "=" + value);
  }

  /**
   * Force IPv4 on the JVM.
   * @return this, for chaining
   */
  public JavaCommandLineBuilder forceIPv4() {
    sysprop("java.net.preferIPv4Stack", "true");
    return this;
  }

  /**
   * Run the JVM headless.
   * @return this, for chaining
   */
  public JavaCommandLineBuilder headless() {
    sysprop("java.awt.headless", "true");
    return this;
  }

  /**
   * Add a -D definition for the key if the configuration defines it.
   * @param conf configuration source
   * @param key key to look up
   * @return true if the option was defined
   */
  public boolean addConfOption(Configuration conf, String key) {
    return defineIfSet(key, conf.get(key));
  }

  /**
   * Add a varargs list of configuration parameters —if they are present
   * @param conf configuration source
   * @param keys keys
   */
  public void addConfOptions(Configuration conf, String... keys) {
    for (String key : keys) {
      addConfOption(conf, key);
    }
  }

  /**
   * Add all configuration options which match the prefix
   * @param conf configuration
   * @param prefix prefix, e.g {@code "slider."}
   * @return the number of entries copied
   */
  public int addPrefixedConfOptions(Configuration conf, String prefix) {
    int copied = 0;
    for (Map.Entry<String, String> entry : conf) {
      if (entry.getKey().startsWith(prefix)) {
        define(entry.getKey(), entry.getValue());
        copied++;
      }
    }
    return copied;
  }

  /**
   * Add a configuration option to the command line of the application
   * @param conf configuration
   * @param key key
   * @param defVal default value
   * @return the resolved configuration option
   * @throws IllegalArgumentException if key is null or the looked up value
   * is null (that is: the argument is missing and defVal was null).
   */
  public String addConfOptionToCLI(Configuration conf,
      String key,
      String defVal) {
    Preconditions.checkArgument(key != null, "null key");
    String val = conf.get(key, defVal);
    define(key, val);
    return val;
  }

  /**
   * Add a <code>-D key=val</code> command to the CLI. This is very Hadoop API
   * @param key key
   * @param val value
   * @throws IllegalArgumentException if either argument is null
   */
  public void define(String key, String val) {
    Preconditions.checkArgument(key != null, "null key");
    Preconditions.checkArgument(val != null, "null value");
    add("-D", key + "=" + val);
  }

  /**
   * Add a <code>-D key=val</code> command to the CLI if <code>val</code>
   * is not null
   * @param key key
   * @param val value
   * @return true if the definition was added
   */
  public boolean defineIfSet(String key, String val) {
    Preconditions.checkArgument(key != null, "null key");
    if (val != null) {
      define(key, val);
      return true;
    } else {
      return false;
    }
  }

  /**
   * Add a mandatory config option
   * @param conf configuration
   * @param key key
   * @throws BadConfigException if the key is missing
   */
  public void addMandatoryConfOption(Configuration conf,
      String key) throws BadConfigException {
    if (!addConfOption(conf, key)) {
      throw new BadConfigException("Missing configuration option: " + key);
    }
  }

}
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +import org.apache.hadoop.yarn.service.exceptions.SliderException; + +/** + * The system is in a bad state + */ +public class BadClusterStateException extends SliderException { + public BadClusterStateException(String message, + Object... args) { + super(EXIT_BAD_STATE, message, args); + } + + public BadClusterStateException(Throwable throwable, + String message, Object... args) { + super(EXIT_BAD_STATE, throwable, message, args); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadCommandArgumentsException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadCommandArgumentsException.java new file mode 100644 index 00000000000..41e325159d2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadCommandArgumentsException.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +public class BadCommandArgumentsException extends SliderException { + public BadCommandArgumentsException(String s, Object... args) { + super(EXIT_COMMAND_ARGUMENT_ERROR, s, args); + } + + public BadCommandArgumentsException(Throwable throwable, String message, + Object... args) { + super(EXIT_COMMAND_ARGUMENT_ERROR, throwable, message, args); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadConfigException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadConfigException.java new file mode 100644 index 00000000000..8199c3c17ed --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadConfigException.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +/** + * An exception to raise on a bad configuration + */ +public class BadConfigException extends SliderException { + + public BadConfigException(String s) { + super(EXIT_BAD_CONFIGURATION, s); + } + + public BadConfigException(String message, Object... args) { + super(EXIT_BAD_CONFIGURATION, message, args); + } + + public BadConfigException( + Throwable throwable, + String message, Object... args) { + super(EXIT_BAD_CONFIGURATION, throwable, message, args); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java new file mode 100644 index 00000000000..83658c89ea2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +public interface ErrorStrings { + + String PRINTF_E_INSTANCE_ALREADY_EXISTS = "Service Instance \"%s\" already exists and is defined in %s"; + String PRINTF_E_INSTANCE_DIR_ALREADY_EXISTS = "Service Instance dir already exists: %s"; + + /** + * ERROR Strings + */ + String ERROR_NO_ACTION = "No action specified"; + String ERROR_UNKNOWN_ACTION = "Unknown command: "; + String ERROR_NOT_ENOUGH_ARGUMENTS = + "Not enough arguments for action: "; + String ERROR_PARSE_FAILURE = + "Failed to parse "; + /** + * All the remaining values after argument processing + */ + String ERROR_TOO_MANY_ARGUMENTS = + "Too many arguments"; + String ERROR_DUPLICATE_ENTRY = "Duplicate entry for "; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ExitCodeProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ExitCodeProvider.java new file mode 100644 index 00000000000..d66b86030eb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ExitCodeProvider.java @@ -0,0 +1,32 @@ +/* + * Licensed 
to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +/** + * Get the exit code of an exception. Making it an interface allows + * us to retrofit exit codes onto existing classes + */ +public interface ExitCodeProvider { + + /** + * Method to get the exit code + * @return the exit code + */ + int getExitCode(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/LauncherExitCodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/LauncherExitCodes.java new file mode 100644 index 00000000000..483fb48d465 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/LauncherExitCodes.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +/* + * Common Exit codes + *

+ * Exit codes from 64 up are service specific. + *

+ * Many of the exit codes are designed to resemble HTTP error codes, + * squashed into a single byte. e.g. 44, "not found" is the equivalent + * of 404 + *

+ *    0-10: general command issues
+ *   30-39: equivalent to the 3XX responses, where those responses are
+ *          considered errors by the service.
+ *   40-49: request-related errors
+ *   50-59: server-side problems. These may be triggered by the request.
+ *   64-  : service specific error codes
+ * 
+ */ +public interface LauncherExitCodes { + + /** + * 0: success + */ + int EXIT_SUCCESS = 0; + + /** + * -1: generic "false" response. The operation worked but + * the result was not true + */ + int EXIT_FALSE = -1; + + /** + * Exit code when a client requested service termination: {@value} + */ + int EXIT_CLIENT_INITIATED_SHUTDOWN = 1; + + /** + * Exit code when targets could not be launched: {@value} + */ + int EXIT_TASK_LAUNCH_FAILURE = 2; + + /** + * Exit code when a control-C, kill -3, signal was picked up: {@value} + */ + int EXIT_INTERRUPTED = 3; + + /** + * Exit code when a usage message was printed: {@value} + */ + int EXIT_USAGE = 4; + + /** + * Exit code when something happened but we can't be specific: {@value} + */ + int EXIT_OTHER_FAILURE = 5; + + /** + * Exit code on connectivity problems: {@value} + */ + int EXIT_MOVED = 31; + + /** + * found: {@value}. + *

+ * This is low value as in HTTP it is normally a success/redirect; + * whereas on the command line 0 is the sole success code. + *

+ * 302 Found + */ + int EXIT_FOUND = 32; + + /** + * Exit code on a request where the destination has not changed + * and (somehow) the command specified that this is an error. + * That is, this exit code is somehow different from a "success" + * : {@value} + *

+ * 304 Not Modified + */ + int EXIT_NOT_MODIFIED = 34; + + /** + * Exit code when the command line doesn't parse: {@value}, or + * when it is otherwise invalid. + *

+ * 400 BAD REQUEST + */ + int EXIT_COMMAND_ARGUMENT_ERROR = 40; + + /** + * The request requires user authentication: {@value} + *

+ * 401 Unauthorized + */ + int EXIT_UNAUTHORIZED = 41; + + /** + * Forbidden action: {@value} + *

+ * 403: Forbidden + */ + int EXIT_FORBIDDEN = 43; + + /** + * Something was not found: {@value} + *

+ * 404: NOT FOUND + */ + int EXIT_NOT_FOUND = 44; + + /** + * The operation is not allowed: {@value} + *

+ * 405: NOT ALLOWED + */ + int EXIT_OPERATION_NOT_ALLOWED = 45; + + /** + * The command is somehow not acceptable: {@value} + *

+ * 406: NOT ACCEPTABLE + */ + int EXIT_NOT_ACCEPTABLE = 46; + + /** + * Exit code on connectivity problems: {@value} + *

+ * 408: Request Timeout + */ + int EXIT_CONNECTIVITY_PROBLEM = 48; + + /** + * The request could not be completed due to a conflict with the current + * state of the resource. {@value} + *

+ * 409: conflict + */ + int EXIT_CONFLICT = 49; + + /** + * internal error: {@value} + *

+ * 500 Internal Server Error + */ + int EXIT_INTERNAL_ERROR = 50; + + /** + * Unimplemented feature: {@value} + *

+ * 501: Not Implemented + */ + int EXIT_UNIMPLEMENTED = 51; + + /** + * Service Unavailable; it may be available later: {@value} + *

+ * 503 Service Unavailable + */ + int EXIT_SERVICE_UNAVAILABLE = 53; + + /** + * The service does not support, or refuses to support this version: {@value}. + * If raised, this is expected to be raised server-side and likely due + * to client/server version incompatibilities. + *

+ * 505: Version Not Supported + */ + int EXIT_UNSUPPORTED_VERSION = 55; + + /** + * Exit code when an exception was thrown from the service: {@value} + *

+ * 5XX + */ + int EXIT_EXCEPTION_THROWN = 56; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java new file mode 100644 index 00000000000..ef22b578051 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.exceptions; + +public interface RestApiErrorMessages { + String ERROR_APPLICATION_NAME_INVALID = + "Service name is either empty or not provided"; + String ERROR_APPLICATION_NAME_INVALID_FORMAT = + "Service name %s is not valid - only lower case letters, digits, " + + "and hyphen are allowed, and the name must be no more " + + "than 63 characters"; + String ERROR_COMPONENT_NAME_INVALID = + "Component name must be no more than %s characters: %s"; + String ERROR_USER_NAME_INVALID = + "User name must be no more than 63 characters"; + + String ERROR_APPLICATION_NOT_RUNNING = "Service not running"; + String ERROR_APPLICATION_DOES_NOT_EXIST = "Service not found"; + String ERROR_APPLICATION_IN_USE = "Service already exists in started" + + " state"; + String ERROR_APPLICATION_INSTANCE_EXISTS = "Service already exists in" + + " stopped/failed state (either restart with PUT or destroy with DELETE" + + " before creating a new one)"; + + String ERROR_SUFFIX_FOR_COMPONENT = + " for component %s (nor at the global level)"; + String ERROR_ARTIFACT_INVALID = "Artifact is not provided"; + String ERROR_ARTIFACT_FOR_COMP_INVALID = + ERROR_ARTIFACT_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + String ERROR_ARTIFACT_ID_INVALID = + "Artifact id (like docker image name) is either empty or not provided"; + String ERROR_ARTIFACT_ID_FOR_COMP_INVALID = + ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + + String ERROR_RESOURCE_INVALID = "Resource is not provided"; + String ERROR_RESOURCE_FOR_COMP_INVALID = + ERROR_RESOURCE_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + String ERROR_RESOURCE_MEMORY_INVALID = + "Service resource or memory not provided"; + String ERROR_RESOURCE_CPUS_INVALID = + "Service resource or cpus not provided"; + String ERROR_RESOURCE_CPUS_INVALID_RANGE = + "Unacceptable no of cpus specified, either zero or negative"; + String ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID = + ERROR_RESOURCE_MEMORY_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + 
String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID = + ERROR_RESOURCE_CPUS_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE = + ERROR_RESOURCE_CPUS_INVALID_RANGE + + " for component %s (or at the global level)"; + String ERROR_CONTAINERS_COUNT_INVALID = + "Invalid no of containers specified"; + String ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID = + ERROR_CONTAINERS_COUNT_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + String ERROR_DEPENDENCY_INVALID = "Dependency %s for component %s is " + + "invalid, does not exist as a component"; + String ERROR_DEPENDENCY_CYCLE = "Invalid dependencies, a cycle may " + + "exist: %s"; + + String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED = + "Cannot specify" + " cpus/memory along with profile"; + String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED = + ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED + + " for component %s"; + String ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET = + "Resource profile is not " + "supported yet. 
Please specify cpus/memory."; + + String ERROR_NULL_ARTIFACT_ID = + "Artifact Id can not be null if artifact type is none"; + String ERROR_ABSENT_NUM_OF_INSTANCE = + "Num of instances should appear either globally or per component"; + String ERROR_ABSENT_LAUNCH_COMMAND = + "Launch_command is required when type is not DOCKER"; + + String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at" + + " component level, needs corresponding values set at service level"; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ServiceLaunchException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ServiceLaunchException.java new file mode 100644 index 00000000000..e83ccbe5973 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ServiceLaunchException.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.exceptions; + + +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** + * A service launch exception that includes an exit code; + * when caught by the ServiceLauncher, it will convert that + * into a process exit code. + */ +public class ServiceLaunchException extends YarnException + implements ExitCodeProvider, LauncherExitCodes { + + private final int exitCode; + + /** + * Create an exception with the specific exit code + * @param exitCode exit code + * @param cause cause of the exception + */ + public ServiceLaunchException(int exitCode, Throwable cause) { + super(cause); + this.exitCode = exitCode; + } + + /** + * Create an exception with the specific exit code and text + * @param exitCode exit code + * @param message message to use in exception + */ + public ServiceLaunchException(int exitCode, String message) { + super(message); + this.exitCode = exitCode; + } + + /** + * Create an exception with the specific exit code, text and cause + * @param exitCode exit code + * @param message message to use in exception + * @param cause cause of the exception + */ + public ServiceLaunchException(int exitCode, String message, Throwable cause) { + super(message, cause); + this.exitCode = exitCode; + } + + /** + * Get the exit code + * @return the exit code + */ + @Override + public int getExitCode() { + return exitCode; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/SliderException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/SliderException.java new file mode 100644 index 00000000000..5b74b80e298 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/SliderException.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +import org.apache.hadoop.yarn.service.conf.SliderExitCodes; + +public class SliderException extends ServiceLaunchException implements + SliderExitCodes { + public SliderException() { + super(EXIT_EXCEPTION_THROWN, "SliderException"); + } + + public SliderException(int code, String message) { + super(code, message); + } + + public SliderException(String s) { + super(EXIT_EXCEPTION_THROWN, s); + } + + public SliderException(String s, Throwable throwable) { + super(EXIT_EXCEPTION_THROWN, s, throwable); + } + + /** + * Format the exception as you create it + * @param code exit code + * @param message exception message -sprintf formatted + * @param args arguments for the formatting + */ + public SliderException(int code, String message, Object... args) { + super(code, String.format(message, args)); + } + + /** + * Format the exception, include a throwable. 
+ * The throwable comes before the message so that it is out of the varargs + * @param code exit code + * @param throwable thrown + * @param message message + * @param args arguments + */ + public SliderException(int code, + Throwable throwable, + String message, + Object... args) { + super(code, String.format(message, args), throwable); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/UsageException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/UsageException.java new file mode 100644 index 00000000000..3a9fa25507d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/UsageException.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +/** + * Used to raise a usage exception ... 
this has the exit code + * {@link #EXIT_USAGE} + */ +public class UsageException extends SliderException { + public UsageException(String s, Object... args) { + super(EXIT_USAGE, s, args); + } + + public UsageException(Throwable throwable, String message, + Object... args) { + super(EXIT_USAGE, throwable, message, args); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java new file mode 100644 index 00000000000..33e33a62269 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.impl.pb.client; + +import com.google.protobuf.ServiceException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.RPCUtil; +import org.apache.hadoop.yarn.service.ClientAMProtocol; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetSocketAddress; + +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.service.impl.pb.service.ClientAMProtocolPB; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto; + +public class ClientAMProtocolPBClientImpl + implements ClientAMProtocol, Closeable { + + private ClientAMProtocolPB proxy; + + public ClientAMProtocolPBClientImpl(long clientVersion, + InetSocketAddress addr, Configuration conf) throws IOException { + RPC.setProtocolEngine(conf, ClientAMProtocolPB.class, + ProtobufRpcEngine.class); + proxy = RPC.getProxy(ClientAMProtocolPB.class, clientVersion, addr, conf); + + } + + @Override public FlexComponentsResponseProto flexComponents( + FlexComponentsRequestProto request) throws IOException, YarnException { + try { + return proxy.flexComponents(null, request); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + } + return null; + } + + @Override + public GetStatusResponseProto getStatus(GetStatusRequestProto request) + throws IOException, YarnException { + try { + return proxy.getStatus(null, request); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + } + return 
null; + } + + @Override + public StopResponseProto stop(StopRequestProto requestProto) + throws IOException, YarnException { + try { + return proxy.stop(null, requestProto); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + } + return null; + } + + @Override public void close() { + if (this.proxy != null) { + RPC.stopProxy(this.proxy); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPB.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPB.java new file mode 100644 index 00000000000..6a9cd3785eb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPB.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.impl.pb.service; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.yarn.proto.ClientAMProtocol; + +@ProtocolInfo( + protocolName = "org.apache.hadoop.yarn.service.ClientAMProtocol", + protocolVersion = 1) +public interface ClientAMProtocolPB extends + ClientAMProtocol.ClientAMProtocolService.BlockingInterface { +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPBServiceImpl.java new file mode 100644 index 00000000000..710078112a0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPBServiceImpl.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.impl.pb.service; + +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.service.ClientAMProtocol; + +import java.io.IOException; + +public class ClientAMProtocolPBServiceImpl implements ClientAMProtocolPB { + + private ClientAMProtocol real; + + public ClientAMProtocolPBServiceImpl(ClientAMProtocol impl) { + this.real = impl; + } + + @Override + public FlexComponentsResponseProto flexComponents(RpcController controller, + FlexComponentsRequestProto request) throws ServiceException { + try { + return real.flexComponents(request); + } catch (IOException | YarnException e) { + throw new ServiceException(e); + } + } + + @Override public GetStatusResponseProto getStatus(RpcController controller, + GetStatusRequestProto request) throws ServiceException { + try { + return real.getStatus(request); + } catch (IOException | YarnException e) { + throw new ServiceException(e); + } + } + + @Override + public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto stop( + RpcController controller, + org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request) + throws ServiceException { + try { + return real.stop(request); + } catch (IOException | YarnException e) { + throw new ServiceException(e); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java new file mode 100644 index 00000000000..982448ad713 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java @@ -0,0 +1,147 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.monitor; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.apache.hadoop.yarn.service.component.Component; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.component.ComponentEvent; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent; +import org.apache.hadoop.yarn.service.component.ComponentState; +import org.apache.hadoop.yarn.service.monitor.probe.ProbeStatus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.STARTED; +import static org.apache.hadoop.yarn.service.component.ComponentEventType.FLEX; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_NOT_READY; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_READY; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.READY; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_WINDOW; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_INTERVAL; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.READINESS_CHECK_INTERVAL; + +public class ServiceMonitor extends AbstractService { + + private static final Logger LOG = + LoggerFactory.getLogger(ServiceMonitor.class); + + public ScheduledExecutorService executorService; + private Map liveInstances = null; + private ServiceContext 
context; + private Configuration conf; + + public ServiceMonitor(String name, ServiceContext context) { + super(name); + liveInstances = context.scheduler.getLiveInstances(); + this.context = context; + } + + @Override + public void serviceInit(Configuration conf) throws Exception { + executorService = Executors.newScheduledThreadPool(1); + this.conf = conf; + super.serviceInit(conf); + } + + @Override + public void serviceStart() throws Exception { + long readinessCheckInterval = YarnServiceConf + .getLong(READINESS_CHECK_INTERVAL, DEFAULT_READINESS_CHECK_INTERVAL, + context.service.getConfiguration(), conf); + + executorService + .scheduleAtFixedRate(new ReadinessChecker(), readinessCheckInterval, + readinessCheckInterval, TimeUnit.SECONDS); + + // Default 6 hours. + long failureResetInterval = YarnServiceConf + .getLong(CONTAINER_FAILURE_WINDOW, 21600, + context.service.getConfiguration(), conf); + + executorService + .scheduleAtFixedRate(new ContainerFailureReset(), failureResetInterval, + failureResetInterval, TimeUnit.SECONDS); + } + + @Override + public void serviceStop() throws Exception { + if (executorService != null) { + executorService.shutdownNow(); + } + } + + private class ReadinessChecker implements Runnable { + + @Override + public void run() { + + // check if the comp instance are ready + for (Map.Entry entry : liveInstances + .entrySet()) { + ComponentInstance instance = entry.getValue(); + + ProbeStatus status = instance.ping(); + if (status.isSuccess()) { + if (instance.getState() == STARTED) { + // synchronously update the state. 
+ instance.handle( + new ComponentInstanceEvent(entry.getKey(), BECOME_READY)); + } + } else { + if (instance.getState() == READY) { + instance.handle( + new ComponentInstanceEvent(entry.getKey(), BECOME_NOT_READY)); + } + } + } + + for (Component component : context.scheduler.getAllComponents() + .values()) { + // If comp hasn't started yet and its dependencies are satisfied + if (component.getState() == ComponentState.INIT && component + .areDependenciesReady()) { + LOG.info("[COMPONENT {}]: Dependencies satisfied, ramping up.", + component.getName()); + ComponentEvent event = new ComponentEvent(component.getName(), FLEX) + .setDesired(component.getComponentSpec().getNumberOfContainers()); + component.handle(event); + } + } + } + } + + private class ContainerFailureReset implements Runnable { + @Override + public void run() { + for (Component component : context.scheduler.getAllComponents().values()) { + component.resetCompFailureCount(); + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java new file mode 100644 index 00000000000..1ed13a9c360 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Map; + +public class HttpProbe extends Probe { + protected static final Logger log = LoggerFactory.getLogger(HttpProbe.class); + + private static final String HOST_TOKEN = "${THIS_HOST}"; + + private final String urlString; + private final int timeout; + private final int min, max; + + + public HttpProbe(String url, int timeout, int min, int max, Configuration + conf) { + super("Http probe of " + url + " [" + min + "-" + max + "]", conf); + this.urlString = url; + this.timeout = timeout; + this.min = min; + this.max = max; + } + + public static HttpProbe create(Map props) + throws IOException { + String urlString = getProperty(props, WEB_PROBE_URL, null); + new URL(urlString); + int timeout = getPropertyInt(props, WEB_PROBE_CONNECT_TIMEOUT, + WEB_PROBE_CONNECT_TIMEOUT_DEFAULT); + int minSuccess = getPropertyInt(props, WEB_PROBE_MIN_SUCCESS, + WEB_PROBE_MIN_SUCCESS_DEFAULT); + int maxSuccess = getPropertyInt(props, WEB_PROBE_MAX_SUCCESS, + WEB_PROBE_MAX_SUCCESS_DEFAULT); + return new HttpProbe(urlString, timeout, minSuccess, maxSuccess, null); + } + 
+ + private static HttpURLConnection getConnection(URL url, int timeout) throws + IOException { + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); + connection.setInstanceFollowRedirects(true); + connection.setConnectTimeout(timeout); + return connection; + } + + @Override + public ProbeStatus ping(ComponentInstance instance) { + ProbeStatus status = new ProbeStatus(); + ContainerStatus containerStatus = instance.getContainerStatus(); + if (containerStatus == null || ServiceUtils.isEmpty(containerStatus.getIPs()) + || StringUtils.isEmpty(containerStatus.getHost())) { + status.fail(this, new IOException("IP is not available yet")); + return status; + } + + String ip = containerStatus.getIPs().get(0); + HttpURLConnection connection = null; + try { + URL url = new URL(urlString.replace(HOST_TOKEN, ip)); + connection = getConnection(url, this.timeout); + int rc = connection.getResponseCode(); + if (rc < min || rc > max) { + String error = "Probe " + url + " error code: " + rc; + log.info(error); + status.fail(this, + new IOException(error)); + } else { + status.succeed(this); + } + } catch (Throwable e) { + String error = "Probe " + urlString + " failed for IP " + ip + ": " + e; + log.info(error, e); + status.fail(this, + new IOException(error, e)); + } finally { + if (connection != null) { + connection.disconnect(); + } + } + return status; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/LogEntryBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/LogEntryBuilder.java new file mode 100644 index 00000000000..9ad86fe61bb --- /dev/null +++ 
/**
 * Accumulates log elements into one comma-separated line of
 * {@code name="value"} pairs, for easier indexing by tools such as Splunk.
 */
public class LogEntryBuilder {

  private final StringBuilder entries = new StringBuilder();

  public LogEntryBuilder() {
  }

  /** Start the entry with a free-form text element. */
  public LogEntryBuilder(String text) {
    elt(text);
  }

  /** Start the entry with a single name/value pair. */
  public LogEntryBuilder(String name, Object value) {
    entry(name, value);
  }

  /** Append a free-form text element, preceded by a separator if needed. */
  public LogEntryBuilder elt(String text) {
    separate();
    entries.append(text);
    return this;
  }

  /** Append a name/value pair, preceded by a separator if needed. */
  public LogEntryBuilder elt(String name, Object value) {
    separate();
    entry(name, value);
    return this;
  }

  // Emit the ", " separator unless this is the first element.
  private void separate() {
    if (entries.length() > 0) {
      entries.append(", ");
    }
  }

  // name="value"; a null value is rendered unquoted as the word null.
  private void entry(String name, Object value) {
    entries.append(name).append('=');
    entries.append(value == null ? "null" : '"' + value.toString() + '"');
  }

  @Override
  public String toString() {
    return entries.toString();
  }
}
/**
 * Config keys for monitoring probes.
 */
public interface MonitorKeys {

  /**
   * Port probing key : port to attempt to create a TCP connection to {@value}.
   */
  String PORT_PROBE_PORT = "port";
  /**
   * Port probing key : timeout for the connection attempt {@value}.
   */
  String PORT_PROBE_CONNECT_TIMEOUT = "timeout";
  /**
   * Port probing default : timeout for the connection attempt {@value}.
   */
  int PORT_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000;

  /**
   * Web probing key : URL {@value}.
   */
  String WEB_PROBE_URL = "url";
  /**
   * Web probing key : min success code {@value}.
   */
  String WEB_PROBE_MIN_SUCCESS = "min.success";
  /**
   * Web probing key : max success code {@value}.
   */
  String WEB_PROBE_MAX_SUCCESS = "max.success";
  /**
   * Web probing default : min successful response code {@value}.
   */
  int WEB_PROBE_MIN_SUCCESS_DEFAULT = 200;
  /**
   * Web probing default : max successful response code {@value}.
   */
  int WEB_PROBE_MAX_SUCCESS_DEFAULT = 299;
  /**
   * Web probing key : timeout for the connection attempt {@value}.
   */
  String WEB_PROBE_CONNECT_TIMEOUT = "timeout";
  /**
   * Web probing default : timeout for the connection attempt {@value}.
   */
  int WEB_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000;
}
+ */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import org.apache.hadoop.yarn.service.api.records.ReadinessCheck; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Formatter; +import java.util.Locale; + +/** + * Various utils to work with the monitor + */ +public final class MonitorUtils { + protected static final Logger LOG = LoggerFactory.getLogger(MonitorUtils + .class); + + private MonitorUtils() { + } + + public static String toPlural(int val) { + return val != 1 ? "s" : ""; + } + + /** + * Convert milliseconds to human time -the exact format is unspecified + * @param milliseconds a time in milliseconds + * @return a time that is converted to human intervals + */ + public static String millisToHumanTime(long milliseconds) { + StringBuilder sb = new StringBuilder(); + // Send all output to the Appendable object sb + Formatter formatter = new Formatter(sb, Locale.US); + + long s = Math.abs(milliseconds / 1000); + long m = Math.abs(milliseconds % 1000); + if (milliseconds > 0) { + formatter.format("%d.%03ds", s, m); + } else if (milliseconds == 0) { + formatter.format("0"); + } else { + formatter.format("-%d.%03ds", s, m); + } + return sb.toString(); + } + + public static Probe getProbe(ReadinessCheck readinessCheck) { + if (readinessCheck == null) { + return null; + } + if (readinessCheck.getType() == null) { + return null; + } + try { + switch (readinessCheck.getType()) { + case HTTP: + return HttpProbe.create(readinessCheck.getProperties()); + case PORT: + return PortProbe.create(readinessCheck.getProperties()); + default: + return null; + } + } catch (Throwable t) { + throw new IllegalArgumentException("Error creating readiness check " + + t); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java new file mode 100644 index 00000000000..85569f86d4a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.Map; + +/** + * Probe for a port being open. 
+ */ +public class PortProbe extends Probe { + protected static final Logger log = LoggerFactory.getLogger(PortProbe.class); + private final int port; + private final int timeout; + + public PortProbe(int port, int timeout) { + super("Port probe of " + port + " for " + timeout + "ms", null); + this.port = port; + this.timeout = timeout; + } + + public static PortProbe create(Map props) + throws IOException { + int port = getPropertyInt(props, PORT_PROBE_PORT, null); + + if (port >= 65536) { + throw new IOException(PORT_PROBE_PORT + " " + port + " is out of " + + "range"); + } + + int timeout = getPropertyInt(props, PORT_PROBE_CONNECT_TIMEOUT, + PORT_PROBE_CONNECT_TIMEOUT_DEFAULT); + + return new PortProbe(port, timeout); + } + + /** + * Try to connect to the (host,port); a failure to connect within + * the specified timeout is a failure. + * @param instance role instance + * @return the outcome + */ + @Override + public ProbeStatus ping(ComponentInstance instance) { + ProbeStatus status = new ProbeStatus(); + + if (instance.getContainerStatus() == null || ServiceUtils + .isEmpty(instance.getContainerStatus().getIPs())) { + status.fail(this, new IOException( + instance.getCompInstanceName() + ": IP is not available yet")); + return status; + } + + String ip = instance.getContainerStatus().getIPs().get(0); + InetSocketAddress sockAddr = new InetSocketAddress(ip, port); + Socket socket = new Socket(); + try { + if (log.isDebugEnabled()) { + log.debug(instance.getCompInstanceName() + ": Connecting " + sockAddr + .toString() + ", timeout=" + MonitorUtils + .millisToHumanTime(timeout)); + } + socket.connect(sockAddr, timeout); + status.succeed(this); + } catch (Throwable e) { + String error = + instance.getCompInstanceName() + ": Probe " + sockAddr + " failed"; + log.debug(error, e); + status.fail(this, new IOException(error, e)); + } finally { + IOUtils.closeSocket(socket); + } + return status; + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java new file mode 100644 index 00000000000..3237a2bd499 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; + +import java.io.IOException; +import java.util.Map; + +/** + * Base class of all probes. + */ +public abstract class Probe implements MonitorKeys { + + protected final Configuration conf; + private String name; + + /** + * Create a probe of a specific name + * + * @param name probe name + * @param conf configuration being stored. 
+ */ + public Probe(String name, Configuration conf) { + this.name = name; + this.conf = conf; + } + + + protected void setName(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + + @Override + public String toString() { + return getName(); + } + + public static String getProperty(Map props, String name, + String defaultValue) throws IOException { + String value = props.get(name); + if (StringUtils.isEmpty(value)) { + if (defaultValue == null) { + throw new IOException(name + " not specified"); + } + return defaultValue; + } + return value; + } + + public static int getPropertyInt(Map props, String name, + Integer defaultValue) throws IOException { + String value = props.get(name); + if (StringUtils.isEmpty(value)) { + if (defaultValue == null) { + throw new IOException(name + " not specified"); + } + return defaultValue; + } + return Integer.parseInt(value); + } + + /** + * perform any prelaunch initialization + */ + public void init() throws IOException { + + } + + /** + * Ping the endpoint. All exceptions must be caught and included in the + * (failure) status. 
+ * + * @param instance instance to ping + * @return the status + */ + public abstract ProbeStatus ping(ComponentInstance instance); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/ProbeStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/ProbeStatus.java new file mode 100644 index 00000000000..bc62dcd0c1c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/ProbeStatus.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import java.io.Serializable; +import java.util.Date; + +/** + * Status message of a probe. This is designed to be sent over the wire, though the exception + * Had better be unserializable at the far end if that is to work. 
/**
 * Status message of a probe. This is designed to be sent over the wire;
 * since the class is {@link Serializable} and {@link #thrown} is not
 * transient, any attached exception must itself be serializable for that
 * to work.
 */
public final class ProbeStatus implements Serializable {
  private static final long serialVersionUID = 165468L;

  // Time the status was finalized, plus a human-readable rendering of it.
  private long timestamp;
  private String timestampText;
  // success may be flipped by markAsSuccessful(); realOutcome keeps the
  // outcome the probe actually reported.
  private boolean success;
  private boolean realOutcome;
  private String message;
  private Throwable thrown;
  // Not serialized: only meaningful in the originating process.
  private transient Probe originator;

  public ProbeStatus() {
  }

  /** Build an (unsuccessful) status with a message and optional cause. */
  public ProbeStatus(long timestamp, String message, Throwable thrown) {
    this.success = false;
    this.message = message;
    this.thrown = thrown;
    setTimestamp(timestamp);
  }

  /** Build a successful status with a message. */
  public ProbeStatus(long timestamp, String message) {
    this.success = true;
    setTimestamp(timestamp);
    this.message = message;
    this.thrown = null;
  }

  public long getTimestamp() {
    return timestamp;
  }

  /** Sets the timestamp and refreshes its human-readable text form. */
  public void setTimestamp(long timestamp) {
    this.timestamp = timestamp;
    timestampText = new Date(timestamp).toString();
  }

  public boolean isSuccess() {
    return success;
  }

  /**
   * Set both the success and the real outcome bits to the same value
   * @param success the new value
   */
  public void setSuccess(boolean success) {
    this.success = success;
    realOutcome = success;
  }

  public String getTimestampText() {
    return timestampText;
  }

  public boolean getRealOutcome() {
    return realOutcome;
  }

  public String getMessage() {
    return message;
  }

  public void setMessage(String message) {
    this.message = message;
  }

  public Throwable getThrown() {
    return thrown;
  }

  public void setThrown(Throwable thrown) {
    this.thrown = thrown;
  }

  /**
   * Get the probe that generated this result. May be null
   * @return a possibly null reference to a probe
   */
  public Probe getOriginator() {
    return originator;
  }

  /**
   * The probe has succeeded -capture the current timestamp, set
   * success to true, and record any other data needed.
   * @param probe probe
   */
  public void succeed(Probe probe) {
    finish(probe, true, probe.getName(), null);
  }

  /**
   * A probe has failed either because the test returned false, or an exception
   * was thrown. The {@link #success} field is set to false, any exception
   * thrown is recorded.
   * @param probe probe that failed
   * @param thrown an exception that was thrown.
   */
  public void fail(Probe probe, Throwable thrown) {
    finish(probe, false, "Failure in " + probe, thrown);
  }

  /**
   * Record the final outcome: stamps the current time, sets both outcome
   * bits, and remembers the originating probe, message and cause.
   */
  public void finish(Probe probe, boolean succeeded, String text, Throwable thrown) {
    setTimestamp(System.currentTimeMillis());
    setSuccess(succeeded);
    originator = probe;
    message = text;
    this.thrown = thrown;
  }

  @Override
  public String toString() {
    LogEntryBuilder builder = new LogEntryBuilder("Probe Status");
    builder.elt("time", timestampText)
        .elt("outcome", (success ? "success" : "failure"));

    // Only report the real outcome when it was overridden (markAsSuccessful).
    if (success != realOutcome) {
      builder.elt("originaloutcome", (realOutcome ? "success" : "failure"));
    }
    builder.elt("message", message);
    if (thrown != null) {
      builder.elt("exception", thrown);
    }

    return builder.toString();
  }

  /**
   * Flip the success bit on while the real outcome bit is kept false
   */
  public void markAsSuccessful() {
    success = true;
  }
}
+ */ + +package org.apache.hadoop.yarn.service.provider; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; + +import java.io.IOException; +import java.nio.file.Paths; +import java.text.MessageFormat; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.conf.YarnServiceConstants.CONTENT; + +public abstract class AbstractClientProvider { + + public AbstractClientProvider() { + } + + /** + * Generates a fixed format of application tags given one or more of + * application name, version and description. This allows subsequent query for + * an application with a name only, version only or description only or any + * combination of those as filters. + * + * @param appName name of the application + * @param appVersion version of the application + * @param appDescription brief description of the application + * @return + */ + public static final Set createApplicationTags(String appName, + String appVersion, String appDescription) { + Set tags = new HashSet<>(); + tags.add(ServiceUtils.createNameTag(appName)); + if (appVersion != null) { + tags.add(ServiceUtils.createVersionTag(appVersion)); + } + if (appDescription != null) { + tags.add(ServiceUtils.createDescriptionTag(appDescription)); + } + return tags; + } + + /** + * Validate the artifact. + * @param artifact + */ + public abstract void validateArtifact(Artifact artifact, FileSystem + fileSystem) throws IOException; + + protected abstract void validateConfigFile(ConfigFile configFile, FileSystem + fileSystem) throws IOException; + + /** + * Validate the config files. 
+ * @param configFiles config file list + * @param fs file system + */ + public void validateConfigFiles(List configFiles, + FileSystem fs) throws IOException { + Set destFileSet = new HashSet<>(); + + for (ConfigFile file : configFiles) { + if (file.getType() == null) { + throw new IllegalArgumentException("File type is empty"); + } + + if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE)) { + if (StringUtils.isEmpty(file.getSrcFile()) && + !file.getProperties().containsKey(CONTENT)) { + throw new IllegalArgumentException(MessageFormat.format("For {0} " + + "format, either src_file must be specified in ConfigFile," + + " or the \"{1}\" key must be specified in " + + "the 'properties' field of ConfigFile. ", + ConfigFile.TypeEnum.TEMPLATE, CONTENT)); + } + } + if (!StringUtils.isEmpty(file.getSrcFile())) { + Path p = new Path(file.getSrcFile()); + if (!fs.exists(p)) { + throw new IllegalArgumentException( + "Src_file does not exist for config file: " + file + .getSrcFile()); + } + } + + if (StringUtils.isEmpty(file.getDestFile())) { + throw new IllegalArgumentException("Dest_file is empty."); + } + + if (destFileSet.contains(file.getDestFile())) { + throw new IllegalArgumentException( + "Duplicated ConfigFile exists: " + file.getDestFile()); + } + destFileSet.add(file.getDestFile()); + + java.nio.file.Path destPath = Paths.get(file.getDestFile()); + if (!destPath.isAbsolute() && destPath.getNameCount() > 1) { + throw new IllegalArgumentException("Non-absolute dest_file has more " + + "than one path element"); + } + + // provider-specific validation + validateConfigFile(file, fs); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java new file mode 100644 index 00000000000..6d7406199ad --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.provider; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; +import org.apache.hadoop.yarn.service.containerlaunch.CommandLineBuilder; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; +import java.util.Map.Entry; + +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_INTERVAL; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_MAX; +import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.$; + +public abstract class AbstractProviderService implements ProviderService, + YarnServiceConstants { + + protected static final Logger log = + LoggerFactory.getLogger(AbstractProviderService.class); + + public abstract void processArtifact(AbstractLauncher launcher, + ComponentInstance compInstance, SliderFileSystem fileSystem, + Service service) + throws IOException; + + public void buildContainerLaunchContext(AbstractLauncher launcher, + Service service, ComponentInstance instance, + SliderFileSystem fileSystem, Configuration yarnConf) + throws IOException, SliderException { + Component component = instance.getComponent().getComponentSpec();; + processArtifact(launcher, 
instance, fileSystem, service); + + ServiceContext context = + instance.getComponent().getScheduler().getContext(); + // Generate tokens (key-value pair) for config substitution. + // Get pre-defined tokens + Map globalTokens = + instance.getComponent().getScheduler().globalTokens; + Map tokensForSubstitution = ProviderUtils + .initCompTokensForSubstitute(instance); + tokensForSubstitution.putAll(globalTokens); + // Set the environment variables in launcher + launcher.putEnv(ServiceUtils + .buildEnvMap(component.getConfiguration(), tokensForSubstitution)); + launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$()); + launcher.setEnv("LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR); + if (System.getenv(HADOOP_USER_NAME) != null) { + launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME)); + } + launcher.setEnv("LANG", "en_US.UTF-8"); + launcher.setEnv("LC_ALL", "en_US.UTF-8"); + launcher.setEnv("LANGUAGE", "en_US.UTF-8"); + + for (Entry entry : launcher.getEnv().entrySet()) { + tokensForSubstitution.put($(entry.getKey()), entry.getValue()); + } + //TODO add component host tokens? 
+// ProviderUtils.addComponentHostTokens(tokensForSubstitution, amState); + + // create config file on hdfs and add local resource + ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem, + component, tokensForSubstitution, instance, context); + + // substitute launch command + String launchCommand = component.getLaunchCommand(); + // docker container may have empty commands + if (!StringUtils.isEmpty(launchCommand)) { + launchCommand = ProviderUtils + .substituteStrWithTokens(launchCommand, tokensForSubstitution); + CommandLineBuilder operation = new CommandLineBuilder(); + operation.add(launchCommand); + operation.addOutAndErrFiles(OUT_FILE, ERR_FILE); + launcher.addCommand(operation.build()); + } + + // By default retry forever every 30 seconds + launcher.setRetryContext(YarnServiceConf + .getInt(CONTAINER_RETRY_MAX, -1, service.getConfiguration(), + yarnConf), YarnServiceConf + .getInt(CONTAINER_RETRY_INTERVAL, 30000, service.getConfiguration(), + yarnConf)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderFactory.java new file mode 100644 index 00000000000..0f949e0bace --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderFactory.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.provider; + +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderFactory; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.provider.docker.DockerProviderFactory; +import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Base class for factories. 
+ */ +public abstract class ProviderFactory { + protected static final Logger LOG = + LoggerFactory.getLogger(ProviderFactory.class); + + protected ProviderFactory() {} + + public abstract AbstractClientProvider createClientProvider(); + + public abstract ProviderService createServerProvider(); + + public static synchronized ProviderService getProviderService(Artifact + artifact) { + return createServiceProviderFactory(artifact).createServerProvider(); + } + + public static synchronized AbstractClientProvider getClientProvider(Artifact + artifact) { + return createServiceProviderFactory(artifact).createClientProvider(); + } + + /** + * Create a provider for a specific service + * @param artifact artifact + * @return provider factory + */ + public static synchronized ProviderFactory createServiceProviderFactory( + Artifact artifact) { + if (artifact == null || artifact.getType() == null) { + LOG.debug("Loading service provider type default"); + return DefaultProviderFactory.getInstance(); + } + LOG.debug("Loading service provider type {}", artifact.getType()); + switch (artifact.getType()) { + // TODO add handling for custom types? 
+ // TODO handle service + case DOCKER: + return DockerProviderFactory.getInstance(); + case TARBALL: + return TarballProviderFactory.getInstance(); + default: + throw new IllegalArgumentException(String.format("Resolution error, " + + "%s should not be passed to createServiceProviderFactory", + artifact.getType())); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java new file mode 100644 index 00000000000..eb721b4a4f3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.provider; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; + +import java.io.IOException; + +public interface ProviderService { + + /** + * Set up the entire container launch context + */ + void buildContainerLaunchContext(AbstractLauncher containerLauncher, + Service service, ComponentInstance instance, + SliderFileSystem sliderFileSystem, Configuration yarnConf) + throws IOException, SliderException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java new file mode 100644 index 00000000000..e074dd756ae --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java @@ -0,0 +1,408 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.provider; + +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.api.records.ConfigFormat; +import org.apache.hadoop.yarn.service.api.records.Configuration; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; +import org.apache.hadoop.yarn.service.exceptions.BadCommandArgumentsException; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.utils.PublishedConfiguration; +import org.apache.hadoop.yarn.service.utils.PublishedConfigurationOutputter; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + 
+import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.OutputStream; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.regex.Pattern; + +import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*; + +/** + * This is a factoring out of methods handy for providers. It's bonded to a log + * at construction time. + */ +public class ProviderUtils implements YarnServiceConstants { + + protected static final Logger log = + LoggerFactory.getLogger(ProviderUtils.class); + + + /** + * Add oneself to the classpath. This does not work + * on minicluster test runs where the JAR is not built up. + * @param providerResources map of provider resources to add these entries to + * @param providerClass provider to add + * @param jarName name of the jar to use + * @param sliderFileSystem target filesystem + * @param tempPath path in the cluster FS for temp files + * @param libdir relative directory to place resources + * @param miniClusterTestRun true if minicluster is being used + * @return true if the class was found in a JAR + * + * @throws FileNotFoundException if the JAR was not found and this is NOT + * a mini cluster test run + * @throws IOException IO problems + * @throws SliderException any Slider problem + */ + public static boolean addProviderJar( + Map providerResources, + Class providerClass, + String jarName, + SliderFileSystem sliderFileSystem, + Path tempPath, + String libdir, + boolean miniClusterTestRun) throws + IOException, + SliderException { + try { + ServiceUtils.putJar(providerResources, + sliderFileSystem, + providerClass, + tempPath, + libdir, + jarName); + return true; + } catch (FileNotFoundException e) { + if (miniClusterTestRun) { + return false; + } else { + throw e; + } + } + } + + /** + * Loads all dependency jars from the default path. 
+ * @param providerResources map of provider resources to add these entries to + * @param sliderFileSystem target filesystem + * @param tempPath path in the cluster FS for temp files + * @param libDir relative directory to place resources + * @param libLocalSrcDir explicitly supplied local libs dir + * @throws IOException trouble copying to HDFS + * @throws SliderException trouble copying to HDFS + */ + public static void addAllDependencyJars( + Map providerResources, + SliderFileSystem sliderFileSystem, + Path tempPath, + String libDir, + String libLocalSrcDir) + throws IOException, SliderException { + if (ServiceUtils.isSet(libLocalSrcDir)) { + File file = new File(libLocalSrcDir); + if (!file.exists() || !file.isDirectory()) { + throw new BadCommandArgumentsException( + "Supplied lib src dir %s is not valid", libLocalSrcDir); + } + } + ServiceUtils.putAllJars(providerResources, sliderFileSystem, tempPath, + libDir, libLocalSrcDir); + } + + public static String substituteStrWithTokens(String content, + Map tokensForSubstitution) { + for (Map.Entry token : tokensForSubstitution.entrySet()) { + content = + content.replaceAll(Pattern.quote(token.getKey()), token.getValue()); + } + return content; + } + + // configs will be substituted by corresponding env in tokenMap + public static void substituteMapWithTokens(Map configs, + Map tokenMap) { + for (Map.Entry entry : configs.entrySet()) { + String value = entry.getValue(); + if (tokenMap != null) { + for (Map.Entry token : tokenMap.entrySet()) { + value = + value.replaceAll(Pattern.quote(token.getKey()), token.getValue()); + } + } + entry.setValue(value); + } + } + + /** + * Localize the service keytabs for the service. 
+ * @param launcher container launcher + * @param fileSystem file system + * @throws IOException trouble uploading to HDFS + */ + public void localizeServiceKeytabs(AbstractLauncher launcher, + SliderFileSystem fileSystem, Service service) throws IOException { + + Configuration conf = service.getConfiguration(); + String keytabPathOnHost = + conf.getProperty(YarnServiceConf.KEY_AM_KEYTAB_LOCAL_PATH); + if (ServiceUtils.isUnset(keytabPathOnHost)) { + String amKeytabName = + conf.getProperty(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_NAME); + String keytabDir = + conf.getProperty(YarnServiceConf.KEY_HDFS_KEYTAB_DIR); + // we need to localize the keytab files in the directory + Path keytabDirPath = fileSystem.buildKeytabPath(keytabDir, null, + service.getName()); + boolean serviceKeytabsDeployed = false; + if (fileSystem.getFileSystem().exists(keytabDirPath)) { + FileStatus[] keytabs = fileSystem.getFileSystem().listStatus( + keytabDirPath); + LocalResource keytabRes; + for (FileStatus keytab : keytabs) { + if (!amKeytabName.equals(keytab.getPath().getName()) + && keytab.getPath().getName().endsWith(".keytab")) { + serviceKeytabsDeployed = true; + log.info("Localizing keytab {}", keytab.getPath().getName()); + keytabRes = fileSystem.createAmResource(keytab.getPath(), + LocalResourceType.FILE); + launcher.addLocalResource(KEYTAB_DIR + "/" + + keytab.getPath().getName(), + keytabRes); + } + } + } + if (!serviceKeytabsDeployed) { + log.warn("No service keytabs for the service have been localized. 
" + + "If the service requires keytabs for secure operation, " + + "please ensure that the required keytabs have been uploaded " + + "to the folder {}", keytabDirPath); + } + } + } + + public static Path initCompInstanceDir(SliderFileSystem fs, + ComponentInstance instance) { + Path compDir = new Path(new Path(fs.getAppDir(), "components"), + instance.getCompName()); + Path compInstanceDir = new Path(compDir, instance.getCompInstanceName()); + instance.setCompInstanceDir(compInstanceDir); + return compInstanceDir; + } + + // 1. Create all config files for a component on hdfs for localization + // 2. Add the config file to localResource + public static synchronized void createConfigFileAndAddLocalResource( + AbstractLauncher launcher, SliderFileSystem fs, Component component, + Map tokensForSubstitution, ComponentInstance instance, + ServiceContext context) throws IOException { + Path compInstanceDir = initCompInstanceDir(fs, instance); + if (!fs.getFileSystem().exists(compInstanceDir)) { + log.info(instance.getCompInstanceId() + ": Creating dir on hdfs: " + compInstanceDir); + fs.getFileSystem().mkdirs(compInstanceDir, + new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)); + } else { + log.info("Component instance conf dir already exists: " + compInstanceDir); + } + + if (log.isDebugEnabled()) { + log.debug("Tokens substitution for component instance: " + instance + .getCompInstanceName() + System.lineSeparator() + + tokensForSubstitution); + } + + for (ConfigFile originalFile : component.getConfiguration().getFiles()) { + ConfigFile configFile = originalFile.copy(); + String fileName = new Path(configFile.getDestFile()).getName(); + + // substitute file name + for (Map.Entry token : tokensForSubstitution.entrySet()) { + configFile.setDestFile(configFile.getDestFile() + .replaceAll(Pattern.quote(token.getKey()), token.getValue())); + } + + Path remoteFile = new Path(compInstanceDir, fileName); + if (!fs.getFileSystem().exists(remoteFile)) { + 
log.info("Saving config file on hdfs for component " + instance + .getCompInstanceName() + ": " + configFile); + + if (configFile.getSrcFile() != null) { + // Load config file template + switch (configFile.getType()) { + case HADOOP_XML: + // Hadoop_xml_template + resolveHadoopXmlTemplateAndSaveOnHdfs(fs.getFileSystem(), + tokensForSubstitution, configFile, remoteFile, context); + break; + case TEMPLATE: + // plain-template + resolvePlainTemplateAndSaveOnHdfs(fs.getFileSystem(), + tokensForSubstitution, configFile, remoteFile, context); + break; + default: + log.info("Not supporting loading src_file for " + configFile); + break; + } + } else { + // If src_file is not specified + resolvePropsInConfigFileAndSaveOnHdfs(fs, tokensForSubstitution, + instance, configFile, fileName, remoteFile); + } + } + + // Add resource for localization + LocalResource configResource = + fs.createAmResource(remoteFile, LocalResourceType.FILE); + File destFile = new File(configFile.getDestFile()); + String symlink = APP_CONF_DIR + "/" + fileName; + if (destFile.isAbsolute()) { + launcher.addLocalResource(symlink, configResource, + configFile.getDestFile()); + log.info("Add config file for localization: " + symlink + " -> " + + configResource.getResource().getFile() + ", dest mount path: " + + configFile.getDestFile()); + } else { + launcher.addLocalResource(symlink, configResource); + log.info("Add config file for localization: " + symlink + " -> " + + configResource.getResource().getFile()); + } + } + } + + private static void resolvePropsInConfigFileAndSaveOnHdfs(SliderFileSystem fs, + Map tokensForSubstitution, ComponentInstance instance, + ConfigFile configFile, String fileName, Path remoteFile) + throws IOException { + // substitute non-template configs + substituteMapWithTokens(configFile.getProperties(), tokensForSubstitution); + + // write configs onto hdfs + PublishedConfiguration publishedConfiguration = + new PublishedConfiguration(fileName, + 
configFile.getProperties().entrySet()); + if (!fs.getFileSystem().exists(remoteFile)) { + PublishedConfigurationOutputter configurationOutputter = + PublishedConfigurationOutputter.createOutputter( + ConfigFormat.resolve(configFile.getType().toString()), + publishedConfiguration); + try (FSDataOutputStream os = fs.getFileSystem().create(remoteFile)) { + configurationOutputter.save(os); + os.flush(); + } + } else { + log.info("Component instance = " + instance.getCompInstanceName() + + ", config file already exists: " + remoteFile); + } + } + + // 1. substitute config template - only handle hadoop_xml format + // 2. save on hdfs + @SuppressWarnings("unchecked") + private static void resolveHadoopXmlTemplateAndSaveOnHdfs(FileSystem fs, + Map tokensForSubstitution, ConfigFile configFile, + Path remoteFile, ServiceContext context) throws IOException { + Map conf; + try { + conf = (Map) context.configCache.get(configFile); + } catch (ExecutionException e) { + log.info("Failed to load config file: " + configFile, e); + return; + } + // make a copy for substitution + org.apache.hadoop.conf.Configuration confCopy = + new org.apache.hadoop.conf.Configuration(false); + for (Map.Entry entry : conf.entrySet()) { + confCopy.set(entry.getKey(), entry.getValue()); + } + // substitute properties + for (Map.Entry entry : configFile.getProperties().entrySet()) { + confCopy.set(entry.getKey(), entry.getValue()); + } + // substitute env variables + for (Map.Entry entry : confCopy) { + String val = entry.getValue(); + if (val != null) { + for (Map.Entry token : tokensForSubstitution + .entrySet()) { + val = val.replaceAll(Pattern.quote(token.getKey()), token.getValue()); + confCopy.set(entry.getKey(), val); + } + } + } + // save on hdfs + try (OutputStream output = fs.create(remoteFile)) { + confCopy.writeXml(output); + log.info("Reading config from: " + configFile.getSrcFile() + + ", writing to: " + remoteFile); + } + } + + // 1) read the template as a string + // 2) do token 
substitution + // 3) save on hdfs + private static void resolvePlainTemplateAndSaveOnHdfs(FileSystem fs, + Map tokensForSubstitution, ConfigFile configFile, + Path remoteFile, ServiceContext context) { + String content; + try { + content = (String) context.configCache.get(configFile); + } catch (ExecutionException e) { + log.info("Failed to load config file: " + configFile, e); + return; + } + // substitute tokens + content = substituteStrWithTokens(content, tokensForSubstitution); + + try (OutputStream output = fs.create(remoteFile)) { + org.apache.commons.io.IOUtils.write(content, output); + } catch (IOException e) { + log.info("Failed to create " + remoteFile); + } + } + + /** + * Get initial component token map to be substituted into config values. + * @return tokens to replace + */ + public static Map initCompTokensForSubstitute( + ComponentInstance instance) { + Map tokens = new HashMap<>(); + tokens.put(COMPONENT_NAME, instance.getCompSpec().getName()); + tokens + .put(COMPONENT_NAME_LC, instance.getCompSpec().getName().toLowerCase()); + tokens.put(COMPONENT_INSTANCE_NAME, instance.getCompInstanceName()); + tokens.put(CONTAINER_ID, instance.getContainer().getId().toString()); + tokens.put(COMPONENT_ID, + String.valueOf(instance.getCompInstanceId().getId())); + tokens.putAll(instance.getComponent().getDependencyHostIpTokens()); + return tokens; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java new file mode 100644 index 00000000000..0920a9ce114 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.provider.defaultImpl; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; + +import java.io.IOException; +import java.nio.file.Paths; + +public class DefaultClientProvider extends AbstractClientProvider { + + public DefaultClientProvider() { + } + + @Override + public void validateArtifact(Artifact artifact, FileSystem fileSystem) { + } + + @Override + protected void validateConfigFile(ConfigFile configFile, FileSystem + fileSystem) throws IOException { + // validate dest_file is not absolute + if (Paths.get(configFile.getDestFile()).isAbsolute()) { + throw new IllegalArgumentException( + "Dest_file must not be absolute path: " + configFile.getDestFile()); + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderFactory.java new file mode 100644 index 00000000000..868bba8f8dc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderFactory.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.provider.defaultImpl; + +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.provider.ProviderService; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; + +public final class DefaultProviderFactory extends ProviderFactory { + private static final ProviderFactory FACTORY = new + DefaultProviderFactory(); + + private DefaultProviderFactory() {} + + private static class Client { + static final AbstractClientProvider PROVIDER = new DefaultClientProvider(); + } + + private static class Server { + static final ProviderService PROVIDER = new DefaultProviderService(); + } + + @Override + public AbstractClientProvider createClientProvider() { + return Client.PROVIDER; + } + + @Override + public ProviderService createServerProvider() { + return Server.PROVIDER; + } + + public static ProviderFactory getInstance() { + return FACTORY; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderService.java new file mode 100644 index 
00000000000..a3a0c1f96ee --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderService.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.provider.defaultImpl; + +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.provider.AbstractProviderService; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; + +import java.io.IOException; + +public class DefaultProviderService extends AbstractProviderService { + + @Override + public void processArtifact(AbstractLauncher launcher, + ComponentInstance compInstance, SliderFileSystem fileSystem, + Service service) + throws IOException { + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java new file mode 100644 index 00000000000..d4a2254ff42 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.provider.docker; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; + +import java.io.IOException; + +public class DockerClientProvider extends AbstractClientProvider + implements YarnServiceConstants { + + public DockerClientProvider() { + super(); + } + + @Override + public void validateArtifact(Artifact artifact, FileSystem fileSystem) { + if (artifact == null) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_ARTIFACT_INVALID); + } + if (StringUtils.isEmpty(artifact.getId())) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID); + } + } + + @Override + protected void validateConfigFile(ConfigFile configFile, FileSystem + fileSystem) throws IOException { + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerKeys.java new file mode 100644 index 00000000000..f30c002c612 --- 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.service.provider.docker;

/**
 * Configuration keys and defaults for docker-based components.
 * All docker-specific configuration properties share the
 * {@code "docker."} prefix.
 */
public interface DockerKeys {
  /** Provider name selecting the docker provider. */
  String PROVIDER_DOCKER = "docker";
  /** Common prefix for all docker configuration properties. */
  String DOCKER_PREFIX = "docker.";
  /** Property naming the docker image to run. */
  String DOCKER_IMAGE = DOCKER_PREFIX + "image";
  /** Property naming the docker network to attach to. */
  String DOCKER_NETWORK = DOCKER_PREFIX + "network";
  /** Property controlling whether the container runs privileged. */
  String DOCKER_USE_PRIVILEGED = DOCKER_PREFIX + "usePrivileged";
  /** Property overriding the container start command. */
  String DOCKER_START_COMMAND = DOCKER_PREFIX + "startCommand";

  /** Network used when {@link #DOCKER_NETWORK} is unset. */
  String DEFAULT_DOCKER_NETWORK = "bridge";
  /** Privileged mode used when {@link #DOCKER_USE_PRIVILEGED} is unset. */
  Boolean DEFAULT_DOCKER_USE_PRIVILEGED = false;
}
+ */ +package org.apache.hadoop.yarn.service.provider.docker; + +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.provider.ProviderService; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; + +public class DockerProviderFactory extends ProviderFactory { + private static final ProviderFactory FACTORY = new + DockerProviderFactory(); + + private DockerProviderFactory() { + } + + private static class Client { + static final AbstractClientProvider PROVIDER = new DockerClientProvider(); + } + + private static class Server { + static final ProviderService PROVIDER = new DockerProviderService(); + } + + @Override + public AbstractClientProvider createClientProvider() { + return Client.PROVIDER; + } + + @Override + public ProviderService createServerProvider() { + return Server.PROVIDER; + } + + public static ProviderFactory getInstance() { + return FACTORY; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java new file mode 100644 index 00000000000..0741947d4af --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.provider.docker; + +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.provider.AbstractProviderService; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; + +import java.io.IOException; +import java.text.MessageFormat; + +public class DockerProviderService extends AbstractProviderService + implements DockerKeys { + + public void processArtifact(AbstractLauncher launcher, + ComponentInstance compInstance, SliderFileSystem fileSystem, + Service service) throws IOException{ + launcher.setYarnDockerMode(true); + launcher.setDockerImage(compInstance.getCompSpec().getArtifact().getId()); + launcher.setDockerNetwork(compInstance.getCompSpec().getConfiguration() + .getProperty(DOCKER_NETWORK, DEFAULT_DOCKER_NETWORK)); + String domain = compInstance.getComponent().getScheduler().getConfig() + .get(RegistryConstants.KEY_DNS_DOMAIN); + String hostname; + if (domain == null || domain.isEmpty()) { + hostname = MessageFormat + .format("{0}.{1}.{2}", compInstance.getCompInstanceName(), + service.getName(), 
RegistryUtils.currentUser()); + } else { + hostname = MessageFormat + .format("{0}.{1}.{2}.{3}", compInstance.getCompInstanceName(), + service.getName(), RegistryUtils.currentUser(), domain); + } + launcher.setDockerHostname(hostname); + launcher.setRunPrivilegedContainer( + compInstance.getCompSpec().getRunPrivilegedContainer()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java new file mode 100644 index 00000000000..01f7b209aae --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.provider.tarball; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; + +import java.io.IOException; +import java.nio.file.Paths; + +public class TarballClientProvider extends AbstractClientProvider + implements YarnServiceConstants { + + public TarballClientProvider() { + } + + @Override + public void validateArtifact(Artifact artifact, FileSystem fs) + throws IOException { + if (artifact == null) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_ARTIFACT_INVALID); + } + if (StringUtils.isEmpty(artifact.getId())) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID); + } + Path p = new Path(artifact.getId()); + if (!fs.exists(p)) { + throw new IllegalArgumentException( "Artifact tarball does not exist " + + artifact.getId()); + } + } + + @Override + protected void validateConfigFile(ConfigFile configFile, FileSystem + fileSystem) throws IOException { + // validate dest_file is not absolute + if (Paths.get(configFile.getDestFile()).isAbsolute()) { + throw new IllegalArgumentException( + "Dest_file must not be absolute path: " + configFile.getDestFile()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderFactory.java new file 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.service.provider.tarball;

import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
import org.apache.hadoop.yarn.service.provider.ProviderService;
import org.apache.hadoop.yarn.service.provider.ProviderFactory;

/**
 * Singleton factory producing the tarball client and server providers.
 * Uses the initialization-on-demand holder idiom so each provider is
 * created lazily and at most once.
 */
public class TarballProviderFactory extends ProviderFactory {
  private static final ProviderFactory FACTORY = new
      TarballProviderFactory();

  private TarballProviderFactory() {
    // Singleton: obtain via getInstance().
  }

  /** Lazy holder for the single client-side provider. */
  private static class Client {
    static final AbstractClientProvider PROVIDER = new TarballClientProvider();
  }

  /** Lazy holder for the single server-side provider. */
  private static class Server {
    static final ProviderService PROVIDER = new TarballProviderService();
  }

  @Override
  public AbstractClientProvider createClientProvider() {
    return Client.PROVIDER;
  }

  @Override
  public ProviderService createServerProvider() {
    return Server.PROVIDER;
  }

  /** @return the shared factory instance */
  public static ProviderFactory getInstance() {
    return FACTORY;
  }
}
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.provider.tarball; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.provider.AbstractProviderService; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; + +import java.io.IOException; + +public class TarballProviderService extends AbstractProviderService { + + @Override + public void processArtifact(AbstractLauncher launcher, + ComponentInstance instance, SliderFileSystem fileSystem, + Service service) + throws IOException { + Path artifact = new Path(instance.getCompSpec().getArtifact().getId()); + if (!fileSystem.isFile(artifact)) { + throw new IOException( + "Package doesn't exist as a resource: " + artifact.toString()); + } + log.info("Adding resource {}", artifact.toString()); + LocalResourceType type = LocalResourceType.ARCHIVE; + LocalResource packageResource = fileSystem.createAmResource(artifact, type); + launcher.addLocalResource(APP_LIB_DIR, packageResource); + } +} diff --git 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.service.registry;

/**
 * Registry API identifiers unique to the Slider AM: names under which
 * the AM's endpoints are published in the service registry.
 */
public class CustomRegistryConstants {

  public static final String MANAGEMENT_REST_API =
      "classpath:org.apache.slider.management";

  public static final String REGISTRY_REST_API =
      "classpath:org.apache.slider.registry";

  public static final String PUBLISHER_REST_API =
      "classpath:org.apache.slider.publisher";

  public static final String PUBLISHER_CONFIGURATIONS_API =
      "classpath:org.apache.slider.publisher.configurations";

  public static final String PUBLISHER_EXPORTS_API =
      "classpath:org.apache.slider.publisher.exports";

  public static final String PUBLISHER_DOCUMENTS_API =
      "classpath:org.apache.slider.publisher.documents";

  public static final String AGENT_SECURE_REST_API =
      "classpath:org.apache.slider.agents.secure";

  public static final String AGENT_ONEWAY_REST_API =
      "classpath:org.apache.slider.agents.oneway";

  public static final String AM_IPC_PROTOCOL =
      "classpath:org.apache.slider.appmaster.ipc";

  public static final String AM_REST_BASE =
      "classpath:org.apache.slider.client.rest";

  public static final String WEB_UI = "http://";

  // Constants-only class: never instantiated.
  private CustomRegistryConstants() {
  }
}
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.registry; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.registry.client.api.BindFlags; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.registry.client.binding.RegistryPathUtils; + +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId; +import org.apache.hadoop.yarn.service.utils.ServiceUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.join; + +/** + * Registry view for providers. This tracks where the service + * is registered, offers access to the record and other things. 
+ */ +public class YarnRegistryViewForProviders { + private static final Logger LOG = + LoggerFactory.getLogger(YarnRegistryViewForProviders.class); + + private final RegistryOperations registryOperations; + private final String user; + private final String serviceClass; + private final String instanceName; + /** + * Record used where the service registered itself. + * Null until the service is registered + */ + private ServiceRecord selfRegistration; + + /** + * Path where record was registered. + * Null until the service is registered + */ + private String selfRegistrationPath; + + public YarnRegistryViewForProviders(RegistryOperations registryOperations, + String user, + String serviceClass, + String instanceName, + ApplicationAttemptId applicationAttemptId) { + Preconditions.checkArgument(registryOperations != null, + "null registry operations"); + Preconditions.checkArgument(user != null, "null user"); + Preconditions.checkArgument(ServiceUtils.isSet(serviceClass), + "unset service class"); + Preconditions.checkArgument(ServiceUtils.isSet(instanceName), + "instanceName"); + Preconditions.checkArgument(applicationAttemptId != null, + "null applicationAttemptId"); + this.registryOperations = registryOperations; + this.user = user; + this.serviceClass = serviceClass; + this.instanceName = instanceName; + } + + public String getUser() { + return user; + } + + + private void setSelfRegistration(ServiceRecord selfRegistration) { + this.selfRegistration = selfRegistration; + } + + /** + * Get the path to where the service has registered itself. + * Null until the service is registered + * @return the service registration path. + */ + public String getSelfRegistrationPath() { + return selfRegistrationPath; + } + + /** + * Get the absolute path to where the service has registered itself. + * This includes the base registry path + * Null until the service is registered + * @return the service registration path. 
+ */ + public String getAbsoluteSelfRegistrationPath() { + if (selfRegistrationPath == null) { + return null; + } + String root = registryOperations.getConfig().getTrimmed( + RegistryConstants.KEY_REGISTRY_ZK_ROOT, + RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT); + return RegistryPathUtils.join(root, selfRegistrationPath); + } + + /** + * Add a component under the slider name/entry. + * @param componentName component name + * @param record record to put + * @throws IOException + */ + public void putComponent(String componentName, + ServiceRecord record) throws + IOException { + putComponent(serviceClass, instanceName, + componentName, + record); + } + + /** + * Add a component. + * @param serviceClass service class to use under ~user + * @param componentName component name + * @param record record to put + * @throws IOException + */ + public void putComponent(String serviceClass, + String serviceName, + String componentName, + ServiceRecord record) throws IOException { + String path = RegistryUtils.componentPath( + user, serviceClass, serviceName, componentName); + registryOperations.mknode(RegistryPathUtils.parentOf(path), true); + registryOperations.bind(path, record, BindFlags.OVERWRITE); + } + + /** + * Get a component. + * @param componentName component name + * @return the service record + * @throws IOException + */ + public ServiceRecord getComponent(String componentName) throws IOException { + String path = RegistryUtils.componentPath( + user, serviceClass, instanceName, componentName); + LOG.info("Resolving path {}", path); + return registryOperations.resolve(path); + } + + /** + * List components. + * @return a list of components + * @throws IOException + */ + public List listComponents() throws IOException { + String path = RegistryUtils.componentListPath( + user, serviceClass, instanceName); + return registryOperations.list(path); + } + + /** + * Add a service under a path, optionally purging any history. 
+ * @param username user + * @param serviceClass service class to use under ~user + * @param serviceName name of the service + * @param record service record + * @param deleteTreeFirst perform recursive delete of the path first. + * @return the path the service was created at + * @throws IOException + */ + public String putService(String username, + String serviceClass, + String serviceName, + ServiceRecord record, + boolean deleteTreeFirst) throws IOException { + String path = RegistryUtils.servicePath( + username, serviceClass, serviceName); + if (deleteTreeFirst) { + registryOperations.delete(path, true); + } + registryOperations.mknode(RegistryPathUtils.parentOf(path), true); + registryOperations.bind(path, record, BindFlags.OVERWRITE); + return path; + } + + /** + * Add a service under a path for the current user. + * @param record service record + * @param deleteTreeFirst perform recursive delete of the path first + * @return the path the service was created at + * @throws IOException + */ + public String registerSelf( + ServiceRecord record, + boolean deleteTreeFirst) throws IOException { + selfRegistrationPath = + putService(user, serviceClass, instanceName, record, deleteTreeFirst); + setSelfRegistration(record); + return selfRegistrationPath; + } + + /** + * Delete a component. + * @param containerId component name + * @throws IOException + */ + public void deleteComponent(ComponentInstanceId instanceId, + String containerId) throws IOException { + String path = RegistryUtils.componentPath( + user, serviceClass, instanceName, + containerId); + LOG.info(instanceId + ": Deleting registry path " + path); + registryOperations.delete(path, false); + } + + /** + * Delete the children of a path -but not the path itself. 
+ * It is not an error if the path does not exist + * @param path path to delete + * @param recursive flag to request recursive deletes + * @throws IOException IO problems + */ + public void deleteChildren(String path, boolean recursive) throws IOException { + List childNames = null; + try { + childNames = registryOperations.list(path); + } catch (PathNotFoundException e) { + return; + } + for (String childName : childNames) { + String child = join(path, childName); + registryOperations.delete(child, recursive); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java new file mode 100644 index 00000000000..cf4e836c01c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.timelineservice; + +import org.apache.commons.configuration2.SubsetConfiguration; +import org.apache.hadoop.metrics2.MetricsRecord; +import org.apache.hadoop.metrics2.MetricsSink; +import org.apache.hadoop.metrics2.MetricsTag; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Write the metrics to a ATSv2. Generally, this class is instantiated via + * hadoop-metrics2 property files. Specifically, you would create this class by + * adding the following to by This would actually be set as: + * [prefix].sink.[some instance name].class + * =org.apache.hadoop.yarn.service.timelineservice.ServiceMetricsSink + * , where prefix is "atsv2": and some instance name is + * just any unique name, so properties can be differentiated if there are + * multiple sinks of the same type created + */ +public class ServiceMetricsSink implements MetricsSink { + + private static final Logger log = + LoggerFactory.getLogger(ServiceMetricsSink.class); + + private ServiceTimelinePublisher serviceTimelinePublisher; + + public ServiceMetricsSink() { + + } + + public ServiceMetricsSink(ServiceTimelinePublisher publisher) { + serviceTimelinePublisher = publisher; + } + + /** + * Publishes service and component metrics to ATS. + */ + @Override + public void putMetrics(MetricsRecord record) { + if (serviceTimelinePublisher.isStopped()) { + log.warn("ServiceTimelinePublisher has stopped. 
" + + "Not publishing any more metrics to ATS."); + return; + } + + boolean isServiceMetrics = false; + boolean isComponentMetrics = false; + String appId = null; + for (MetricsTag tag : record.tags()) { + if (tag.name().equals("type") && tag.value().equals("service")) { + isServiceMetrics = true; + } else if (tag.name().equals("type") && tag.value().equals("component")) { + isComponentMetrics = true; + break; // if component metrics, no more information required from tag so + // break the loop + } else if (tag.name().equals("appId")) { + appId = tag.value(); + } + } + + if (isServiceMetrics && appId != null) { + if (log.isDebugEnabled()) { + log.debug("Publishing service metrics. " + record); + } + serviceTimelinePublisher.publishMetrics(record.metrics(), appId, + ServiceTimelineEntityType.SERVICE_ATTEMPT.toString(), + record.timestamp()); + } else if (isComponentMetrics) { + if (log.isDebugEnabled()) { + log.debug("Publishing Component metrics. " + record); + } + serviceTimelinePublisher.publishMetrics(record.metrics(), record.name(), + ServiceTimelineEntityType.COMPONENT.toString(), record.timestamp()); + } + } + + @Override + public void init(SubsetConfiguration conf) { + } + + @Override + public void flush() { + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEntityType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEntityType.java new file mode 100644 index 00000000000..d5c95394aa4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEntityType.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.timelineservice; + +/** + * Slider entities that are published to ATS. + */ +public enum ServiceTimelineEntityType { + /** + * Used for publishing service entity information. + */ + SERVICE_ATTEMPT, + + /** + * Used for publishing component entity information. + */ + COMPONENT, + + /** + * Used for publishing component instance entity information. + */ + COMPONENT_INSTANCE +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEvent.java new file mode 100644 index 00000000000..6c3428a748d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEvent.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.timelineservice; + +/** + * Events that are used to store in ATS. + */ +public enum ServiceTimelineEvent { + SERVICE_ATTEMPT_REGISTERED, + + SERVICE_ATTEMPT_UNREGISTERED, + + COMPONENT_INSTANCE_REGISTERED, + + COMPONENT_INSTANCE_UNREGISTERED, + + COMPONENT_INSTANCE_IP_HOST_UPDATE, + + COMPONENT_INSTANCE_BECOME_READY +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineMetricsConstants.java new file mode 100644 index 00000000000..78a71713d90 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineMetricsConstants.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.timelineservice; + +/** + * Constants which are stored as key in ATS + */ +public final class ServiceTimelineMetricsConstants { + + public static final String URI = "URI"; + + public static final String NAME = "NAME"; + + public static final String STATE = "STATE"; + + public static final String EXIT_STATUS_CODE = "EXIT_STATUS_CODE"; + + public static final String EXIT_REASON = "EXIT_REASON"; + + public static final String DIAGNOSTICS_INFO = "DIAGNOSTICS_INFO"; + + public static final String LAUNCH_TIME = "LAUNCH_TIME"; + + public static final String QUICK_LINKS = "QUICK_LINKS"; + + public static final String LAUNCH_COMMAND = "LAUNCH_COMMAND"; + + public static final String TOTAL_CONTAINERS = "NUMBER_OF_CONTAINERS"; + + public static final String RUNNING_CONTAINERS = + "NUMBER_OF_RUNNING_CONTAINERS"; + + /** + * Artifacts constants. + */ + public static final String ARTIFACT_ID = "ARTIFACT_ID"; + + public static final String ARTIFACT_TYPE = "ARTIFACT_TYPE"; + + public static final String ARTIFACT_URI = "ARTIFACT_URI"; + + /** + * Resource constants. 
+ */ + public static final String RESOURCE_CPU = "RESOURCE_CPU"; + + public static final String RESOURCE_MEMORY = "RESOURCE_MEMORY"; + + public static final String RESOURCE_PROFILE = "RESOURCE_PROFILE"; + + /** + * component instance constants. + */ + public static final String IP = "IP"; + + public static final String HOSTNAME = "HOSTNAME"; + + public static final String BARE_HOST = "BARE_HOST"; + + public static final String COMPONENT_NAME = "COMPONENT_NAME"; + + public static final String COMPONENT_INSTANCE_NAME = "COMPONENT_INSTANCE_NAME"; + + /** + * component constants. + */ + public static final String DEPENDENCIES = "DEPENDENCIES"; + + public static final String DESCRIPTION = "DESCRIPTION"; + + public static final String RUN_PRIVILEGED_CONTAINER = + "RUN_PRIVILEGED_CONTAINER"; + + public static final String PLACEMENT_POLICY = "PLACEMENT_POLICY"; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelinePublisher.java new file mode 100644 index 00000000000..c5229868005 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelinePublisher.java @@ -0,0 +1,381 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.timelineservice; + +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric; +import org.apache.hadoop.yarn.client.api.TimelineV2Client; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.apache.hadoop.yarn.service.api.records.*; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.util.timeline.TimelineUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.api.records.ContainerState.READY; +import static org.apache.hadoop.yarn.service.api.records.ContainerState.STOPPED; +import static org.apache.hadoop.yarn.service.timelineservice.ServiceTimelineMetricsConstants.DIAGNOSTICS_INFO; + +/** + * A single service that publishes all the Timeline Entities. + */ +public class ServiceTimelinePublisher extends CompositeService { + + // Number of bytes of config which can be published in one shot to ATSv2. 
+ public static final int ATS_CONFIG_PUBLISH_SIZE_BYTES = 10 * 1024; + + private TimelineV2Client timelineClient; + + private volatile boolean stopped = false; + + private static final Logger log = + LoggerFactory.getLogger(ServiceTimelinePublisher.class); + + @Override + protected void serviceInit(org.apache.hadoop.conf.Configuration configuration) + throws Exception { + addService(timelineClient); + super.serviceInit(configuration); + } + + + @Override + protected void serviceStop() throws Exception { + stopped = true; + super.serviceStop(); + } + + public boolean isStopped() { + return stopped; + } + + public ServiceTimelinePublisher(TimelineV2Client client) { + super(ServiceTimelinePublisher.class.getName()); + timelineClient = client; + } + + public void serviceAttemptRegistered(Service service, + org.apache.hadoop.conf.Configuration systemConf) { + long currentTimeMillis = service.getLaunchTime() == null + ? System.currentTimeMillis() : service.getLaunchTime().getTime(); + + TimelineEntity entity = createServiceAttemptEntity(service.getId()); + entity.setCreatedTime(currentTimeMillis); + + // create info keys + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.NAME, service.getName()); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, + ServiceState.STARTED.toString()); + entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_TIME, + currentTimeMillis); + entity.addInfo(ServiceTimelineMetricsConstants.QUICK_LINKS, + service.getQuicklinks()); + entity.addInfo(entityInfos); + + // add an event + TimelineEvent startEvent = new TimelineEvent(); + startEvent.setId(ServiceTimelineEvent.SERVICE_ATTEMPT_REGISTERED.toString()); + startEvent.setTimestamp(currentTimeMillis); + entity.addEvent(startEvent); + + // publish before configurations published + putEntity(entity); + + // publish system config - YarnConfiguration + populateTimelineEntity(systemConf.iterator(), service.getId(), + 
ServiceTimelineEntityType.SERVICE_ATTEMPT.toString()); + // publish container conf + publishContainerConf(service.getConfiguration(), service.getId(), + ServiceTimelineEntityType.SERVICE_ATTEMPT.toString()); + + // publish component as separate entity. + publishComponents(service.getComponents()); + } + + public void serviceAttemptUpdated(Service service) { + TimelineEntity entity = createServiceAttemptEntity(service.getId()); + entity.addInfo(ServiceTimelineMetricsConstants.QUICK_LINKS, + service.getQuicklinks()); + putEntity(entity); + } + + public void serviceAttemptUnregistered(ServiceContext context, + String diagnostics) { + TimelineEntity entity = createServiceAttemptEntity( + context.attemptId.getApplicationId().toString()); + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, + FinalApplicationStatus.ENDED); + entityInfos.put(DIAGNOSTICS_INFO, diagnostics); + entity.addInfo(entityInfos); + + // add an event + TimelineEvent finishEvent = new TimelineEvent(); + finishEvent + .setId(ServiceTimelineEvent.SERVICE_ATTEMPT_UNREGISTERED.toString()); + finishEvent.setTimestamp(System.currentTimeMillis()); + entity.addEvent(finishEvent); + + putEntity(entity); + } + + public void componentInstanceStarted(Container container, + ComponentInstance instance) { + + TimelineEntity entity = createComponentInstanceEntity(container.getId()); + entity.setCreatedTime(container.getLaunchTime().getTime()); + + // create info keys + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.BARE_HOST, + container.getBareHost()); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, + container.getState().toString()); + entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_TIME, + container.getLaunchTime().getTime()); + entityInfos.put(ServiceTimelineMetricsConstants.COMPONENT_NAME, + instance.getCompName()); + entityInfos.put(ServiceTimelineMetricsConstants.COMPONENT_INSTANCE_NAME, + 
instance.getCompInstanceName()); + entity.addInfo(entityInfos); + + // add an event + TimelineEvent startEvent = new TimelineEvent(); + startEvent + .setId(ServiceTimelineEvent.COMPONENT_INSTANCE_REGISTERED.toString()); + startEvent.setTimestamp(container.getLaunchTime().getTime()); + entity.addEvent(startEvent); + + putEntity(entity); + } + + public void componentInstanceFinished(ComponentInstance instance, + int exitCode, String diagnostics) { + TimelineEntity entity = createComponentInstanceEntity( + instance.getContainer().getId().toString()); + + // create info keys + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.EXIT_STATUS_CODE, + exitCode); + entityInfos.put(DIAGNOSTICS_INFO, diagnostics); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, STOPPED); + entity.addInfo(entityInfos); + + // add an event + TimelineEvent startEvent = new TimelineEvent(); + startEvent + .setId(ServiceTimelineEvent.COMPONENT_INSTANCE_UNREGISTERED.toString()); + startEvent.setTimestamp(System.currentTimeMillis()); + entity.addEvent(startEvent); + + putEntity(entity); + } + + public void componentInstanceIPHostUpdated(Container container) { + TimelineEntity entity = createComponentInstanceEntity(container.getId()); + + // create info keys + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.IP, container.getIp()); + entityInfos.put(ServiceTimelineMetricsConstants.HOSTNAME, + container.getHostname()); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, + container.getState().toString()); + entity.addInfo(entityInfos); + + TimelineEvent updateEvent = new TimelineEvent(); + updateEvent.setId(ServiceTimelineEvent.COMPONENT_INSTANCE_IP_HOST_UPDATE + .toString()); + updateEvent.setTimestamp(System.currentTimeMillis()); + entity.addEvent(updateEvent); + + putEntity(entity); + } + + public void componentInstanceBecomeReady(Container container) { + TimelineEntity entity = 
createComponentInstanceEntity(container.getId()); + Map entityInfo = new HashMap<>(); + entityInfo.put(ServiceTimelineMetricsConstants.STATE, READY); + entity.addInfo(entityInfo); + TimelineEvent updateEvent = new TimelineEvent(); + updateEvent.setId(ServiceTimelineEvent.COMPONENT_INSTANCE_BECOME_READY + .toString()); + updateEvent.setTimestamp(System.currentTimeMillis()); + entity.addEvent(updateEvent); + putEntity(entity); + } + + private void publishComponents(List components) { + long currentTimeMillis = System.currentTimeMillis(); + for (Component component : components) { + TimelineEntity entity = createComponentEntity(component.getName()); + entity.setCreatedTime(currentTimeMillis); + + // create info keys + Map entityInfos = new HashMap(); + if (component.getArtifact() != null) { + entityInfos.put(ServiceTimelineMetricsConstants.ARTIFACT_ID, + component.getArtifact().getId()); + entityInfos.put(ServiceTimelineMetricsConstants.ARTIFACT_TYPE, + component.getArtifact().getType().toString()); + } + + if (component.getResource() != null) { + entityInfos.put(ServiceTimelineMetricsConstants.RESOURCE_CPU, + component.getResource().getCpus()); + entityInfos.put(ServiceTimelineMetricsConstants.RESOURCE_MEMORY, + component.getResource().getMemory()); + if (component.getResource().getProfile() != null) { + entityInfos.put(ServiceTimelineMetricsConstants.RESOURCE_PROFILE, + component.getResource().getProfile()); + } + } + + if (component.getLaunchCommand() != null) { + entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_COMMAND, + component.getLaunchCommand()); + } + entityInfos.put(ServiceTimelineMetricsConstants.RUN_PRIVILEGED_CONTAINER, + component.getRunPrivilegedContainer().toString()); + if (component.getPlacementPolicy() != null) { + entityInfos.put(ServiceTimelineMetricsConstants.PLACEMENT_POLICY, + component.getPlacementPolicy().getLabel()); + } + entity.addInfo(entityInfos); + + putEntity(entity); + + // publish container specific configurations + 
publishContainerConf(component.getConfiguration(), component.getName(), + ServiceTimelineEntityType.COMPONENT.toString()); + } + } + + private void publishContainerConf(Configuration configuration, + String entityId, String entityType) { + populateTimelineEntity(configuration.getEnv().entrySet().iterator(), + entityId, entityType); + + for (ConfigFile configFile : configuration.getFiles()) { + populateTimelineEntity(configFile.getProperties().entrySet().iterator(), + entityId, entityType); + } + } + + private void populateTimelineEntity(Iterator> iterator, + String entityId, String entityType) { + int configSize = 0; + TimelineEntity entity = createTimelineEntity(entityId, entityType); + while (iterator.hasNext()) { + Entry entry = iterator.next(); + int size = entry.getKey().length() + entry.getValue().length(); + configSize += size; + // Configs are split into multiple entities if they exceed 100kb in size. + if (configSize > ATS_CONFIG_PUBLISH_SIZE_BYTES) { + if (entity.getConfigs().size() > 0) { + putEntity(entity); + entity = createTimelineEntity(entityId, entityType); + } + configSize = size; + } + entity.addConfig(entry.getKey(), entry.getValue()); + } + if (configSize > 0) { + putEntity(entity); + } + } + + /** + * Called from ServiceMetricsSink at regular interval of time. 
+ * @param metrics of service or components + * @param entityId Id of entity + * @param entityType Type of entity + * @param timestamp + */ + public void publishMetrics(Iterable metrics, String entityId, + String entityType, long timestamp) { + TimelineEntity entity = createTimelineEntity(entityId, entityType); + Set entityMetrics = new HashSet(); + for (AbstractMetric metric : metrics) { + TimelineMetric timelineMetric = new TimelineMetric(); + timelineMetric.setId(metric.name()); + timelineMetric.addValue(timestamp, metric.value()); + entityMetrics.add(timelineMetric); + } + entity.setMetrics(entityMetrics); + putEntity(entity); + } + + private TimelineEntity createServiceAttemptEntity(String serviceId) { + TimelineEntity entity = createTimelineEntity(serviceId, + ServiceTimelineEntityType.SERVICE_ATTEMPT.toString()); + return entity; + } + + private TimelineEntity createComponentInstanceEntity(String instanceId) { + TimelineEntity entity = createTimelineEntity(instanceId, + ServiceTimelineEntityType.COMPONENT_INSTANCE.toString()); + return entity; + } + + private TimelineEntity createComponentEntity(String componentId) { + TimelineEntity entity = createTimelineEntity(componentId, + ServiceTimelineEntityType.COMPONENT.toString()); + return entity; + } + + private TimelineEntity createTimelineEntity(String entityId, + String entityType) { + TimelineEntity entity = new TimelineEntity(); + entity.setId(entityId); + entity.setType(entityType); + return entity; + } + + private void putEntity(TimelineEntity entity) { + try { + if (log.isDebugEnabled()) { + log.debug("Publishing the entity " + entity + ", JSON-style content: " + + TimelineUtils.dumpTimelineRecordtoJSON(entity)); + } + if (timelineClient != null) { + timelineClient.putEntitiesAsync(entity); + } else { + log.error("Seems like client has been removed before the entity " + + "could be published for " + entity); + } + } catch (Exception e) { + log.error("Error when publishing entity " + entity, e); + } + } 
+} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/package-info.java new file mode 100644 index 00000000000..72f7842b836 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/package-info.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * ATS implementation + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +package org.apache.hadoop.yarn.service.timelineservice; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java new file mode 100644 index 00000000000..2607c084be8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.utils; + +import org.codehaus.jackson.JsonGenerationException; +import org.codehaus.jackson.JsonParseException; +import org.codehaus.jackson.map.JsonMappingException; + +import java.io.IOException; + +/** + * Persistence of {@link SerializedApplicationReport} + * + */ +public class ApplicationReportSerDeser + extends JsonSerDeser { + public ApplicationReportSerDeser() { + super(SerializedApplicationReport.class); + } + + + private static final ApplicationReportSerDeser + staticinstance = new ApplicationReportSerDeser(); + + /** + * Convert an instance to a JSON string -sync access to a shared ser/deser + * object instance + * @param instance object to convert + * @return a JSON string description + * @throws JsonParseException parse problems + * @throws JsonMappingException O/J mapping problems + */ + public static String toString(SerializedApplicationReport instance) + throws IOException, JsonGenerationException, JsonMappingException { + synchronized (staticinstance) { + return staticinstance.toJson(instance); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java new file mode 100644 index 00000000000..86896b2b694 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.registry.client.binding.RegistryTypeUtils; +import org.apache.hadoop.registry.client.exceptions.InvalidRecordException; +import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants; +import org.apache.hadoop.registry.client.types.Endpoint; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.encodeForRegistry; +import static org.apache.hadoop.registry.client.binding.RegistryUtils.convertUsername; +import static org.apache.hadoop.registry.client.binding.RegistryUtils.getCurrentUsernameUnencoded; +import static org.apache.hadoop.registry.client.binding.RegistryUtils.servicePath; + +/** + * Generic code to get the URLs for clients via the registry + */ +public class ClientRegistryBinder { + private static final Logger log = + LoggerFactory.getLogger(ClientRegistryBinder.class); + + private final 
RegistryOperations operations; + + public ClientRegistryBinder(RegistryOperations operations) { + this.operations = operations; + } + + /** + * Buld the user path -switches to the system path if the user is "". + * It also cross-converts the username to ascii via punycode + * @param username username or "" + * @return the path to the user + */ + public static String homePathForUser(String username) { + Preconditions.checkArgument(username != null, "null user"); + + // catch recursion + if (username.startsWith(RegistryConstants.PATH_USERS)) { + return username; + } + + if (username.isEmpty()) { + return RegistryConstants.PATH_SYSTEM_SERVICES; + } + + // convert username to registry name + String convertedName = convertUsername(username); + + return RegistryPathUtils.join(RegistryConstants.PATH_USERS, + encodeForRegistry(convertedName)); + } + + /** + * Get the current username, before any encoding has been applied. + * @return the current user from the kerberos identity, falling back + * to the user and/or env variables. + */ + public static String currentUsernameUnencoded() { + String env_hadoop_username = System.getenv( + RegistryInternalConstants.HADOOP_USER_NAME); + return getCurrentUsernameUnencoded(env_hadoop_username); + } + + /** + * Qualify a user. + *

    + *
  1. "~" maps to user home path home
  2. + *
  3. "~user" maps to /users/$user
  4. + *
  5. "/" maps to /services/
  6. + *
+ * @param user the username + * @return the base path + */ + public static String qualifyUser(String user) { + // qualify the user + String t = user.trim(); + if (t.startsWith("/")) { + // already resolved + return t; + } else if (t.equals("~")) { + // self + return currentUsernameUnencoded(); + } else if (t.startsWith("~")) { + // another user + // convert username to registry name + String convertedName = convertUsername(t.substring(1)); + + return RegistryPathUtils.join(RegistryConstants.PATH_USERS, + encodeForRegistry(convertedName)); + } else { + return "/" + t; + } + } + + /** + * Look up an external REST API + * @param user user which will be qualified as per {@link #qualifyUser(String)} + * @param serviceClass service class + * @param instance instance name + * @param api API + * @return the API, or an exception is raised. + * @throws IOException + */ + public String lookupExternalRestAPI(String user, + String serviceClass, + String instance, + String api) + throws IOException { + String qualified = qualifyUser(user); + String path = servicePath(qualified, serviceClass, instance); + String restAPI = resolveExternalRestAPI(api, path); + if (restAPI == null) { + throw new PathNotFoundException(path + " API " + api); + } + return restAPI; + } + + /** + * Resolve a service record then return an external REST API exported it. + * + * @param api API to resolve + * @param path path of the service record + * @return null if the record exists but the API is absent or it has no + * REST endpoints. 
+ * @throws IOException resolution problems, as covered in + * {@link RegistryOperations#resolve(String)} + */ + protected String resolveExternalRestAPI(String api, String path) throws + IOException { + ServiceRecord record = operations.resolve(path); + return lookupRestAPI(record, api, true); + } + + /** + * Look up an external REST API endpoint + * @param record service record + * @param api URI of api + * @param external flag to indicate this is an external record + * @return the first endpoint of the implementation, or null if there + * is no entry for the API, implementation or it's the wrong type. + */ + public static String lookupRestAPI(ServiceRecord record, + String api, boolean external) throws InvalidRecordException { + try { + String url = null; + Endpoint endpoint = getEndpoint(record, api, external); + List addresses = + RegistryTypeUtils.retrieveAddressesUriType(endpoint); + if (addresses != null && !addresses.isEmpty()) { + url = addresses.get(0); + } + return url; + } catch (InvalidRecordException e) { + log.debug("looking for API {}", api, e); + return null; + } + } + + /** + * Get an endpont by API + * @param record service record + * @param api API + * @param external flag to indicate this is an external record + * @return the endpoint or null + */ + public static Endpoint getEndpoint(ServiceRecord record, + String api, + boolean external) { + return external ? 
record.getExternalEndpoint(api) + : record.getInternalEndpoint(api); + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Comparators.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Comparators.java new file mode 100644 index 00000000000..9f0e5d40a57 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Comparators.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+/**
+ * Some general comparators
+ */
+public class Comparators {
+
+  public static class LongComparator implements Comparator<Long>, Serializable {
+    @Override
+    public int compare(Long o1, Long o2) {
+      return o1.compareTo(o2);
+    }
+  }
+
+  public static class InvertedLongComparator
+      implements Comparator<Long>, Serializable {
+    @Override
+    public int compare(Long o1, Long o2) {
+      return o2.compareTo(o1);
+    }
+  }
+
+  /**
+   * Little template class to reverse any comparator
+   * @param <CompareType> the type that is being compared
+   */
+  public static class ComparatorReverser<CompareType> implements Comparator<CompareType>,
+      Serializable {
+
+    final Comparator<CompareType> instance;
+
+    public ComparatorReverser(Comparator<CompareType> instance) {
+      this.instance = instance;
+    }
+
+    @Override
+    public int compare(CompareType first, CompareType second) {
+      return instance.compare(second, first);
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java
new file mode 100644
index 00000000000..fe8cce85434
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
 You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.service.exceptions.BadConfigException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.net.URL;
+import java.util.Map;
+
+/**
+ * Methods to aid in config, both in the Configuration class and
+ * with other parts of setting up Slider-initiated processes.
+ *
+ * Some of the methods take an argument of a map iterable for their sources; this allows
+ * the same method
+ */
+public class ConfigHelper {
+  private static final Logger log = LoggerFactory.getLogger(ConfigHelper.class);
+
+  /**
+   * Set an entire map full of values
+   *
+   * @param config config to patch
+   * @param map map of data
+   * @param origin origin data
+   */
+  public static void addConfigMap(Configuration config,
+      Map<String, String> map,
+      String origin) throws BadConfigException {
+    addConfigMap(config, map.entrySet(), origin);
+  }
+
+  /**
+   * Set an entire map full of values
+   *
+   * @param config config to patch
+   * @param map map of data
+   * @param origin origin data
+   */
+  public static void addConfigMap(Configuration config,
+      Iterable<Map.Entry<String, String>> map,
+      String origin) throws BadConfigException {
+    for (Map.Entry<String, String> mapEntry : map) {
+      String key = mapEntry.getKey();
+      String value = mapEntry.getValue();
+      if (value == null) {
+        throw new BadConfigException("Null value for property " + key);
+      }
+      config.set(key, value, origin);
+    }
+  }
+
+  /**
+   * 
Convert to an XML string + * @param conf configuration + * @return conf + * @throws IOException + */ + public static String toXml(Configuration conf) throws IOException { + StringWriter writer = new StringWriter(); + conf.writeXml(writer); + return writer.toString(); + } + + + /** + * Register a resource as a default resource. + * Do not attempt to use this unless you understand that the + * order in which default resources are loaded affects the outcome, + * and that subclasses of Configuration often register new default + * resources + * @param resource the resource name + * @return the URL or null + */ + public static URL registerDefaultResource(String resource) { + URL resURL = getResourceUrl(resource); + if (resURL != null) { + Configuration.addDefaultResource(resource); + } + return resURL; + } + + /** + * Load a configuration from a resource on this classpath. + * If the resource is not found, an empty configuration is returned + * @param resource the resource name + * @return the loaded configuration. + */ + public static Configuration loadFromResource(String resource) { + Configuration conf = new Configuration(false); + URL resURL = getResourceUrl(resource); + if (resURL != null) { + log.debug("loaded resources from {}", resURL); + conf.addResource(resource); + } else{ + log.debug("failed to find {} on the classpath", resource); + } + return conf; + + } + + /** + * Get the URL to a resource, null if not on the CP + * @param resource resource to look for + * @return the URL or null + */ + public static URL getResourceUrl(String resource) { + return ConfigHelper.class.getClassLoader() + .getResource(resource); + } + + /** + * This goes through the keyset of one configuration and retrieves each value + * from a value source -a different or the same configuration. 
 This triggers
+   * the property resolution process of the value, resolving any variables against
+   * in-config or inherited configurations
+   * @param keysource source of keys
+   * @param valuesource the source of values
+   * @return a new configuration where foreach key in keysource, get(key)==valuesource.get(key)
+   */
+  public static Configuration resolveConfiguration(
+      Iterable<Map.Entry<String, String>> keysource,
+      Configuration valuesource) {
+    Configuration result = new Configuration(false);
+    for (Map.Entry<String, String> entry : keysource) {
+      String key = entry.getKey();
+      String value = valuesource.get(key);
+      Preconditions.checkState(value != null,
+          "no reference for \"%s\" in values", key);
+      result.set(key, value);
+    }
+    return result;
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigUtils.java
new file mode 100644
index 00000000000..a969be93629
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigUtils.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.service.api.records.ConfigFormat; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class ConfigUtils { + public static final String TEMPLATE_FILE = "template.file"; + + public static String replaceProps(Map config, String content) { + Map tokens = new HashMap<>(); + for (Entry entry : config.entrySet()) { + tokens.put("${" + entry.getKey() + "}", entry.getValue()); + tokens.put("{{" + entry.getKey() + "}}", entry.getValue()); + } + String value = content; + for (Map.Entry token : tokens.entrySet()) { + value = value.replaceAll(Pattern.quote(token.getKey()), + Matcher.quoteReplacement(token.getValue())); + } + return value; + } + + public static Map replacePropsInConfig( + Map config, Map env) { + Map tokens = new HashMap<>(); + for (Entry entry : env.entrySet()) { + tokens.put("${" + entry.getKey() + "}", entry.getValue()); + } + Map newConfig = new HashMap<>(); + for (Entry entry : config.entrySet()) { + String value = entry.getValue(); + for (Map.Entry token : tokens.entrySet()) { + value = value.replaceAll(Pattern.quote(token.getKey()), + Matcher.quoteReplacement(token.getValue())); + } + newConfig.put(entry.getKey(), entry.getValue()); + } + return newConfig; + } + + public static void prepConfigForTemplateOutputter(ConfigFormat 
 configFormat,
+      Map<String, String> config, SliderFileSystem fileSystem,
+      String clusterName, String fileName) throws IOException {
+    if (!configFormat.equals(ConfigFormat.TEMPLATE)) {
+      return;
+    }
+    Path templateFile = null;
+    if (config.containsKey(TEMPLATE_FILE)) {
+      templateFile = fileSystem.buildResourcePath(config.get(TEMPLATE_FILE));
+      if (!fileSystem.isFile(templateFile)) {
+        templateFile = fileSystem.buildResourcePath(clusterName,
+            config.get(TEMPLATE_FILE));
+      }
+      if (!fileSystem.isFile(templateFile)) {
+        throw new IOException("config specified template file " + config
+            .get(TEMPLATE_FILE) + " but " + templateFile + " doesn't exist");
+      }
+    }
+    if (templateFile == null && fileName != null) {
+      templateFile = fileSystem.buildResourcePath(fileName);
+      if (!fileSystem.isFile(templateFile)) {
+        templateFile = fileSystem.buildResourcePath(clusterName,
+            fileName);
+      }
+    }
+    if (fileSystem.isFile(templateFile)) {
+      config.put("content", fileSystem.cat(templateFile));
+    } else {
+      config.put("content", "");
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java
new file mode 100644
index 00000000000..6b9d4d51296
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java
@@ -0,0 +1,521 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hadoop.yarn.service.conf.SliderExitCodes; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; +import org.apache.hadoop.yarn.service.exceptions.ErrorStrings; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.util.Records; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.HashMap; 
+import java.util.Map; + +public class CoreFileSystem { + private static final Logger + log = LoggerFactory.getLogger(CoreFileSystem.class); + + private static final String UTF_8 = "UTF-8"; + + protected final FileSystem fileSystem; + protected final Configuration configuration; + + public CoreFileSystem(FileSystem fileSystem, Configuration configuration) { + Preconditions.checkNotNull(fileSystem, + "Cannot create a CoreFileSystem with a null FileSystem"); + Preconditions.checkNotNull(configuration, + "Cannot create a CoreFileSystem with a null Configuration"); + this.fileSystem = fileSystem; + this.configuration = configuration; + } + + public CoreFileSystem(Configuration configuration) throws IOException { + Preconditions.checkNotNull(configuration, + "Cannot create a CoreFileSystem with a null Configuration"); + this.fileSystem = FileSystem.get(configuration); + this.configuration = configuration; + } + + /** + * Get the temp path for this cluster + * @param clustername name of the cluster + * @return path for temp files (is not purged) + */ + public Path getTempPathForCluster(String clustername) { + Path clusterDir = buildClusterDirPath(clustername); + return new Path(clusterDir, YarnServiceConstants.TMP_DIR_PREFIX); + } + + /** + * Returns the underlying FileSystem for this object. 
+ * + * @return filesystem + */ + public FileSystem getFileSystem() { + return fileSystem; + } + + @Override + public String toString() { + final StringBuilder sb = + new StringBuilder("CoreFileSystem{"); + sb.append("fileSystem=").append(fileSystem.getUri()); + sb.append('}'); + return sb.toString(); + } + + /** + * Build up the path string for a cluster instance -no attempt to + * create the directory is made + * + * @param clustername name of the cluster + * @return the path for persistent data + */ + public Path buildClusterDirPath(String clustername) { + Preconditions.checkNotNull(clustername); + Path path = getBaseApplicationPath(); + return new Path(path, YarnServiceConstants.SERVICES_DIRECTORY + "/" + clustername); + } + + + /** + * Build up the path string for keytab install location -no attempt to + * create the directory is made + * + * @return the path for keytab + */ + public Path buildKeytabInstallationDirPath(String keytabFolder) { + Preconditions.checkNotNull(keytabFolder); + Path path = getBaseApplicationPath(); + return new Path(path, YarnServiceConstants.KEYTAB_DIR + "/" + keytabFolder); + } + + /** + * Build up the path string for keytab install location -no attempt to + * create the directory is made + * + * @return the path for keytab installation location + */ + public Path buildKeytabPath(String keytabDir, String keytabName, String clusterName) { + Path homePath = getHomeDirectory(); + Path baseKeytabDir; + if (keytabDir != null) { + baseKeytabDir = new Path(homePath, keytabDir); + } else { + baseKeytabDir = new Path(buildClusterDirPath(clusterName), + YarnServiceConstants.KEYTAB_DIR); + } + return keytabName == null ? 
baseKeytabDir : + new Path(baseKeytabDir, keytabName); + } + + /** + * Build up the path string for resource install location -no attempt to + * create the directory is made + * + * @return the path for resource + */ + public Path buildResourcePath(String resourceFolder) { + Preconditions.checkNotNull(resourceFolder); + Path path = getBaseApplicationPath(); + return new Path(path, YarnServiceConstants.RESOURCE_DIR + "/" + resourceFolder); + } + + /** + * Build up the path string for resource install location -no attempt to + * create the directory is made + * + * @return the path for resource + */ + public Path buildResourcePath(String dirName, String fileName) { + Preconditions.checkNotNull(dirName); + Preconditions.checkNotNull(fileName); + Path path = getBaseApplicationPath(); + return new Path(path, YarnServiceConstants.RESOURCE_DIR + "/" + dirName + "/" + fileName); + } + + /** + * Create a directory with the given permissions. + * + * @param dir directory + * @param clusterPerms cluster permissions + * @throws IOException IO problem + * @throws BadClusterStateException any cluster state problem + */ + @SuppressWarnings("deprecation") + public void createWithPermissions(Path dir, FsPermission clusterPerms) throws + IOException, + BadClusterStateException { + if (fileSystem.isFile(dir)) { + // HADOOP-9361 shows some filesystems don't correctly fail here + throw new BadClusterStateException( + "Cannot create a directory over a file %s", dir); + } + log.debug("mkdir {} with perms {}", dir, clusterPerms); + //no mask whatoever + fileSystem.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000"); + fileSystem.mkdirs(dir, clusterPerms); + //and force set it anyway just to make sure + fileSystem.setPermission(dir, clusterPerms); + } + + /** + * Verify that the cluster directory is not present + * + * @param clustername name of the cluster + * @param clusterDirectory actual directory to look for + * @throws IOException trouble with FS + * @throws 
SliderException If the directory exists + */ + public void verifyClusterDirectoryNonexistent(String clustername, + Path clusterDirectory) + throws IOException, SliderException { + if (fileSystem.exists(clusterDirectory)) { + throw new SliderException(SliderExitCodes.EXIT_INSTANCE_EXISTS, + ErrorStrings.PRINTF_E_INSTANCE_ALREADY_EXISTS, clustername, + clusterDirectory); + } + } + /** + * Verify that the given directory is not present + * + * @param clusterDirectory actual directory to look for + * @throws IOException trouble with FS + * @throws SliderException If the directory exists + */ + public void verifyDirectoryNonexistent(Path clusterDirectory) throws + IOException, + SliderException { + if (fileSystem.exists(clusterDirectory)) { + + log.error("Dir {} exists: {}", + clusterDirectory, + listFSDir(clusterDirectory)); + throw new SliderException(SliderExitCodes.EXIT_INSTANCE_EXISTS, + ErrorStrings.PRINTF_E_INSTANCE_DIR_ALREADY_EXISTS, + clusterDirectory); + } + } + + /** + * Verify that a user has write access to a directory. 
+ * It does this by creating then deleting a temp file + * + * @param dirPath actual directory to look for + * @throws FileNotFoundException file not found + * @throws IOException trouble with FS + * @throws BadClusterStateException if the directory is not writeable + */ + public void verifyDirectoryWriteAccess(Path dirPath) throws IOException, + SliderException { + verifyPathExists(dirPath); + Path tempFile = new Path(dirPath, "tmp-file-for-checks"); + try { + FSDataOutputStream out ; + out = fileSystem.create(tempFile, true); + IOUtils.closeStream(out); + fileSystem.delete(tempFile, false); + } catch (IOException e) { + log.warn("Failed to create file {}: {}", tempFile, e); + throw new BadClusterStateException(e, + "Unable to write to directory %s : %s", dirPath, e.toString()); + } + } + + /** + * Verify that a path exists + * @param path path to check + * @throws FileNotFoundException file not found + * @throws IOException trouble with FS + */ + public void verifyPathExists(Path path) throws IOException { + if (!fileSystem.exists(path)) { + throw new FileNotFoundException(path.toString()); + } + } + + /** + * Verify that a path exists + * @param path path to check + * @throws FileNotFoundException file not found or is not a file + * @throws IOException trouble with FS + */ + public void verifyFileExists(Path path) throws IOException { + FileStatus status = fileSystem.getFileStatus(path); + + if (!status.isFile()) { + throw new FileNotFoundException("Not a file: " + path.toString()); + } + } + + /** + * Given a path, check if it exists and is a file + * + * @param path + * absolute path to the file to check + * @return true if and only if path exists and is a file, false for all other + * reasons including if file check throws IOException + */ + public boolean isFile(Path path) { + boolean isFile = false; + try { + FileStatus status = fileSystem.getFileStatus(path); + if (status.isFile()) { + isFile = true; + } + } catch (IOException e) { + // ignore, isFile is 
already set to false + } + return isFile; + } + + /** + * Get the base path + * + * @return the base path optionally configured by + * {@link YarnServiceConf#YARN_SERVICE_BASE_PATH} + */ + public Path getBaseApplicationPath() { + String configuredBasePath = configuration + .get(YarnServiceConf.YARN_SERVICE_BASE_PATH, + getHomeDirectory() + "/" + YarnServiceConstants.SERVICE_BASE_DIRECTORY); + return new Path(configuredBasePath); + } + + /** + * Get slider dependency parent dir in HDFS + * + * @return the parent dir path of slider.tar.gz in HDFS + */ + public Path getDependencyPath() { + String parentDir = YarnServiceConstants.DEPENDENCY_DIR; + return new Path(String.format(parentDir, VersionInfo.getVersion())); + } + + /** + * Get slider.tar.gz absolute filepath in HDFS + * + * @return the absolute path to slider.tar.gz in HDFS + */ + public Path getDependencyTarGzip() { + Path dependencyLibAmPath = getDependencyPath(); + Path dependencyLibTarGzip = new Path( + dependencyLibAmPath.toUri().toString(), + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME + + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT); + return dependencyLibTarGzip; + } + + public Path getHomeDirectory() { + return fileSystem.getHomeDirectory(); + } + + /** + * Create an AM resource from the + * + * @param destPath dest path in filesystem + * @param resourceType resource type + * @return the local resource for AM + */ + public LocalResource createAmResource(Path destPath, LocalResourceType resourceType) throws IOException { + FileStatus destStatus = fileSystem.getFileStatus(destPath); + LocalResource amResource = Records.newRecord(LocalResource.class); + amResource.setType(resourceType); + // Set visibility of the resource + // Setting to most private option + amResource.setVisibility(LocalResourceVisibility.APPLICATION); + // Set the resource to be copied over + amResource.setResource( + URL.fromPath(fileSystem.resolvePath(destStatus.getPath()))); + // Set timestamp and length of file so that the 
 framework
+    // can do basic sanity checks for the local resource
+    // after it has been copied over to ensure it is the same
+    // resource the client intended to use with the service
+    amResource.setTimestamp(destStatus.getModificationTime());
+    amResource.setSize(destStatus.getLen());
+    return amResource;
+  }
+
+  /**
+   * Register all files under a fs path as a directory to push out
+   *
+   * @param srcDir src dir
+   * @param destRelativeDir dest dir (no trailing /)
+   * @return the map of entries
+   */
+  public Map<String, LocalResource> submitDirectory(Path srcDir, String destRelativeDir) throws IOException {
+    //now register each of the files in the directory to be
+    //copied to the destination
+    FileStatus[] fileset = fileSystem.listStatus(srcDir);
+    Map<String, LocalResource> localResources =
+        new HashMap<String, LocalResource>(fileset.length);
+    for (FileStatus entry : fileset) {
+
+      LocalResource resource = createAmResource(entry.getPath(),
+          LocalResourceType.FILE);
+      String relativePath = destRelativeDir + "/" + entry.getPath().getName();
+      localResources.put(relativePath, resource);
+    }
+    return localResources;
+  }
+
+  /**
+   * Submit a JAR containing a specific class, returning
+   * the resource to be mapped in
+   *
+   * @param clazz class to look for
+   * @param subdir subdirectory (expected to end in a "/")
+   * @param jarName At the destination
+   * @return the local resource ref
+   * @throws IOException trouble copying to HDFS
+   */
+  public LocalResource submitJarWithClass(Class clazz, Path tempPath, String subdir, String jarName)
+      throws IOException, SliderException {
+    File localFile = ServiceUtils.findContainingJarOrFail(clazz);
+    return submitFile(localFile, tempPath, subdir, jarName);
+  }
+
+  /**
+   * Submit a local file to the filesystem references by the instance's cluster
+   * filesystem
+   *
+   * @param localFile filename
+   * @param subdir subdirectory (expected to end in a "/")
+   * @param destFileName destination filename
+   * @return the local resource ref
+   * @throws IOException trouble copying to HDFS
+   */
+  public 
 LocalResource submitFile(File localFile, Path tempPath, String subdir, String destFileName)
+      throws IOException {
+    Path src = new Path(localFile.toString());
+    Path subdirPath = new Path(tempPath, subdir);
+    fileSystem.mkdirs(subdirPath);
+    Path destPath = new Path(subdirPath, destFileName);
+    log.debug("Copying {} (size={} bytes) to {}", localFile, localFile.length(), destPath);
+
+    fileSystem.copyFromLocalFile(false, true, src, destPath);
+
+    // Set the type of resource - file or archive
+    // archives are untarred at destination
+    // we don't need the jar file to be untarred for now
+    return createAmResource(destPath, LocalResourceType.FILE);
+  }
+
+  /**
+   * Submit the AM tar.gz resource referenced by the instance's cluster
+   * filesystem. Also, update the providerResources object with the new
+   * resource.
+   *
+   * @param providerResources
+   *          the provider resource map to be updated
+   * @throws IOException
+   *           trouble copying to HDFS
+   */
+  public void submitTarGzipAndUpdate(
+      Map<String, LocalResource> providerResources) throws IOException,
+      BadClusterStateException {
+    Path dependencyLibTarGzip = getDependencyTarGzip();
+    LocalResource lc = createAmResource(dependencyLibTarGzip,
+        LocalResourceType.ARCHIVE);
+    providerResources.put(YarnServiceConstants.DEPENDENCY_LOCALIZED_DIR_LINK, lc);
+  }
+
+  public void copyLocalFileToHdfs(File localPath,
+      Path destPath, FsPermission fp)
+      throws IOException {
+    if (localPath == null || destPath == null) {
+      throw new IOException("Either localPath or destPath is null");
+    }
+    fileSystem.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
+        "000");
+    fileSystem.mkdirs(destPath.getParent(), fp);
+    log.info("Copying file {} to {}", localPath.toURI(),
+        fileSystem.getScheme() + ":/" + destPath.toUri());
+
+    fileSystem.copyFromLocalFile(false, true, new Path(localPath.getPath()),
+        destPath);
+    // set file permissions of the destPath
+    fileSystem.setPermission(destPath, fp);
+  }
+
+  public void copyHdfsFileToLocal(Path hdfsPath, File 
destFile) + throws IOException { + if (hdfsPath == null || destFile == null) { + throw new IOException("Either hdfsPath or destPath is null"); + } + log.info("Copying file {} to {}", hdfsPath.toUri(), destFile.toURI()); + + Path destPath = new Path(destFile.getPath()); + fileSystem.copyToLocalFile(hdfsPath, destPath); + } + + /** + * list entries in a filesystem directory + * + * @param path directory + * @return a listing, one to a line + * @throws IOException + */ + public String listFSDir(Path path) throws IOException { + FileStatus[] stats = fileSystem.listStatus(path); + StringBuilder builder = new StringBuilder(); + for (FileStatus stat : stats) { + builder.append(stat.getPath().toString()) + .append("\t") + .append(stat.getLen()) + .append("\n"); + } + return builder.toString(); + } + + public String cat(Path path) throws IOException { + FileStatus status = fileSystem.getFileStatus(path); + byte[] b = new byte[(int) status.getLen()]; + FSDataInputStream in = null; + try { + in = fileSystem.open(path); + int count = in.read(b); + return new String(b, 0, count, UTF_8); + } finally { + IOUtils.closeStream(in); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Duration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Duration.java new file mode 100644 index 00000000000..6fadfd3af22 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Duration.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
/**
 * A duration in milliseconds. This class can be used
 * to time an interval (via {@link #start()}/{@link #finish()}) and to be
 * polled to see whether an optional time limit has been passed.
 *
 * <p>Fields are public and mutable by design; the class is not thread-safe.
 */
public class Duration implements Closeable {
  public long start, finish;
  public final long limit;

  /** Create a duration instance with a limit of 0. */
  public Duration() {
    this(0);
  }

  /**
   * Create a duration with a limit specified in millis.
   * @param limit duration in milliseconds
   */
  public Duration(long limit) {
    this.limit = limit;
  }

  /**
   * Record the start time.
   * @return this instance, for call chaining
   */
  public Duration start() {
    start = now();
    return this;
  }

  /**
   * The close operation relays to {@link #finish()}.
   * Implementing it allows Duration instances to be automatically
   * finish()'d in Java7 try blocks for when used in measuring durations.
   */
  @Override
  public final void close() {
    finish();
  }

  /** Record the finish time. */
  public void finish() {
    finish = now();
  }

  /** Current time in millis, derived from the monotonic nanosecond clock. */
  protected long now() {
    return System.nanoTime() / 1000000;
  }

  /** @return the measured interval: finish time minus start time, in millis */
  public long getInterval() {
    return finish - start;
  }

  /**
   * Probe for the limit being exceeded.
   * @return true if a limit was set and the current time exceeds it
   */
  public boolean getLimitExceeded() {
    return limit >= 0 && ((now() - start) > limit);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("Duration");
    if (finish >= start) {
      sb.append(" finished at ").append(getInterval()).append(" millis;");
    } else if (start > 0) {
      sb.append(" started but not yet finished;");
    } else {
      sb.append(" unstarted;");
    }
    if (limit > 0) {
      sb.append(" limit: ").append(limit).append(" millis");
      if (getLimitExceeded()) {
        sb.append(" - exceeded");
      }
    }
    return sb.toString();
  }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; +import org.codehaus.jackson.JsonGenerationException; +import org.codehaus.jackson.JsonParseException; +import org.codehaus.jackson.map.DeserializationConfig; +import org.codehaus.jackson.map.JsonMappingException; +import org.codehaus.jackson.map.ObjectMapper; +import org.codehaus.jackson.map.PropertyNamingStrategy; +import org.codehaus.jackson.map.SerializationConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.EOFException; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * Support for marshalling objects to and from JSON. + * This class is NOT thread safe; it constructs an object mapper + * as an instance field. 
+ * @param + */ +public class JsonSerDeser { + + private static final Logger log = LoggerFactory.getLogger(JsonSerDeser.class); + private static final String UTF_8 = "UTF-8"; + + private final Class classType; + private final ObjectMapper mapper; + + /** + * Create an instance bound to a specific type + * @param classType class type + */ + public JsonSerDeser(Class classType) { + this.classType = classType; + this.mapper = new ObjectMapper(); + mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false); + } + + public JsonSerDeser(Class classType, PropertyNamingStrategy namingStrategy) { + this(classType); + mapper.setPropertyNamingStrategy(namingStrategy); + } + + /** + * Convert from JSON + * @param json input + * @return the parsed JSON + * @throws IOException IO + * @throws JsonMappingException failure to map from the JSON to this class + */ + public T fromJson(String json) + throws IOException, JsonParseException, JsonMappingException { + try { + return mapper.readValue(json, classType); + } catch (IOException e) { + log.error("Exception while parsing json : " + e + "\n" + json, e); + throw e; + } + } + + /** + * Convert from a JSON file + * @param jsonFile input file + * @return the parsed JSON + * @throws IOException IO problems + * @throws JsonMappingException failure to map from the JSON to this class + */ + public T fromFile(File jsonFile) + throws IOException, JsonParseException, JsonMappingException { + File absoluteFile = jsonFile.getAbsoluteFile(); + try { + return mapper.readValue(absoluteFile, classType); + } catch (IOException e) { + log.error("Exception while parsing json file {}", absoluteFile, e); + throw e; + } + } + + /** + * Convert from a JSON file + * @param resource input file + * @return the parsed JSON + * @throws IOException IO problems + * @throws JsonMappingException failure to map from the JSON to this class + */ + public T fromResource(String resource) + throws IOException, JsonParseException, 
JsonMappingException { + try(InputStream resStream = this.getClass().getResourceAsStream(resource)) { + if (resStream == null) { + throw new FileNotFoundException(resource); + } + return (T) (mapper.readValue(resStream, classType)); + } catch (IOException e) { + log.error("Exception while parsing json resource {}", resource, e); + throw e; + } + } + + /** + * Convert from an input stream, closing the stream afterwards. + * @param stream + * @return the parsed JSON + * @throws IOException IO problems + */ + public T fromStream(InputStream stream) throws IOException { + try { + return (T) (mapper.readValue(stream, classType)); + } catch (IOException e) { + log.error("Exception while parsing json input stream", e); + throw e; + } finally { + IOUtils.closeStream(stream); + } + } + + /** + * clone by converting to JSON and back again. + * This is much less efficient than any Java clone process. + * @param instance instance to duplicate + * @return a new instance + * @throws IOException problems. 
+ */ + public T fromInstance(T instance) throws IOException { + return fromJson(toJson(instance)); + } + + /** + * Deserialize from a byte array + * @param b + * @return the deserialized value + * @throws IOException parse problems + */ + public T fromBytes(byte[] b) throws IOException { + String json = new String(b, 0, b.length, UTF_8); + return fromJson(json); + } + + /** + * Load from a Hadoop filesystem + * @param fs filesystem + * @param path path + * @return a loaded CD + * @throws IOException IO problems + * @throws JsonParseException parse problems + * @throws JsonMappingException O/J mapping problems + */ + public T load(FileSystem fs, Path path) + throws IOException, JsonParseException, JsonMappingException { + FileStatus status = fs.getFileStatus(path); + long len = status.getLen(); + byte[] b = new byte[(int) len]; + FSDataInputStream dataInputStream = fs.open(path); + int count = dataInputStream.read(b); + if (count != len) { + throw new EOFException("Read of " + path +" finished prematurely"); + } + return fromBytes(b); + } + + + /** + * Save to a hadoop filesystem + * @param fs filesystem + * @param path path + * @param instance instance to save + * @param overwrite should any existing file be overwritten + * @throws IOException IO exception + */ + public void save(FileSystem fs, Path path, T instance, + boolean overwrite) throws + IOException { + FSDataOutputStream dataOutputStream = fs.create(path, overwrite); + writeJsonAsBytes(instance, dataOutputStream); + } + + /** + * Save an instance to a file + * @param instance instance to save + * @param file file + * @throws IOException + */ + public void save(T instance, File file) throws + IOException { + writeJsonAsBytes(instance, new FileOutputStream(file.getAbsoluteFile())); + } + + /** + * Write the json as bytes -then close the file + * @param dataOutputStream an outout stream that will always be closed + * @throws IOException on any failure + */ + private void writeJsonAsBytes(T instance, + 
OutputStream dataOutputStream) throws IOException { + try { + String json = toJson(instance); + byte[] b = json.getBytes(UTF_8); + dataOutputStream.write(b); + dataOutputStream.flush(); + dataOutputStream.close(); + } finally { + IOUtils.closeStream(dataOutputStream); + } + } + + /** + * Convert an object to a JSON string + * @param instance instance to convert + * @return a JSON string description + * @throws JsonParseException parse problems + * @throws JsonMappingException O/J mapping problems + */ + public String toJson(T instance) throws IOException, + JsonGenerationException, + JsonMappingException { + mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true); + return mapper.writeValueAsString(instance); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java new file mode 100644 index 00000000000..108ca22defe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
/**
 * Utility class to validate strings against a predefined regular
 * expression pattern.
 */
public class PatternValidator {

  public static final String E_INVALID_NAME =
      "Invalid name %s does not match the pattern %s ";
  private final Pattern valid;
  private final String pattern;

  public PatternValidator(String pattern) {
    this.pattern = pattern;
    valid = Pattern.compile(pattern);
  }

  /**
   * Validate a name, rejecting anything outside the configured pattern.
   * @param name name to validate
   * @throws IllegalArgumentException if not a valid name
   */
  public void validate(String name) {
    if (matches(name)) {
      return;
    }
    throw new IllegalArgumentException(
        String.format(E_INVALID_NAME, name, pattern));
  }

  /**
   * Query to see if the pattern matches.
   * @param name name to validate
   * @return true if the string matches the pattern
   */
  public boolean matches(String name) {
    return valid.matcher(name).matches();
  }
}
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PortScanner.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.yarn.service.conf.SliderExitCodes; +import org.apache.hadoop.yarn.service.exceptions.BadConfigException; +import org.apache.hadoop.yarn.service.exceptions.SliderException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * a scanner which can take an input string for a range or scan the lot. 
+ */ +public class PortScanner { + private static Pattern NUMBER_RANGE = Pattern.compile("^(\\d+)\\s*-\\s*(\\d+)$"); + private static Pattern SINGLE_NUMBER = Pattern.compile("^\\d+$"); + + private List remainingPortsToCheck; + + public PortScanner() { + } + + public void setPortRange(String input) throws BadConfigException { + // first split based on commas + Set inputPorts= new TreeSet(); + String[] ranges = input.split(","); + for ( String range : ranges ) { + if (range.trim().isEmpty()) { + continue; + } + Matcher m = SINGLE_NUMBER.matcher(range.trim()); + if (m.find()) { + inputPorts.add(Integer.parseInt(m.group())); + continue; + } + m = NUMBER_RANGE.matcher(range.trim()); + if (m.find()) { + String[] boundaryValues = m.group(0).split("-"); + int start = Integer.parseInt(boundaryValues[0].trim()); + int end = Integer.parseInt(boundaryValues[1].trim()); + if (end < start) { + throw new BadConfigException("End of port range is before start: " + + range + " in input: " + input); + } + for (int i = start; i < end + 1; i++) { + inputPorts.add(i); + } + continue; + } + throw new BadConfigException("Bad port range: " + range + " in input: " + + input); + } + if (inputPorts.size() == 0) { + throw new BadConfigException("No ports found in range: " + input); + } + this.remainingPortsToCheck = new ArrayList(inputPorts); + } + + public List getRemainingPortsToCheck() { + return remainingPortsToCheck; + } + + public int getAvailablePort() throws SliderException, IOException { + if (remainingPortsToCheck != null) { + return getAvailablePortViaPortArray(); + } else { + return ServiceUtils.getOpenPort(); + } + } + + private int getAvailablePortViaPortArray() throws SliderException { + boolean found = false; + int availablePort = -1; + Iterator portsToCheck = this.remainingPortsToCheck.iterator(); + while (portsToCheck.hasNext() && !found) { + int portToCheck = portsToCheck.next(); + found = ServiceUtils.isPortAvailable(portToCheck); + if (found) { + availablePort = 
portToCheck; + portsToCheck.remove(); + } + } + + if (availablePort < 0) { + throw new SliderException(SliderExitCodes.EXIT_BAD_CONFIGURATION, + "No available ports found in configured range {}", + remainingPortsToCheck); + } + + return availablePort; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java new file mode 100644 index 00000000000..9d00b3ca119 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.api.records.ConfigFormat; +import org.apache.hadoop.yarn.service.exceptions.BadConfigException; +import org.codehaus.jackson.annotate.JsonIgnoreProperties; +import org.codehaus.jackson.map.ObjectMapper; +import org.codehaus.jackson.map.SerializationConfig; +import org.codehaus.jackson.map.annotate.JsonSerialize; + +import java.io.IOException; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +/** + * JSON-serializable description of a published key-val configuration. + * + * The values themselves are not serialized in the external view; they have + * to be served up by the far end + */ +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL) +public class PublishedConfiguration { + + public String description; + public long updated; + + public String updatedTime; + + public Map entries = new HashMap<>(); + + public PublishedConfiguration() { + } + + /** + * build an empty published configuration + * @param description configuration description + */ + public PublishedConfiguration(String description) { + this.description = description; + } + + /** + * Build a configuration from the entries + * @param description configuration description + * @param entries entries to put + */ + public PublishedConfiguration(String description, + Iterable> entries) { + this.description = description; + putValues(entries); + } + + /** + * Build a published configuration, using the keys from keysource, + * but resolving the values from the value source, via Configuration.get() + * @param description configuration description + * @param keysource source of keys + * @param valuesource source of values + */ + public PublishedConfiguration(String description, + Iterable> keysource, + Configuration valuesource) { + this.description = description; + 
putValues(ConfigHelper.resolveConfiguration(keysource, valuesource)); + } + + + /** + * Is the configuration empty. This means either that it has not + * been given any values, or it is stripped down copy set down over the + * wire. + * @return true if it is empty + */ + public boolean isEmpty() { + return entries.isEmpty(); + } + + + public void setUpdated(long updated) { + this.updated = updated; + this.updatedTime = new Date(updated).toString(); + } + + public long getUpdated() { + return updated; + } + + /** + * Set the values from an iterable (this includes a Hadoop Configuration + * and Java properties object). + * Any existing value set is discarded + * @param entries entries to put + */ + public void putValues(Iterable> entries) { + this.entries = new HashMap(); + for (Map.Entry entry : entries) { + this.entries.put(entry.getKey(), entry.getValue()); + } + + } + + /** + * Convert to Hadoop XML + * @return the configuration as a Hadoop Configuratin + */ + public Configuration asConfiguration() { + Configuration conf = new Configuration(false); + try { + ConfigHelper.addConfigMap(conf, entries, ""); + } catch (BadConfigException e) { + // triggered on a null value; switch to a runtime (and discard the stack) + throw new RuntimeException(e.toString()); + } + return conf; + } + + public String asConfigurationXML() throws IOException { + return ConfigHelper.toXml(asConfiguration()); + } + + /** + * Convert values to properties + * @return a property file + */ + public Properties asProperties() { + Properties props = new Properties(); + props.putAll(entries); + return props; + } + + /** + * Return the values as json string + * @return the JSON representation + * @throws IOException marshalling failure + */ + public String asJson() throws IOException { + ObjectMapper mapper = new ObjectMapper(); + mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true); + String json = mapper.writeValueAsString(entries); + return json; + } + + + /** + * This makes a copy 
without the nested content -so is suitable + * for returning as part of the list of a parent's values + * @return the copy + */ + public PublishedConfiguration shallowCopy() { + PublishedConfiguration that = new PublishedConfiguration(); + that.description = this.description; + that.updated = this.updated; + that.updatedTime = this.updatedTime; + return that; + } + + @Override + public String toString() { + final StringBuilder sb = + new StringBuilder("PublishedConfiguration{"); + sb.append("description='").append(description).append('\''); + sb.append(" entries = ").append(entries.size()); + sb.append('}'); + return sb.toString(); + } + + /** + * Create an outputter for a given format + * @param format format to use + * @return an instance of output + */ + public PublishedConfigurationOutputter createOutputter(ConfigFormat format) { + return PublishedConfigurationOutputter.createOutputter(format, this); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java new file mode 100644 index 00000000000..88ecf2c7f03 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.api.records.ConfigFormat; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.DumperOptions.FlowStyle; +import org.yaml.snakeyaml.Yaml; + +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.io.StringWriter; +import java.util.Properties; + +/** + * Output a published configuration + */ +public abstract class PublishedConfigurationOutputter { + + private static final String COMMENTS = "Generated by Apache Slider"; + + protected final PublishedConfiguration owner; + + protected PublishedConfigurationOutputter(PublishedConfiguration owner) { + this.owner = owner; + } + + /** + * Save the config to a destination file, in the format of this outputter + * @param dest destination file + * @throws IOException + */ +/* JDK7 + public void save(File dest) throws IOException { + try(FileOutputStream out = new FileOutputStream(dest)) { + save(out); + out.close(); + } + } +*/ + public void save(File dest) throws IOException { + FileUtils.writeStringToFile(dest, asString(), Charsets.UTF_8); + } + + /** + * Save the content. 
The default saves the asString() value + * to the output stream + * @param out output stream + * @throws IOException + */ + public void save(OutputStream out) throws IOException { + IOUtils.write(asString(), out, Charsets.UTF_8); + } + /** + * Convert to a string + * @return the string form + * @throws IOException + */ + public abstract String asString() throws IOException; + + /** + * Create an outputter for the chosen format + * @param format format enumeration + * @param owner owning config + * @return the outputter + */ + + public static PublishedConfigurationOutputter createOutputter(ConfigFormat format, + PublishedConfiguration owner) { + Preconditions.checkNotNull(owner); + switch (format) { + case XML: + case HADOOP_XML: + return new XmlOutputter(owner); + case PROPERTIES: + return new PropertiesOutputter(owner); + case JSON: + return new JsonOutputter(owner); + case ENV: + return new EnvOutputter(owner); + case TEMPLATE: + return new TemplateOutputter(owner); + case YAML: + return new YamlOutputter(owner); + default: + throw new RuntimeException("Unsupported format :" + format); + } + } + + public static class XmlOutputter extends PublishedConfigurationOutputter { + + + private final Configuration configuration; + + public XmlOutputter(PublishedConfiguration owner) { + super(owner); + configuration = owner.asConfiguration(); + } + + @Override + public void save(OutputStream out) throws IOException { + configuration.writeXml(out); + } + + @Override + public String asString() throws IOException { + return ConfigHelper.toXml(configuration); + } + + public Configuration getConfiguration() { + return configuration; + } + } + + public static class PropertiesOutputter extends PublishedConfigurationOutputter { + + private final Properties properties; + + public PropertiesOutputter(PublishedConfiguration owner) { + super(owner); + properties = owner.asProperties(); + } + + @Override + public void save(OutputStream out) throws IOException { + properties.store(out, 
COMMENTS); + } + + + public String asString() throws IOException { + StringWriter sw = new StringWriter(); + properties.store(sw, COMMENTS); + return sw.toString(); + } + } + + + public static class JsonOutputter extends PublishedConfigurationOutputter { + + public JsonOutputter(PublishedConfiguration owner) { + super(owner); + } + + @Override + public String asString() throws IOException { + return owner.asJson(); + } + } + + + public static class EnvOutputter extends PublishedConfigurationOutputter { + + public EnvOutputter(PublishedConfiguration owner) { + super(owner); + } + + @Override + public String asString() throws IOException { + if (!owner.entries.containsKey("content")) { + throw new IOException("Configuration has no content field and cannot " + + "be retrieved as type 'env'"); + } + String content = owner.entries.get("content"); + return ConfigUtils.replaceProps(owner.entries, content); + } + } + + public static class TemplateOutputter extends EnvOutputter { + public TemplateOutputter(PublishedConfiguration owner) { + super(owner); + } + } + + public static class YamlOutputter extends PublishedConfigurationOutputter { + + private final Yaml yaml; + + public YamlOutputter(PublishedConfiguration owner) { + super(owner); + DumperOptions options = new DumperOptions(); + options.setDefaultFlowStyle(FlowStyle.BLOCK); + yaml = new Yaml(options); + } + + public String asString() throws IOException { + return yaml.dump(owner.entries); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java new file mode 100644 index 00000000000..140204af011 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.service.utils.ApplicationReportSerDeser; +import org.codehaus.jackson.annotate.JsonIgnoreProperties; +import org.codehaus.jackson.map.annotate.JsonSerialize; + +import java.io.IOException; + +/** + * Serialized form of an service report which can be persisted + * and then parsed. 
It can not be converted back into a + * real YARN service report + * + * Useful for testing + */ + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL) + +public class SerializedApplicationReport { + + public String applicationId; + public String applicationAttemptId; + public String name; + public String applicationType; + public String user; + public String queue; + public String host; + public Integer rpcPort; + public String state; + public String diagnostics; + public String url; + /** + * This value is non-null only when a report is generated from a submission context. + * The YARN {@link ApplicationReport} structure does not propagate this value + * from the RM. + */ + public Long submitTime; + public Long startTime; + public Long finishTime; + public String finalStatus; + public String origTrackingUrl; + public Float progress; + + public SerializedApplicationReport() { + } + + public SerializedApplicationReport(ApplicationReport report) { + this.applicationId = report.getApplicationId().toString(); + ApplicationAttemptId attemptId = report.getCurrentApplicationAttemptId(); + this.applicationAttemptId = attemptId != null ? attemptId.toString() : "N/A"; + this.name = report.getName(); + this.applicationType = report.getApplicationType(); + this.user = report.getUser(); + this.queue = report.getQueue(); + this.host = report.getHost(); + this.rpcPort = report.getRpcPort(); + this.state = report.getYarnApplicationState().toString(); + this.diagnostics = report.getDiagnostics(); + this.startTime = report.getStartTime(); + this.finishTime = report.getFinishTime(); + FinalApplicationStatus appStatus = report.getFinalApplicationStatus(); + this.finalStatus = appStatus == null ? 
"" : appStatus.toString(); + this.progress = report.getProgress(); + this.url = report.getTrackingUrl(); + this.origTrackingUrl= report.getOriginalTrackingUrl(); + } + + @Override + public String toString() { + try { + return ApplicationReportSerDeser.toString(this); + } catch (IOException e) { + return super.toString(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java new file mode 100644 index 00000000000..b58cea89a43 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java @@ -0,0 +1,401 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Configuration; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; +import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils; +import org.apache.hadoop.yarn.service.conf.RestApiConstants; +import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; +import org.codehaus.jackson.map.PropertyNamingStrategy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class ServiceApiUtil { + private static final Logger LOG = + LoggerFactory.getLogger(ServiceApiUtil.class); + public static JsonSerDeser jsonSerDeser = + new JsonSerDeser<>(Service.class, + PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES); + private static final PatternValidator namePattern + = new PatternValidator("[a-z][a-z0-9-]*"); + + @VisibleForTesting + public static void setJsonSerDeser(JsonSerDeser jsd) { + jsonSerDeser = jsd; + } + + @VisibleForTesting + public static void validateAndResolveService(Service service, + SliderFileSystem fs, 
org.apache.hadoop.conf.Configuration conf) throws + IOException { + boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED, + RegistryConstants.DEFAULT_DNS_ENABLED); + if (dnsEnabled && RegistryUtils.currentUser().length() > RegistryConstants + .MAX_FQDN_LABEL_LENGTH) { + throw new IllegalArgumentException(RestApiErrorMessages + .ERROR_USER_NAME_INVALID); + } + if (StringUtils.isEmpty(service.getName())) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID); + } + + validateNameFormat(service.getName(), conf); + + // If the service has no components, throw error + if (!hasComponent(service)) { + throw new IllegalArgumentException( + "No component specified for " + service.getName()); + } + + // Validate there are no component name collisions (collisions are not + // currently supported) and add any components from external services + Configuration globalConf = service.getConfiguration(); + Set componentNames = new HashSet<>(); + List componentsToRemove = new ArrayList<>(); + List componentsToAdd = new ArrayList<>(); + for (Component comp : service.getComponents()) { + int maxCompLength = RegistryConstants.MAX_FQDN_LABEL_LENGTH; + maxCompLength = maxCompLength - Long.toString(Long.MAX_VALUE).length(); + if (dnsEnabled && comp.getName().length() > maxCompLength) { + throw new IllegalArgumentException(String.format(RestApiErrorMessages + .ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName())); + } + if (componentNames.contains(comp.getName())) { + throw new IllegalArgumentException("Component name collision: " + + comp.getName()); + } + // If artifact is of type SERVICE (which cannot be filled from + // global), read external service and add its components to this + // service + if (comp.getArtifact() != null && comp.getArtifact().getType() == + Artifact.TypeEnum.SERVICE) { + if (StringUtils.isEmpty(comp.getArtifact().getId())) { + throw new IllegalArgumentException( + 
RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID); + } + LOG.info("Marking {} for removal", comp.getName()); + componentsToRemove.add(comp); + List externalComponents = getComponents(fs, + comp.getArtifact().getId()); + for (Component c : externalComponents) { + Component override = service.getComponent(c.getName()); + if (override != null && override.getArtifact() == null) { + // allow properties from external components to be overridden / + // augmented by properties in this component, except for artifact + // which must be read from external component + override.mergeFrom(c); + LOG.info("Merging external component {} from external {}", c + .getName(), comp.getName()); + } else { + if (componentNames.contains(c.getName())) { + throw new IllegalArgumentException("Component name collision: " + + c.getName()); + } + componentNames.add(c.getName()); + componentsToAdd.add(c); + LOG.info("Adding component {} from external {}", c.getName(), + comp.getName()); + } + } + } else { + // otherwise handle as a normal component + componentNames.add(comp.getName()); + // configuration + comp.getConfiguration().mergeFrom(globalConf); + } + } + service.getComponents().removeAll(componentsToRemove); + service.getComponents().addAll(componentsToAdd); + + // Validate components and let global values take effect if component level + // values are not provided + Artifact globalArtifact = service.getArtifact(); + Resource globalResource = service.getResource(); + for (Component comp : service.getComponents()) { + // fill in global artifact unless it is type SERVICE + if (comp.getArtifact() == null && service.getArtifact() != null + && service.getArtifact().getType() != Artifact.TypeEnum + .SERVICE) { + comp.setArtifact(globalArtifact); + } + // fill in global resource + if (comp.getResource() == null) { + comp.setResource(globalResource); + } + // validate dependency existence + if (comp.getDependencies() != null) { + for (String dependency : comp.getDependencies()) { + if 
(!componentNames.contains(dependency)) { + throw new IllegalArgumentException(String.format( + RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, dependency, + comp.getName())); + } + } + } + validateComponent(comp, fs.getFileSystem(), conf); + } + + // validate dependency tree + sortByDependencies(service.getComponents()); + + // Service lifetime if not specified, is set to unlimited lifetime + if (service.getLifetime() == null) { + service.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME); + } + } + + private static void validateComponent(Component comp, FileSystem fs, + org.apache.hadoop.conf.Configuration conf) + throws IOException { + validateNameFormat(comp.getName(), conf); + + AbstractClientProvider compClientProvider = ProviderFactory + .getClientProvider(comp.getArtifact()); + compClientProvider.validateArtifact(comp.getArtifact(), fs); + + if (comp.getLaunchCommand() == null && (comp.getArtifact() == null || comp + .getArtifact().getType() != Artifact.TypeEnum.DOCKER)) { + throw new IllegalArgumentException(RestApiErrorMessages + .ERROR_ABSENT_LAUNCH_COMMAND); + } + + validateServiceResource(comp.getResource(), comp); + + if (comp.getNumberOfContainers() == null + || comp.getNumberOfContainers() < 0) { + throw new IllegalArgumentException(String.format( + RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID + + ": " + comp.getNumberOfContainers(), comp.getName())); + } + compClientProvider.validateConfigFiles(comp.getConfiguration() + .getFiles(), fs); + + MonitorUtils.getProbe(comp.getReadinessCheck()); + } + + // Check component or service name format and transform to lower case. 
+ public static void validateNameFormat(String name, + org.apache.hadoop.conf.Configuration conf) { + if (StringUtils.isEmpty(name)) { + throw new IllegalArgumentException("Name can not be empty!"); + } + // validate component name + if (name.contains("_")) { + throw new IllegalArgumentException( + "Invalid format: " + name + + ", can not use '_', as DNS hostname does not allow '_'. Use '-' Instead. "); + } + boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED, + RegistryConstants.DEFAULT_DNS_ENABLED); + if (dnsEnabled && name.length() > RegistryConstants.MAX_FQDN_LABEL_LENGTH) { + throw new IllegalArgumentException(String + .format("Invalid format %s, must be no more than 63 characters ", + name)); + } + namePattern.validate(name); + } + + @VisibleForTesting + public static List getComponents(SliderFileSystem + fs, String serviceName) throws IOException { + return loadService(fs, serviceName).getComponents(); + } + + public static Service loadService(SliderFileSystem fs, String + serviceName) throws IOException { + Path serviceJson = getServiceJsonPath(fs, serviceName); + LOG.info("Loading service definition from " + serviceJson); + return jsonSerDeser.load(fs.getFileSystem(), serviceJson); + } + + public static Service loadServiceFrom(SliderFileSystem fs, + Path appDefPath) throws IOException { + LOG.info("Loading service definition from " + appDefPath); + return jsonSerDeser.load(fs.getFileSystem(), appDefPath); + } + + public static Path getServiceJsonPath(SliderFileSystem fs, String serviceName) { + Path serviceDir = fs.buildClusterDirPath(serviceName); + return new Path(serviceDir, serviceName + ".json"); + } + + private static void validateServiceResource(Resource resource, + Component comp) { + // Only services/components of type SERVICE can skip resource requirement + if (resource == null) { + throw new IllegalArgumentException( + comp == null ? 
RestApiErrorMessages.ERROR_RESOURCE_INVALID : String + .format(RestApiErrorMessages.ERROR_RESOURCE_FOR_COMP_INVALID, + comp.getName())); + } + // One and only one of profile OR cpus & memory can be specified. Specifying + // both raises validation error. + if (StringUtils.isNotEmpty(resource.getProfile()) && ( + resource.getCpus() != null || StringUtils + .isNotEmpty(resource.getMemory()))) { + throw new IllegalArgumentException(comp == null ? + RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED : + String.format( + RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED, + comp.getName())); + } + // Currently resource profile is not supported yet, so we will raise + // validation error if only resource profile is specified + if (StringUtils.isNotEmpty(resource.getProfile())) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET); + } + + String memory = resource.getMemory(); + Integer cpus = resource.getCpus(); + if (StringUtils.isEmpty(memory)) { + throw new IllegalArgumentException( + comp == null ? RestApiErrorMessages.ERROR_RESOURCE_MEMORY_INVALID : + String.format( + RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID, + comp.getName())); + } + if (cpus == null) { + throw new IllegalArgumentException( + comp == null ? RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID : + String.format( + RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID, + comp.getName())); + } + if (cpus <= 0) { + throw new IllegalArgumentException(comp == null ? 
+ RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID_RANGE : String + .format( + RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE, + comp.getName())); + } + } + + // check if comp mem size exceeds cluster limit + public static void validateCompResourceSize( + org.apache.hadoop.yarn.api.records.Resource maxResource, + Service service) throws YarnException { + for (Component component : service.getComponents()) { + // only handle mem now. + long mem = Long.parseLong(component.getResource().getMemory()); + if (mem > maxResource.getMemorySize()) { + throw new YarnException( + "Component " + component.getName() + " memory size (" + mem + + ") is larger than configured max container memory size (" + + maxResource.getMemorySize() + ")"); + } + } + } + + private static boolean hasComponent(Service service) { + if (service.getComponents() == null || service.getComponents() + .isEmpty()) { + return false; + } + return true; + } + + public static Collection sortByDependencies(List + components) { + Map sortedComponents = + sortByDependencies(components, null); + return sortedComponents.values(); + } + + /** + * Each internal call of sortByDependencies will identify all of the + * components with the same dependency depth (the lowest depth that has not + * been processed yet) and add them to the sortedComponents list, preserving + * their original ordering in the components list. + * + * So the first time it is called, all components with no dependencies + * (depth 0) will be identified. The next time it is called, all components + * that have dependencies only on the depth 0 components will be + * identified (depth 1). This will be repeated until all components have + * been added to the sortedComponents list. If no new components are + * identified but the sortedComponents list is not complete, an error is + * thrown.
+ */ + private static Map sortByDependencies(List + components, Map sortedComponents) { + if (sortedComponents == null) { + sortedComponents = new LinkedHashMap<>(); + } + + Map componentsToAdd = new LinkedHashMap<>(); + List componentsSkipped = new ArrayList<>(); + for (Component component : components) { + String name = component.getName(); + if (sortedComponents.containsKey(name)) { + continue; + } + boolean dependenciesAlreadySorted = true; + if (!ServiceUtils.isEmpty(component.getDependencies())) { + for (String dependency : component.getDependencies()) { + if (!sortedComponents.containsKey(dependency)) { + dependenciesAlreadySorted = false; + break; + } + } + } + if (dependenciesAlreadySorted) { + componentsToAdd.put(name, component); + } else { + componentsSkipped.add(component); + } + } + + if (componentsToAdd.size() == 0) { + throw new IllegalArgumentException(String.format(RestApiErrorMessages + .ERROR_DEPENDENCY_CYCLE, componentsSkipped)); + } + sortedComponents.putAll(componentsToAdd); + if (sortedComponents.size() == components.size()) { + return sortedComponents; + } + return sortByDependencies(components, sortedComponents); + } + + public static String $(String s) { + return "${" + s +"}"; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java new file mode 100644 index 00000000000..7440b117ba5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; + + +public class ServiceRegistryUtils { + + /** + * Base path for services + */ + public static final String ZK_SERVICES = "services"; + + /** + * Base path for all Slider references + */ + public static final String ZK_SLIDER = "slider"; + public static final String ZK_USERS = "users"; + public static final String SVC_SLIDER = "/" + ZK_SERVICES + "/" + ZK_SLIDER; + public static final String SVC_SLIDER_USERS = SVC_SLIDER + "/" + ZK_USERS; + + /** + * Get the registry path for an instance under the user's home node + * @param instanceName application instance + * @return a path to the registry location for this application instance. + */ + public static String registryPathForInstance(String instanceName) { + return RegistryUtils.servicePath( + RegistryUtils.currentUser(), YarnServiceConstants.APP_TYPE, instanceName + ); + } + + /** + * Build the path to a cluster; exists once the cluster has come up. + * Even before that, a ZK watcher could wait for it. 
+ * @param username user + * @param clustername name of the cluster + * @return a string + */ + public static String mkClusterPath(String username, String clustername) { + return mkSliderUserPath(username) + "/" + clustername; + } + + /** + * Build the path to a cluster; exists once the cluster has come up. + * Even before that, a ZK watcher could wait for it. + * @param username user + * @return a string + */ + public static String mkSliderUserPath(String username) { + return SVC_SLIDER_USERS + "/" + username; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java new file mode 100644 index 00000000000..e18bcae1155 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java @@ -0,0 +1,545 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.base.Preconditions; +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.containerlaunch.ClasspathConstructor; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.FilenameFilter; +import java.io.IOException; +import java.net.ServerSocket; +import java.net.URL; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.regex.Pattern; +import java.util.zip.GZIPOutputStream; + +/** + * These are slider-specific Util methods + */ +public final class ServiceUtils { + + private static final Logger log = LoggerFactory.getLogger(ServiceUtils.class); + + private ServiceUtils() { + } + + /** + * Implementation of set-ness, groovy definition of true/false for a string + * @param s string + * @return true iff the string is neither null nor empty + */ + public static boolean isUnset(String s) { + return s == null || 
s.isEmpty(); + } + + public static boolean isSet(String s) { + return !isUnset(s); + } + + public static boolean isEmpty(Collection l) { + return l == null || l.isEmpty(); + } + + /** + * Find a containing JAR + * @param clazz class to find + * @return the file + * @throws IOException any IO problem, including the class not having a + * classloader + * @throws FileNotFoundException if the class did not resolve to a file + */ + public static File findContainingJarOrFail(Class clazz) throws IOException { + File localFile = ServiceUtils.findContainingJar(clazz); + if (null == localFile) { + throw new FileNotFoundException("Could not find JAR containing " + clazz); + } + return localFile; + } + + + /** + * Find a containing JAR + * @param my_class class to find + * @return the file or null if it is not found + * @throws IOException any IO problem, including the class not having a + * classloader + */ + public static File findContainingJar(Class my_class) throws IOException { + ClassLoader loader = my_class.getClassLoader(); + if (loader == null) { + throw new IOException( + "Class " + my_class + " does not have a classloader!"); + } + String class_file = my_class.getName().replaceAll("\\.", "/") + ".class"; + Enumeration urlEnumeration = loader.getResources(class_file); + for (; urlEnumeration.hasMoreElements(); ) { + URL url = urlEnumeration.nextElement(); + if ("jar".equals(url.getProtocol())) { + String toReturn = url.getPath(); + if (toReturn.startsWith("file:")) { + toReturn = toReturn.substring("file:".length()); + } + // URLDecoder is a misnamed class, since it actually decodes + // x-www-form-urlencoded MIME type rather than actual + // URL encoding (which the file path has). Therefore it would + // decode +s to ' 's which is incorrect (spaces are actually + // either unencoded or encoded as "%20"). Replace +s first, so + // that they are kept sacred during the decoding process. 
+ toReturn = toReturn.replaceAll("\\+", "%2B"); + toReturn = URLDecoder.decode(toReturn, "UTF-8"); + String jarFilePath = toReturn.replaceAll("!.*$", ""); + return new File(jarFilePath); + } else { + log.info("could not locate JAR containing {} URL={}", my_class, url); + } + } + return null; + } + + /** + * Copy a file to a new FS -both paths must be qualified. + * @param conf conf file + * @param srcFile src file + * @param destFile dest file + */ + @SuppressWarnings("deprecation") + public static void copy(Configuration conf, + Path srcFile, + Path destFile) throws + IOException, + BadClusterStateException { + FileSystem srcFS = FileSystem.get(srcFile.toUri(), conf); + //list all paths in the src. + if (!srcFS.exists(srcFile)) { + throw new FileNotFoundException("Source file not found " + srcFile); + } + if (!srcFS.isFile(srcFile)) { + throw new FileNotFoundException( + "Source file not a file " + srcFile); + } + FileSystem destFS = FileSystem.get(destFile.toUri(), conf); + FileUtil.copy(srcFS, srcFile, destFS, destFile, false, true, conf); + } + + /** + * Take a collection, return a list containing the string value of every + * element in the collection. 
+ * @param c collection + * @return a stringified list + */ + public static List collectionToStringList(Collection c) { + List l = new ArrayList<>(c.size()); + for (Object o : c) { + l.add(o.toString()); + } + return l; + } + + /** + * Join an collection of objects with a separator that appears after every + * instance in the list -including at the end + * @param collection collection to call toString() on each element + * @param separator separator string + * @return the joined entries + */ + public static String join(Collection collection, String separator) { + return join(collection, separator, true); + } + + /** + * Join an collection of objects with a separator that appears after every + * instance in the list -optionally at the end + * @param collection collection to call toString() on each element + * @param separator separator string + * @param trailing add a trailing entry or not + * @return the joined entries + */ + public static String join(Collection collection, + String separator, + boolean trailing) { + StringBuilder b = new StringBuilder(); + // fast return on empty collection + if (collection.isEmpty()) { + return trailing ? separator : ""; + } + for (Object o : collection) { + b.append(o); + b.append(separator); + } + int length = separator.length(); + String s = b.toString(); + return (trailing || s.isEmpty()) ? 
+ s : (b.substring(0, b.length() - length)); + } + + /** + * Join an array of strings with a separator that appears after every + * instance in the list -optionally at the end + * @param collection strings + * @param separator separator string + * @param trailing add a trailing entry or not + * @return the joined entries + */ + public static String join(String[] collection, String separator, + boolean trailing) { + return join(Arrays.asList(collection), separator, trailing); + } + + /** + * Resolve a mandatory environment variable + * @param key env var + * @return the resolved value + * @throws BadClusterStateException + */ + public static String mandatoryEnvVariable(String key) throws + BadClusterStateException { + String v = System.getenv(key); + if (v == null) { + throw new BadClusterStateException("Missing Environment variable " + key); + } + return v; + } + + /** + * Generic map merge logic + * @param first first map + * @param second second map + * @param key type + * @param value type + * @return 'first' merged with the second + */ + public static Map mergeMapsIgnoreDuplicateKeys(Map first, + Map second) { + Preconditions.checkArgument(first != null, "Null 'first' value"); + Preconditions.checkArgument(second != null, "Null 'second' value"); + for (Map.Entry entry : second.entrySet()) { + T1 key = entry.getKey(); + if (!first.containsKey(key)) { + first.put(key, entry.getValue()); + } + } + return first; + } + + /** + * Convert a map to a multi-line string for printing + * @param map map to stringify + * @return a string representation of the map + */ + public static String stringifyMap(Map map) { + StringBuilder builder = new StringBuilder(); + for (Map.Entry entry : map.entrySet()) { + builder.append(entry.getKey()) + .append("=\"") + .append(entry.getValue()) + .append("\"\n"); + + } + return builder.toString(); + } + + /** + * Convert a YARN URL into a string value of a normal URL + * @param url URL + * @return string representation + */ + public static
String stringify(org.apache.hadoop.yarn.api.records.URL url) { + StringBuilder builder = new StringBuilder(); + builder.append(url.getScheme()).append("://"); + if (url.getHost() != null) { + builder.append(url.getHost()).append(":").append(url.getPort()); + } + builder.append(url.getFile()); + return builder.toString(); + } + + /** + * Get a random open port + * @return true if the port was available for listening on + */ + public static int getOpenPort() throws IOException { + ServerSocket socket = null; + try { + socket = new ServerSocket(0); + return socket.getLocalPort(); + } finally { + if (socket != null) { + socket.close(); + } + } + } + + /** + * See if a port is available for listening on by trying to listen + * on it and seeing if that works or fails. + * @param port port to listen to + * @return true if the port was available for listening on + */ + public static boolean isPortAvailable(int port) { + try { + ServerSocket socket = new ServerSocket(port); + socket.close(); + return true; + } catch (IOException e) { + return false; + } + } + + // Build env map: key -> value; + // value will be replaced by the corresponding value in tokenMap, if any. 
+ public static Map buildEnvMap( + org.apache.hadoop.yarn.service.api.records.Configuration conf, + Map tokenMap) { + if (tokenMap == null) { + return conf.getEnv(); + } + Map env = new HashMap<>(); + for (Map.Entry entry : conf.getEnv().entrySet()) { + String key = entry.getKey(); + String val = entry.getValue(); + for (Map.Entry token : tokenMap.entrySet()) { + val = val.replaceAll(Pattern.quote(token.getKey()), + token.getValue()); + } + env.put(key,val); + } + return env; + } + + public static String[] getLibDirs() { + String libDirStr = System.getProperty(YarnServiceConstants.PROPERTY_LIB_DIR); + if (isUnset(libDirStr)) { + return ArrayUtils.EMPTY_STRING_ARRAY; + } + return StringUtils.split(libDirStr, ','); + } + + /** + * Submit a JAR containing a specific class and map it + * @param providerResources provider map to build up + * @param sliderFileSystem remote fs + * @param clazz class to look for + * @param libdir lib directory + * @param jarName At the destination + * @return the local resource ref + * @throws IOException trouble copying to HDFS + */ + public static LocalResource putJar(Map providerResources, + SliderFileSystem sliderFileSystem, + Class clazz, + Path tempPath, + String libdir, + String jarName + ) + throws IOException, SliderException { + LocalResource res = sliderFileSystem.submitJarWithClass( + clazz, + tempPath, + libdir, + jarName); + providerResources.put(libdir + "/" + jarName, res); + return res; + } + + /** + * Submit a JAR containing and map it + * @param providerResources provider map to build up + * @param sliderFileSystem remote fs + * @param libDir lib directory + * @param srcPath copy jars from + */ + public static void putAllJars(Map providerResources, + SliderFileSystem sliderFileSystem, + Path tempPath, + String libDir, + String srcPath) throws IOException, SliderException { + log.debug("Loading all dependencies from {}", srcPath); + if (ServiceUtils.isSet(srcPath)) { + File srcFolder = new File(srcPath); + FilenameFilter 
jarFilter = createJarFilter(); + File[] listOfJars = srcFolder.listFiles(jarFilter); + if (listOfJars == null || listOfJars.length == 0) { + return; + } + for (File jarFile : listOfJars) { + LocalResource res = sliderFileSystem.submitFile(jarFile, tempPath, libDir, jarFile.getName()); + providerResources.put(libDir + "/" + jarFile.getName(), res); + } + } + } + + /** + * Accept all filenames ending with {@code .jar} + * @return a filename filter + */ + public static FilenameFilter createJarFilter() { + return new FilenameFilter() { + public boolean accept(File dir, String name) { + return name.toLowerCase(Locale.ENGLISH).endsWith(".jar"); + } + }; + } + + /** + * Create a file:// path from a local file + * @param file file to point the path + * @return a new Path + */ + public static Path createLocalPath(File file) { + return new Path(file.toURI()); + } + + /** + * Build up the classpath for execution + * -behaves very differently on a mini test cluster vs a production + * production one. 
+ * + * @param sliderConfDir relative path to the dir containing slider config + * options to put on the classpath -or null + * @param libdir directory containing the JAR files + * @param usingMiniMRCluster flag to indicate the MiniMR cluster is in use + * (and hence the current classpath should be used, not anything built up) + * @return a classpath + */ + public static ClasspathConstructor buildClasspath(String sliderConfDir, + String libdir, + SliderFileSystem sliderFileSystem, + boolean usingMiniMRCluster) { + + ClasspathConstructor classpath = new ClasspathConstructor(); + classpath.append(YarnServiceConstants.YARN_SERVICE_LOG4J_FILENAME); + + // add the runtime classpath needed for tests to work + if (usingMiniMRCluster) { + // for mini cluster we pass down the java CP properties + // and nothing else + classpath.appendAll(classpath.localJVMClasspath()); + } else { + if (sliderConfDir != null) { + classpath.addClassDirectory(sliderConfDir); + } + classpath.addLibDir(libdir); + if (sliderFileSystem.isFile(sliderFileSystem.getDependencyTarGzip())) { + classpath.addLibDir(YarnServiceConstants.DEPENDENCY_LOCALIZED_DIR_LINK); + } + classpath.addRemoteClasspathEnvVar(); + classpath.append(ApplicationConstants.Environment.HADOOP_CONF_DIR.$$()); + } + return classpath; + } + + /** + * Given a source folder create a tar.gz file + * + * @param libDirs + * @param tarGzipFile + * + * @throws IOException + */ + public static void tarGzipFolder(String[] libDirs, File tarGzipFile, + FilenameFilter filter) throws IOException { + log.info("Tar-gzipping folders {} to {}", libDirs, + tarGzipFile.getAbsolutePath()); + + try(TarArchiveOutputStream taos = + new TarArchiveOutputStream(new GZIPOutputStream( + new BufferedOutputStream(new FileOutputStream(tarGzipFile))))) { + for (String libDir : libDirs) { + File srcFolder = new File(libDir); + List files = new ArrayList<>(); + generateFileList(files, srcFolder, srcFolder, true, filter); + for (String file : files) { + File srcFile 
= new File(srcFolder, file); + TarArchiveEntry tarEntry = new TarArchiveEntry( + srcFile, file); + taos.putArchiveEntry(tarEntry); + try(FileInputStream in = new FileInputStream(srcFile)) { + org.apache.commons.io.IOUtils.copy(in, taos); + } + taos.flush(); + taos.closeArchiveEntry(); + } + } + } + } + + private static void generateFileList(List fileList, File node, + File rootFolder, Boolean relative, FilenameFilter filter) { + if (node.isFile()) { + String fileFullPath = node.toString(); + if (relative) { + fileList.add(fileFullPath.substring(rootFolder.toString().length() + 1, + fileFullPath.length())); + } else { + fileList.add(fileFullPath); + } + } + + if (node.isDirectory()) { + String[] subNode = node.list(filter); + if (subNode == null || subNode.length == 0) { + return; + } + for (String filename : subNode) { + generateFileList(fileList, new File(node, filename), rootFolder, + relative, filter); + } + } + } + + public static String createNameTag(String name) { + return "Name: " + name; + } + + public static String createVersionTag(String version) { + return "Version: " + version; + } + + public static String createDescriptionTag(String description) { + return "Description: " + description; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java new file mode 100644 index 00000000000..d6d664ea2e6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import java.io.IOException; + +/** + * Extends Core Filesystem with operations to manipulate ClusterDescription + * persistent state + */ +public class SliderFileSystem extends CoreFileSystem { + + Path appDir = null; + + public SliderFileSystem(FileSystem fileSystem, + Configuration configuration) { + super(fileSystem, configuration); + } + + public SliderFileSystem(Configuration configuration) throws IOException { + super(configuration); + } + + public void setAppDir(Path appDir) { + this.appDir = appDir; + } + + public Path getAppDir() { + return this.appDir; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java new file mode 100644 index 00000000000..c6e85252593 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.net.HostAndPort; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.service.exceptions.BadConfigException; + +import java.util.ArrayList; +import java.util.List; + +public class ZookeeperUtils { + public static final int DEFAULT_PORT = 2181; + + public static String buildConnectionString(String zkHosts, int port) { + String zkPort = Integer.toString(port); + //parse the hosts + String[] hostlist = zkHosts.split(",", 0); + String quorum = ServiceUtils.join(hostlist, ":" + zkPort + ",", false); + return quorum; + } + + /** + * Take a quorum list and split it to (trimmed) pairs + * @param hostPortQuorumList list of form h1:port, h2:port2,... + * @return a possibly empty list of values between commas. 
They may not be + * valid hostname:port pairs + */ + public static List splitToPairs(String hostPortQuorumList) { + // split an address host:port list + String[] strings = StringUtils.getStrings(hostPortQuorumList); + int len = 0; + if (strings != null) { + len = strings.length; + } + List tuples = new ArrayList(len); + if (strings != null) { + for (String s : strings) { + tuples.add(s.trim()); + } + } + return tuples; + } + + /** + * Split a quorum list into a list of hostnames and ports + * @param hostPortQuorumList split to a list of hosts and ports + * @return a list of values + */ + public static List splitToHostsAndPorts(String hostPortQuorumList) { + // split an address host:port list + String[] strings = StringUtils.getStrings(hostPortQuorumList); + int len = 0; + if (strings != null) { + len = strings.length; + } + List list = new ArrayList(len); + if (strings != null) { + for (String s : strings) { + list.add(HostAndPort.fromString(s.trim()).withDefaultPort(DEFAULT_PORT)); + } + } + return list; + } + + /** + * Build up to a hosts only list + * @param hostAndPorts + * @return a list of the hosts only + */ + public static String buildHostsOnlyList(List hostAndPorts) { + StringBuilder sb = new StringBuilder(); + for (HostAndPort hostAndPort : hostAndPorts) { + sb.append(hostAndPort.getHostText()).append(","); + } + if (sb.length() > 0) { + sb.delete(sb.length() - 1, sb.length()); + } + return sb.toString(); + } + + public static String buildQuorumEntry(HostAndPort hostAndPort, + int defaultPort) { + String s = hostAndPort.toString(); + if (hostAndPort.hasPort()) { + return s; + } else { + return s + ":" + defaultPort; + } + } + + /** + * Build a quorum list, injecting a ":defaultPort" ref if needed on + * any entry without one + * @param hostAndPorts + * @param defaultPort + * @return + */ + public static String buildQuorum(List hostAndPorts, int defaultPort) { + List entries = new ArrayList(hostAndPorts.size()); + for (HostAndPort hostAndPort : hostAndPorts) { + 
entries.add(buildQuorumEntry(hostAndPort, defaultPort)); + } + return ServiceUtils.join(entries, ",", false); + } + + public static String convertToHostsOnlyList(String quorum) throws + BadConfigException { + List hostAndPorts = splitToHostsAndPortsStrictly(quorum); + return ZookeeperUtils.buildHostsOnlyList(hostAndPorts); + } + + public static List splitToHostsAndPortsStrictly(String quorum) throws + BadConfigException { + List hostAndPorts = + ZookeeperUtils.splitToHostsAndPorts(quorum); + if (hostAndPorts.isEmpty()) { + throw new BadConfigException("empty zookeeper quorum"); + } + return hostAndPorts; + } + + public static int getFirstPort(String quorum, int defVal) throws + BadConfigException { + List hostAndPorts = splitToHostsAndPortsStrictly(quorum); + int port = hostAndPorts.get(0).getPortOrDefault(defVal); + return port; + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto new file mode 100644 index 00000000000..0a21c240d70 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option java_package = "org.apache.hadoop.yarn.proto"; +option java_outer_classname = "ClientAMProtocol"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +package hadoop.yarn; + +service ClientAMProtocolService { + rpc flexComponents(FlexComponentsRequestProto) returns (FlexComponentsResponseProto); + rpc getStatus(GetStatusRequestProto) returns (GetStatusResponseProto); + rpc stop(StopRequestProto) returns (StopResponseProto); +} + +message FlexComponentsRequestProto { + repeated ComponentCountProto components = 1; +} + +message ComponentCountProto { + optional string name = 1; + optional int64 numberOfContainers = 2; +} + +message FlexComponentsResponseProto{ +} + +message GetStatusRequestProto { + +} +message GetStatusResponseProto { + optional string status = 1; +} + +message StopRequestProto { + +} + +message StopResponseProto { + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java new file mode 100644 index 00000000000..d343a03dfcd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import com.google.common.base.Supplier; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.hadoop.yarn.client.api.NMClient; +import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; +import org.apache.hadoop.yarn.client.api.async.NMClientAsync; +import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl; +import org.apache.hadoop.yarn.exceptions.YarnException; +import 
org.apache.hadoop.yarn.proto.ClientAMProtocol; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.component.Component; +import org.apache.hadoop.yarn.service.component.ComponentState; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; +import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; + +import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeoutException; + +import static org.mockito.Mockito.mock; + +public class MockServiceAM extends ServiceMaster { + + Service service; + // The list of containers fed by tests to be returned on + // AMRMClientCallBackHandler#onContainersAllocated + final List feedContainers = + Collections.synchronizedList(new LinkedList<>()); + + public MockServiceAM(Service service) { + super(service.getName()); + this.service = service; + } + + + @Override + protected ContainerId getAMContainerId() + throws BadClusterStateException { + return ContainerId.newContainerId(ApplicationAttemptId + .newInstance(ApplicationId.fromString(service.getId()), 1), 1); + } + + @Override + protected Path getAppDir() { + Path path = new Path(new Path("target", "apps"), service.getName()); + System.out.println("Service path: " + path); + return path; + } + + @Override + protected ServiceScheduler createServiceScheduler(ServiceContext context) + throws IOException, YarnException { + return new ServiceScheduler(context) { + + @Override + protected YarnRegistryViewForProviders createYarnRegistryOperations( + ServiceContext context, RegistryOperations registryClient) { + return mock(YarnRegistryViewForProviders.class); + } + + @Override + protected AMRMClientAsync createAMRMClient() { + AMRMClientImpl client1 = new AMRMClientImpl() { + @Override public AllocateResponse allocate(float 
progressIndicator) + throws YarnException, IOException { + + AllocateResponse.AllocateResponseBuilder builder = + AllocateResponse.newBuilder(); + synchronized (feedContainers) { + if (feedContainers.isEmpty()) { + System.out.println("Allocating........ no containers"); + return builder.build(); + } else { + // The AMRMClient will return containers for components that are + // at FLEXING state + List allocatedContainers = new LinkedList<>(); + Iterator itor = feedContainers.iterator(); + while (itor.hasNext()) { + Container c = itor.next(); + org.apache.hadoop.yarn.service.component.Component component = + componentsById.get(c.getAllocationRequestId()); + if (component.getState() == ComponentState.FLEXING) { + System.out.println("Allocated container " + c.getId()); + allocatedContainers.add(c); + itor.remove(); + } + } + return builder.allocatedContainers(allocatedContainers).build(); + } + } + } + + @Override + public RegisterApplicationMasterResponse registerApplicationMaster( + String appHostName, int appHostPort, String appTrackingUrl) { + return mock(RegisterApplicationMasterResponse.class); + } + + @Override public void unregisterApplicationMaster( + FinalApplicationStatus appStatus, String appMessage, + String appTrackingUrl) { + // Do nothing + } + }; + + return AMRMClientAsync + .createAMRMClientAsync(client1, 1000, + this.new AMRMClientCallback()); + } + + @Override + public NMClientAsync createNMClient() { + NMClientAsync nmClientAsync = super.createNMClient(); + nmClientAsync.setClient(mock(NMClient.class)); + return nmClientAsync; + } + }; + } + + @Override protected void loadApplicationJson(ServiceContext context, + SliderFileSystem fs) throws IOException { + context.service = service; + } + + /** + * + * @param service The service for the component + * @param id The id for the container + * @param compName The component to which the container is fed + * @return + */ + public Container feedContainerToComp(Service service, int id, + String compName) { 
+ ApplicationId applicationId = ApplicationId.fromString(service.getId()); + ContainerId containerId = ContainerId + .newContainerId(ApplicationAttemptId.newInstance(applicationId, 1), id); + NodeId nodeId = NodeId.newInstance("localhost", 1234); + Container container = Container + .newInstance(containerId, nodeId, "localhost", + Resource.newInstance(100, 1), Priority.newInstance(0), null); + + long allocateId = + context.scheduler.getAllComponents().get(compName).getAllocateId(); + container.setAllocationRequestId(allocateId); + synchronized (feedContainers) { + feedContainers.add(container); + } + return container; + } + + public void flexComponent(String compName, long numberOfContainers) + throws IOException { + ClientAMProtocol.ComponentCountProto componentCountProto = + ClientAMProtocol.ComponentCountProto.newBuilder().setName(compName) + .setNumberOfContainers(numberOfContainers).build(); + ClientAMProtocol.FlexComponentsRequestProto requestProto = + ClientAMProtocol.FlexComponentsRequestProto.newBuilder() + .addComponents(componentCountProto).build(); + context.clientAMService.flexComponents(requestProto); + } + + public Component getComponent(String compName) { + return context.scheduler.getAllComponents().get(compName); + } + + public void waitForDependenciesSatisfied(String compName) + throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor(new Supplier() { + @Override public Boolean get() { + return context.scheduler.getAllComponents().get(compName) + .areDependenciesReady(); + } + }, 1000, 20000); + } + + public void waitForNumDesiredContainers(String compName, + int numDesiredContainers) throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor(new Supplier() { + @Override public Boolean get() { + return context.scheduler.getAllComponents().get(compName) + .getNumDesiredInstances() == numDesiredContainers; + } + }, 1000, 20000); + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java new file mode 100644 index 00000000000..cf328809203 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java @@ -0,0 +1,262 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import org.apache.commons.io.FileUtils; +import org.apache.curator.test.TestingCluster; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.MiniYARNCluster; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.utils.JsonSerDeser; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin; +import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree; +import org.codehaus.jackson.map.PropertyNamingStrategy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.net.URL; + +import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_PMEM_CHECK_ENABLED; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_VMEM_CHECK_ENABLED; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_ENABLED; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.AM_RESOURCE_MEM; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH; +import static org.mockito.Matchers.anyObject; +import static 
org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ServiceTestUtils { + + private static final Logger LOG = + LoggerFactory.getLogger(ServiceTestUtils.class); + + private MiniYARNCluster yarnCluster = null; + private MiniDFSCluster hdfsCluster = null; + private FileSystem fs = null; + private Configuration conf = null; + public static final int NUM_NMS = 1; + private File basedir; + + public static final JsonSerDeser JSON_SER_DESER = + new JsonSerDeser<>(Service.class, + PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES); + + // Example service definition + // 2 components, each of which has 2 containers. + protected Service createExampleApplication() { + Service exampleApp = new Service(); + exampleApp.setName("example-app"); + exampleApp.addComponent(createComponent("compa")); + exampleApp.addComponent(createComponent("compb")); + return exampleApp; + } + + public static Component createComponent(String name) { + return createComponent(name, 2L, "sleep 1000"); + } + + protected static Component createComponent(String name, long numContainers, + String command) { + Component comp1 = new Component(); + comp1.setNumberOfContainers(numContainers); + comp1.setLaunchCommand(command); + comp1.setName(name); + Resource resource = new Resource(); + comp1.setResource(resource); + resource.setMemory("128"); + resource.setCpus(1); + return comp1; + } + + public static SliderFileSystem initMockFs() throws IOException { + return initMockFs(null); + } + + public static SliderFileSystem initMockFs(Service ext) throws IOException { + SliderFileSystem sfs = mock(SliderFileSystem.class); + FileSystem mockFs = mock(FileSystem.class); + JsonSerDeser jsonSerDeser = mock(JsonSerDeser.class); + when(sfs.getFileSystem()).thenReturn(mockFs); + when(sfs.buildClusterDirPath(anyObject())).thenReturn( + new Path("cluster_dir_path")); + if (ext != null) { + when(jsonSerDeser.load(anyObject(), anyObject())).thenReturn(ext); + } + 
ServiceApiUtil.setJsonSerDeser(jsonSerDeser); + return sfs; + } + + protected void setConf(YarnConfiguration conf) { + this.conf = conf; + } + + protected Configuration getConf() { + return conf; + } + + protected FileSystem getFS() { + return fs; + } + + protected MiniYARNCluster getYarnCluster() { + return yarnCluster; + } + + protected void setupInternal(int numNodeManager) + throws Exception { + LOG.info("Starting up YARN cluster"); + if (conf == null) { + setConf(new YarnConfiguration()); + } + conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128); + // reduce the teardown waiting time + conf.setLong(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 1000); + conf.set("yarn.log.dir", "target"); + // mark if we need to launch the v1 timeline server + // disable aux-service based timeline aggregators + conf.set(YarnConfiguration.NM_AUX_SERVICES, ""); + conf.set(YarnConfiguration.NM_VMEM_PMEM_RATIO, "8"); + // Enable ContainersMonitorImpl + conf.set(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, + LinuxResourceCalculatorPlugin.class.getName()); + conf.set(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, + ProcfsBasedProcessTree.class.getName()); + conf.setBoolean( + YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING, true); + conf.setBoolean(TIMELINE_SERVICE_ENABLED, false); + conf.setInt(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 100); + conf.setLong(DEBUG_NM_DELETE_DELAY_SEC, 60000); + conf.setLong(AM_RESOURCE_MEM, 526); + conf.setLong(YarnServiceConf.READINESS_CHECK_INTERVAL, 5); + // Disable vmem check to disallow NM killing the container + conf.setBoolean(NM_VMEM_CHECK_ENABLED, false); + conf.setBoolean(NM_PMEM_CHECK_ENABLED, false); + // setup zk cluster + TestingCluster zkCluster; + zkCluster = new TestingCluster(1); + zkCluster.start(); + conf.set(YarnConfiguration.RM_ZK_ADDRESS, zkCluster.getConnectString()); + conf.set(KEY_REGISTRY_ZK_QUORUM, zkCluster.getConnectString()); + LOG.info("ZK cluster: " + 
zkCluster.getConnectString()); + + fs = FileSystem.get(conf); + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + + conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath()); + + if (yarnCluster == null) { + yarnCluster = + new MiniYARNCluster(TestYarnNativeServices.class.getSimpleName(), 1, + numNodeManager, 1, 1); + yarnCluster.init(conf); + yarnCluster.start(); + + waitForNMsToRegister(); + + URL url = Thread.currentThread().getContextClassLoader() + .getResource("yarn-site.xml"); + if (url == null) { + throw new RuntimeException( + "Could not find 'yarn-site.xml' dummy file in classpath"); + } + Configuration yarnClusterConfig = yarnCluster.getConfig(); + yarnClusterConfig.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, + new File(url.getPath()).getParent()); + //write the document to a buffer (not directly to the file, as that + //can cause the file being written to get read -which will then fail. + ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); + yarnClusterConfig.writeXml(bytesOut); + bytesOut.close(); + //write the bytes to the file in the classpath + OutputStream os = new FileOutputStream(new File(url.getPath())); + os.write(bytesOut.toByteArray()); + os.close(); + LOG.info("Write yarn-site.xml configs to: " + url); + } + if (hdfsCluster == null) { + HdfsConfiguration hdfsConfig = new HdfsConfiguration(); + hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig) + .numDataNodes(1).build(); + } + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + LOG.info("setup thread sleep interrupted. 
message=" + e.getMessage()); + } + } + + public void shutdown() throws IOException { + if (yarnCluster != null) { + try { + yarnCluster.stop(); + } finally { + yarnCluster = null; + } + } + if (hdfsCluster != null) { + try { + hdfsCluster.shutdown(); + } finally { + hdfsCluster = null; + } + } + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + SliderFileSystem sfs = new SliderFileSystem(conf); + Path appDir = sfs.getBaseApplicationPath(); + sfs.getFileSystem().delete(appDir, true); + } + + private void waitForNMsToRegister() throws Exception { + int sec = 60; + while (sec >= 0) { + if (yarnCluster.getResourceManager().getRMContext().getRMNodes().size() + >= NUM_NMS) { + break; + } + Thread.sleep(1000); + sec--; + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java new file mode 100644 index 00000000000..55c096e8ab3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java @@ -0,0 +1,480 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME; +import static org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +/** + * Test for ServiceApiUtil helper methods. 
+ */ +public class TestServiceApiUtil { + private static final Logger LOG = LoggerFactory + .getLogger(TestServiceApiUtil.class); + private static final String EXCEPTION_PREFIX = "Should have thrown " + + "exception: "; + private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " + + "exception: "; + + private static final String LEN_64_STR = + "abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01"; + + private static final YarnConfiguration CONF_DEFAULT_DNS = new + YarnConfiguration(); + private static final YarnConfiguration CONF_DNS_ENABLED = new + YarnConfiguration(); + + @BeforeClass + public static void init() { + CONF_DNS_ENABLED.setBoolean(RegistryConstants.KEY_DNS_ENABLED, true); + } + + @Test(timeout = 90000) + public void testResourceValidation() throws Exception { + assertEquals(RegistryConstants.MAX_FQDN_LABEL_LENGTH + 1, LEN_64_STR + .length()); + + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + + Service app = new Service(); + + // no name + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no name"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage()); + } + + // bad format name + String[] badNames = {"4finance", "Finance", "finance@home", LEN_64_STR}; + for (String badName : badNames) { + app.setName(badName); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with bad name " + badName); + } catch (IllegalArgumentException e) { + + } + } + + // launch command not specified + app.setName(LEN_64_STR); + Component comp = new Component().name("comp1"); + app.addComponent(comp); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DEFAULT_DNS); + Assert.fail(EXCEPTION_PREFIX + "service with no launch command"); + } catch (IllegalArgumentException e) { + 
assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, + e.getMessage()); + } + + // launch command not specified + app.setName(LEN_64_STR.substring(0, RegistryConstants + .MAX_FQDN_LABEL_LENGTH)); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no launch command"); + } catch (IllegalArgumentException e) { + assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, + e.getMessage()); + } + + // memory not specified + comp.setLaunchCommand("sleep 1"); + Resource res = new Resource(); + app.setResource(res); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no memory"); + } catch (IllegalArgumentException e) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID, + comp.getName()), e.getMessage()); + } + + // invalid no of cpus + res.setMemory("100mb"); + res.setCpus(-2); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail( + EXCEPTION_PREFIX + "service with invalid no of cpus"); + } catch (IllegalArgumentException e) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE, + comp.getName()), e.getMessage()); + } + + // number of containers not specified + res.setCpus(2); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no container count"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(e.getMessage() + .contains(ERROR_CONTAINERS_COUNT_INVALID)); + } + + // specifying profile along with cpus/memory raises exception + res.setProfile("hbase_finance_large"); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + + "service with resource profile along with cpus/memory"); + } catch (IllegalArgumentException e) { + 
assertEquals(String.format(RestApiErrorMessages + .ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED, + comp.getName()), + e.getMessage()); + } + + // currently resource profile alone is not supported. + // TODO: remove the next test once resource profile alone is supported. + res.setCpus(null); + res.setMemory(null); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with resource profile only"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET, + e.getMessage()); + } + + // unset profile here and add cpus/memory back + res.setProfile(null); + res.setCpus(2); + res.setMemory("2gb"); + + // null number of containers + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "null number of containers"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(e.getMessage() + .startsWith(ERROR_CONTAINERS_COUNT_INVALID)); + } + } + + @Test + public void testArtifacts() throws IOException { + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + + Service app = new Service(); + app.setName("service1"); + Resource res = new Resource(); + app.setResource(res); + res.setMemory("512M"); + + // no artifact id fails with default type + Artifact artifact = new Artifact(); + app.setArtifact(artifact); + Component comp = ServiceTestUtils.createComponent("comp1"); + + app.setComponents(Collections.singletonList(comp)); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no artifact id"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); + } + + // no artifact id fails with SERVICE type + artifact.setType(Artifact.TypeEnum.SERVICE); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no 
artifact id"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); + } + + // no artifact id fails with TARBALL type + artifact.setType(Artifact.TypeEnum.TARBALL); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with no artifact id"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); + } + + // everything valid here + artifact.setType(Artifact.TypeEnum.DOCKER); + artifact.setId("docker.io/centos:centos7"); + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + LOG.error("service attributes specified should be valid here", e); + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(app.getLifetime(), DEFAULT_UNLIMITED_LIFETIME); + } + + private static Resource createValidResource() { + Resource res = new Resource(); + res.setMemory("512M"); + return res; + } + + private static Component createValidComponent(String compName) { + Component comp = new Component(); + comp.setName(compName); + comp.setResource(createValidResource()); + comp.setNumberOfContainers(1L); + comp.setLaunchCommand("sleep 1"); + return comp; + } + + private static Service createValidApplication(String compName) { + Service app = new Service(); + app.setName("name"); + app.setResource(createValidResource()); + if (compName != null) { + app.addComponent(createValidComponent(compName)); + } + return app; + } + + @Test + public void testExternalApplication() throws IOException { + Service ext = createValidApplication("comp1"); + SliderFileSystem sfs = ServiceTestUtils.initMockFs(ext); + + Service app = createValidApplication(null); + + Artifact artifact = new Artifact(); + artifact.setType(Artifact.TypeEnum.SERVICE); + artifact.setId("id"); + app.setArtifact(artifact); + app.addComponent(ServiceTestUtils.createComponent("comp2")); + try { + 
ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(1, app.getComponents().size()); + assertNotNull(app.getComponent("comp2")); + } + + @Test + public void testDuplicateComponents() throws IOException { + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + + String compName = "comp1"; + Service app = createValidApplication(compName); + app.addComponent(createValidComponent(compName)); + + // duplicate component name fails + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with component collision"); + } catch (IllegalArgumentException e) { + assertEquals("Component name collision: " + compName, e.getMessage()); + } + } + + @Test + public void testExternalDuplicateComponent() throws IOException { + Service ext = createValidApplication("comp1"); + SliderFileSystem sfs = ServiceTestUtils.initMockFs(ext); + + Service app = createValidApplication("comp1"); + Artifact artifact = new Artifact(); + artifact.setType(Artifact.TypeEnum.SERVICE); + artifact.setId("id"); + app.getComponent("comp1").setArtifact(artifact); + + // duplicate component name okay in the case of SERVICE component + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + } + + @Test + public void testExternalComponent() throws IOException { + Service ext = createValidApplication("comp1"); + SliderFileSystem sfs = ServiceTestUtils.initMockFs(ext); + + Service app = createValidApplication("comp2"); + Artifact artifact = new Artifact(); + artifact.setType(Artifact.TypeEnum.SERVICE); + artifact.setId("id"); + app.setArtifact(artifact); + + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + 
Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(1, app.getComponents().size()); + // artifact ID not inherited from global + assertNotNull(app.getComponent("comp2")); + + // set SERVICE artifact id on component + app.getComponent("comp2").setArtifact(artifact); + + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(1, app.getComponents().size()); + // original component replaced by external component + assertNotNull(app.getComponent("comp1")); + } + + public static void verifyDependencySorting(List components, + Component... expectedSorting) { + Collection actualSorting = ServiceApiUtil.sortByDependencies( + components); + assertEquals(expectedSorting.length, actualSorting.size()); + int i = 0; + for (Component component : actualSorting) { + assertEquals(expectedSorting[i++], component); + } + } + + @Test + public void testDependencySorting() throws IOException { + Component a = ServiceTestUtils.createComponent("a"); + Component b = ServiceTestUtils.createComponent("b"); + Component c = ServiceTestUtils.createComponent("c"); + Component d = + ServiceTestUtils.createComponent("d").dependencies(Arrays.asList("c")); + Component e = ServiceTestUtils.createComponent("e") + .dependencies(Arrays.asList("b", "d")); + + verifyDependencySorting(Arrays.asList(a, b, c), a, b, c); + verifyDependencySorting(Arrays.asList(c, a, b), c, a, b); + verifyDependencySorting(Arrays.asList(a, b, c, d, e), a, b, c, d, e); + verifyDependencySorting(Arrays.asList(e, d, c, b, a), c, b, a, d, e); + + c.setDependencies(Arrays.asList("e")); + try { + verifyDependencySorting(Arrays.asList(a, b, c, d, e)); + Assert.fail(EXCEPTION_PREFIX + "components with dependency cycle"); + } catch (IllegalArgumentException ex) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_DEPENDENCY_CYCLE, Arrays.asList(c, d, + e)), 
ex.getMessage()); + } + + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + Service service = createValidApplication(null); + service.setComponents(Arrays.asList(c, d, e)); + try { + ServiceApiUtil.validateAndResolveService(service, sfs, + CONF_DEFAULT_DNS); + Assert.fail(EXCEPTION_PREFIX + "components with bad dependencies"); + } catch (IllegalArgumentException ex) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, "b", "e"), ex + .getMessage()); + } + } + + @Test + public void testInvalidComponent() throws IOException { + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + testComponent(sfs); + } + + @Test + public void testValidateCompName() { + String[] invalidNames = { + "EXAMPLE", // UPPER case not allowed + "example_app" // underscore not allowed. + }; + for (String name : invalidNames) { + try { + ServiceApiUtil.validateNameFormat(name, new Configuration()); + Assert.fail(); + } catch (IllegalArgumentException ex) { + ex.printStackTrace(); + } + } + } + + private static void testComponent(SliderFileSystem sfs) + throws IOException { + int maxLen = RegistryConstants.MAX_FQDN_LABEL_LENGTH; + assertEquals(19, Long.toString(Long.MAX_VALUE).length()); + maxLen = maxLen - Long.toString(Long.MAX_VALUE).length(); + + String compName = LEN_64_STR.substring(0, maxLen + 1); + Service app = createValidApplication(null); + app.addComponent(createValidComponent(compName)); + + // invalid component name fails if dns is enabled + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "service with invalid component name"); + } catch (IllegalArgumentException e) { + assertEquals(String.format(RestApiErrorMessages + .ERROR_COMPONENT_NAME_INVALID, maxLen, compName), e.getMessage()); + } + + // does not fail if dns is disabled + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DEFAULT_DNS); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + 
e.getMessage()); + } + + compName = LEN_64_STR.substring(0, maxLen); + app = createValidApplication(null); + app.addComponent(createValidComponent(compName)); + + // does not fail + try { + ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java new file mode 100644 index 00000000000..f98d90a6f34 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java @@ -0,0 +1,403 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.api.records.*; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Container; +import org.apache.hadoop.yarn.service.api.records.ContainerState; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.*; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.yarn.api.records.YarnApplicationState.FINISHED; + +/** + * End to end tests to test deploying services with MiniYarnCluster and a in-JVM + * ZK testing cluster. + */ +public class TestYarnNativeServices extends ServiceTestUtils { + + private static final Logger LOG = + LoggerFactory.getLogger(TestYarnNativeServices.class); + + @Rule + public TemporaryFolder tmpFolder = new TemporaryFolder(); + + @Before + public void setup() throws Exception { + File tmpYarnDir = new File("target", "tmp"); + FileUtils.deleteQuietly(tmpYarnDir); + } + + @After + public void tearDown() throws IOException { + shutdown(); + } + + // End-to-end test to use ServiceClient to deploy a service. + // 1. 
Create a service with 2 components, each of which has 2 containers + // 2. Flex up each component to 3 containers and check the component instance names + // 3. Flex down each component to 1 container and check the component instance names + // 4. Flex up each component to 2 containers and check the component instance names + // 5. Stop the service + // 6. Destroy the service + @Test (timeout = 200000) + public void testCreateFlexStopDestroyService() throws Exception { + setupInternal(NUM_NMS); + ServiceClient client = createClient(); + Service exampleApp = createExampleApplication(); + client.actionCreate(exampleApp); + SliderFileSystem fileSystem = new SliderFileSystem(getConf()); + Path appDir = fileSystem.buildClusterDirPath(exampleApp.getName()); + // check app.json is persisted. + Assert.assertTrue( + getFS().exists(new Path(appDir, exampleApp.getName() + ".json"))); + waitForAllCompToBeReady(client, exampleApp); + + // Flex two components, each from 2 container to 3 containers. + flexComponents(client, exampleApp, 3L); + // wait for flex to be completed, increase from 2 to 3 containers. + waitForAllCompToBeReady(client, exampleApp); + // check all instances name for each component are in sequential order. + checkCompInstancesInOrder(client, exampleApp); + + // flex down to 1 + flexComponents(client, exampleApp, 1L); + waitForAllCompToBeReady(client, exampleApp); + checkCompInstancesInOrder(client, exampleApp); + + // check component dir and registry are cleaned up. 
+ + // flex up again to 2 + flexComponents(client, exampleApp, 2L); + waitForAllCompToBeReady(client, exampleApp); + checkCompInstancesInOrder(client, exampleApp); + + // stop the service + LOG.info("Stop the service"); + client.actionStop(exampleApp.getName(), true); + ApplicationReport report = client.getYarnClient() + .getApplicationReport(ApplicationId.fromString(exampleApp.getId())); + // AM unregisters with RM successfully + Assert.assertEquals(FINISHED, report.getYarnApplicationState()); + Assert.assertEquals(FinalApplicationStatus.ENDED, + report.getFinalApplicationStatus()); + + LOG.info("Destroy the service"); + //destroy the service and check the app dir is deleted from fs. + client.actionDestroy(exampleApp.getName()); + // check the service dir on hdfs (in this case, local fs) is deleted. + Assert.assertFalse(getFS().exists(appDir)); + } + + // Create compa with 2 containers + // Create compb with 2 containers which depends on compa + // Check containers for compa started before containers for compb + @Test (timeout = 200000) + public void testComponentStartOrder() throws Exception { + setupInternal(NUM_NMS); + ServiceClient client = createClient(); + Service exampleApp = new Service(); + exampleApp.setName("teststartorder"); + exampleApp.addComponent(createComponent("compa", 2, "sleep 1000")); + Component compb = createComponent("compb", 2, "sleep 1000"); + + // Let compb depend on compa; + compb.setDependencies(Collections.singletonList("compa")); + exampleApp.addComponent(compb); + + client.actionCreate(exampleApp); + waitForAllCompToBeReady(client, exampleApp); + + // check that containers for compa are launched before containers for compb + checkContainerLaunchDependencies(client, exampleApp, "compa", "compb"); + + client.actionStop(exampleApp.getName(), true); + client.actionDestroy(exampleApp.getName()); + } + + // Test to verify recovery of ServiceMaster after RM is restarted. + // 1. Create an example service. + // 2. Restart RM. + // 3.
Fail the application attempt. + // 4. Verify ServiceMaster recovers. + @Test(timeout = 200000) + public void testRecoverComponentsAfterRMRestart() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true); + conf.setBoolean( + YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true); + conf.setLong(YarnConfiguration.NM_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, + 500L); + + conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true); + conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true); + setConf(conf); + setupInternal(NUM_NMS); + + ServiceClient client = createClient(); + Service exampleApp = createExampleApplication(); + client.actionCreate(exampleApp); + waitForAllCompToBeReady(client, exampleApp); + + LOG.info("Restart the resource manager"); + getYarnCluster().restartResourceManager( + getYarnCluster().getActiveRMIndex()); + GenericTestUtils.waitFor(() -> + getYarnCluster().getResourceManager().getServiceState() == + org.apache.hadoop.service.Service.STATE.STARTED, 2000, 200000); + Assert.assertTrue("node managers connected", + getYarnCluster().waitForNodeManagersToConnect(5000)); + + ApplicationId exampleAppId = ApplicationId.fromString(exampleApp.getId()); + ApplicationAttemptId applicationAttemptId = client.getYarnClient() + .getApplicationReport(exampleAppId).getCurrentApplicationAttemptId(); + + Multimap containersBeforeFailure = getContainersForAllComp( + client, exampleApp); + + LOG.info("Fail the application attempt {}", applicationAttemptId); + client.getYarnClient().failApplicationAttempt(applicationAttemptId); + //wait until attempt 2 is running + GenericTestUtils.waitFor(() -> { + try { + ApplicationReport ar = client.getYarnClient() + .getApplicationReport(exampleAppId); + return ar.getCurrentApplicationAttemptId().getAttemptId() == 2 && + ar.getYarnApplicationState() == YarnApplicationState.RUNNING; + } catch (YarnException | IOException e) { + 
throw new RuntimeException("while waiting", e); + } + }, 2000, 200000); + + Multimap containersAfterFailure = getContainersForAllComp( + client, exampleApp); + Assert.assertEquals("component container affected by restart", + containersBeforeFailure, containersAfterFailure); + + LOG.info("Stop/destroy service {}", exampleApp); + client.actionStop(exampleApp.getName(), true); + client.actionDestroy(exampleApp.getName()); + } + + // Check containers launched are in dependency order + // Get all containers into a list and sort based on container launch time e.g. + // compa-c1, compa-c2, compb-c1, compb-c2; + // check that the containers' launch times align with the dependencies. + private void checkContainerLaunchDependencies(ServiceClient client, + Service exampleApp, String... compOrder) + throws IOException, YarnException { + Service retrievedApp = client.getStatus(exampleApp.getName()); + List containerList = new ArrayList<>(); + for (Component component : retrievedApp.getComponents()) { + containerList.addAll(component.getContainers()); + } + // sort based on launchTime + containerList + .sort((o1, o2) -> o1.getLaunchTime().compareTo(o2.getLaunchTime())); + LOG.info("containerList: " + containerList); + // check the containers are in the dependency order. + int index = 0; + for (String comp : compOrder) { + long num = retrievedApp.getComponent(comp).getNumberOfContainers(); + for (int i = 0; i < num; i++) { + String compInstanceName = containerList.get(index).getComponentInstanceName(); + String compName = + compInstanceName.substring(0, compInstanceName.lastIndexOf('-')); + Assert.assertEquals(comp, compName); + index++; + } + } + } + + + private Map flexComponents(ServiceClient client, + Service exampleApp, long count) throws YarnException, IOException { + Map compCounts = new HashMap<>(); + compCounts.put("compa", count); + compCounts.put("compb", count); + // flex will update the persisted conf to reflect latest number of containers.
+ exampleApp.getComponent("compa").setNumberOfContainers(count); + exampleApp.getComponent("compb").setNumberOfContainers(count); + client.flexByRestService(exampleApp.getName(), compCounts); + return compCounts; + } + + // Check each component's comp instances name are in sequential order. + // E.g. If there are two instances compA-1 and compA-2 + // When flex up to 4 instances, it should be compA-1 , compA-2, compA-3, compA-4 + // When flex down to 3 instances, it should be compA-1 , compA-2, compA-3. + private void checkCompInstancesInOrder(ServiceClient client, + Service exampleApp) throws IOException, YarnException { + Service service = client.getStatus(exampleApp.getName()); + for (Component comp : service.getComponents()) { + checkEachCompInstancesInOrder(comp); + } + } + + private void checkRegistryAndCompDirDeleted() { + + } + + private void checkEachCompInstancesInOrder(Component component) { + long expectedNumInstances = component.getNumberOfContainers(); + Assert.assertEquals(expectedNumInstances, component.getContainers().size()); + TreeSet instances = new TreeSet<>(); + for (Container container : component.getContainers()) { + instances.add(container.getComponentInstanceName()); + } + + int i = 0; + for (String s : instances) { + Assert.assertEquals(component.getName() + "-" + i, s); + i++; + } + } + + private void waitForOneCompToBeReady(ServiceClient client, + Service exampleApp, String readyComp) + throws TimeoutException, InterruptedException { + long numExpectedContainers = + exampleApp.getComponent(readyComp).getNumberOfContainers(); + GenericTestUtils.waitFor(() -> { + try { + Service retrievedApp = client.getStatus(exampleApp.getName()); + Component retrievedComp = retrievedApp.getComponent(readyComp); + + if (retrievedComp.getContainers() != null + && retrievedComp.getContainers().size() == numExpectedContainers) { + LOG.info(readyComp + " found " + numExpectedContainers + + " containers running"); + return true; + } else { + LOG.info(" 
Waiting for " + readyComp + "'s containers to be running"); + return false; + } + } catch (Exception e) { + e.printStackTrace(); + return false; + } + }, 2000, 200000); + } + + // wait until all the containers for all components become ready state + private void waitForAllCompToBeReady(ServiceClient client, + Service exampleApp) throws TimeoutException, InterruptedException { + int expectedTotalContainers = countTotalContainers(exampleApp); + GenericTestUtils.waitFor(() -> { + try { + Service retrievedApp = client.getStatus(exampleApp.getName()); + int totalReadyContainers = 0; + LOG.info("Num Components " + retrievedApp.getComponents().size()); + for (Component component : retrievedApp.getComponents()) { + LOG.info("looking for " + component.getName()); + LOG.info(component.toString()); + if (component.getContainers() != null) { + if (component.getContainers().size() == exampleApp + .getComponent(component.getName()).getNumberOfContainers()) { + for (Container container : component.getContainers()) { + LOG.info( + "Container state " + container.getState() + ", component " + + component.getName()); + if (container.getState() == ContainerState.READY) { + totalReadyContainers++; + LOG.info("Found 1 ready container " + container.getId()); + } + } + } else { + LOG.info(component.getName() + " Expected number of containers " + + exampleApp.getComponent(component.getName()) + .getNumberOfContainers() + ", current = " + component + .getContainers()); + } + } + } + LOG.info("Exit loop, totalReadyContainers= " + totalReadyContainers + + " expected = " + expectedTotalContainers); + return totalReadyContainers == expectedTotalContainers; + } catch (Exception e) { + e.printStackTrace(); + return false; + } + }, 2000, 200000); + } + + /** + * Get all containers of a service. 
+ */ + private Multimap getContainersForAllComp(ServiceClient client, + Service example) throws IOException, YarnException { + + Multimap allContainers = HashMultimap.create(); + Service retrievedApp = client.getStatus(example.getName()); + retrievedApp.getComponents().forEach(component -> { + if (component.getContainers() != null) { + component.getContainers().forEach(container -> { + allContainers.put(component.getName(), container.getId()); + }); + } + }); + return allContainers; + } + + private ServiceClient createClient() throws Exception { + ServiceClient client = new ServiceClient() { + @Override protected Path addJarResource(String appName, + Map localResources) + throws IOException, SliderException { + // do nothing, the Unit test will use local jars + return null; + } + }; + client.init(getConf()); + client.start(); + return client; + } + + + private int countTotalContainers(Service service) { + int totalContainers = 0; + for (Component component : service.getComponents()) { + totalContainers += component.getNumberOfContainers(); + } + return totalContainers; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java new file mode 100644 index 00000000000..1f4581ec55c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.client; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.client.api.AppAdminClient; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.conf.ExampleAppJson; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH; + +/** + * Test for building / resolving components of type SERVICE. + */ +public class TestBuildExternalComponents { + + protected Configuration conf = new YarnConfiguration(); + private File basedir; + + // Check component names match with expected + private static void checkComponentNames(List components, + Set expectedComponents) { + Assert.assertEquals(expectedComponents.size(), components.size()); + for (Component comp : components) { + Assert.assertTrue(expectedComponents.contains(comp.getName())); + } + } + + // 1. Build the def file and store on fs + // 2. 
check component names + private void buildAndCheckComponents(String appName, String appDef, + SliderFileSystem sfs, Set names) throws Throwable { + AppAdminClient client = AppAdminClient.createAppAdminClient(AppAdminClient + .DEFAULT_TYPE, conf); + client.actionSave(ExampleAppJson.resourceName(appDef), null, null, + null); + + // verify generated conf + List components = + ServiceApiUtil.getComponents(sfs, appName); + checkComponentNames(components, names); + } + + @Before + public void setup() throws IOException { + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath()); + } + + @After + public void tearDown() throws IOException { + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + } + + // Test applications defining external components(SERVICE type) + // can be resolved correctly + @Test + public void testExternalComponentBuild() throws Throwable { + SliderFileSystem sfs = new SliderFileSystem(conf); + + Set nameSet = new HashSet<>(); + nameSet.add("simple"); + nameSet.add("master"); + nameSet.add("worker"); + + // app-1 has 3 components: simple, master, worker + buildAndCheckComponents("app-1", ExampleAppJson.APP_JSON, sfs, nameSet); + buildAndCheckComponents("external-0", ExampleAppJson.EXTERNAL_JSON_0, sfs, + nameSet); + + nameSet.add("other"); + + // external1 has 3 components: simple(SERVICE - app1), master and other + buildAndCheckComponents("external-1", ExampleAppJson.EXTERNAL_JSON_1, sfs, + nameSet); + + nameSet.add("another"); + + // external2 has 2 components: ext(SERVICE - external1), another + buildAndCheckComponents("external-2", ExampleAppJson.EXTERNAL_JSON_2, sfs, + nameSet); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java new file mode 100644 index 00000000000..df4b1df8307 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.client; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.yarn.client.cli.ApplicationCLI; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.conf.ExampleAppJson; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH; + +public class TestServiceCLI { + private static final Logger LOG = LoggerFactory.getLogger(TestServiceCLI + .class); + + private Configuration conf = new YarnConfiguration(); + private File basedir; + private SliderFileSystem fs; + private String basedirProp; + + private void runCLI(String[] args) throws Exception { + LOG.info("running CLI: yarn {}", Arrays.asList(args)); + ApplicationCLI cli = new ApplicationCLI(); + cli.setSysOutPrintStream(System.out); + cli.setSysErrPrintStream(System.err); + int res = ToolRunner.run(cli, ApplicationCLI.preProcessArgs(args)); + cli.stop(); + } + + private void buildApp(String serviceName, String appDef) throws Throwable { + String[] args = {"app", "-D", basedirProp, "-save", serviceName, + ExampleAppJson.resourceName(appDef)}; + runCLI(args); + } + + private void buildApp(String serviceName, String appDef, String lifetime, + String queue) throws Throwable { + String[] args = {"app", "-D", basedirProp, "-save", serviceName, + ExampleAppJson.resourceName(appDef), "-updateLifetime", 
lifetime, + "-changeQueue", queue}; + runCLI(args); + } + + @Before + public void setup() throws Throwable { + basedir = new File("target", "apps"); + basedirProp = YARN_SERVICE_BASE_PATH + "=" + basedir.getAbsolutePath(); + conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath()); + fs = new SliderFileSystem(conf); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + } + + @After + public void tearDown() throws IOException { + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + } + + @Test + public void testFlexComponents() throws Throwable { + // currently can only test building apps, since that is the only + // operation that doesn't require an RM + // TODO: expand CLI test to try other commands + String serviceName = "app-1"; + buildApp(serviceName, ExampleAppJson.APP_JSON); + checkApp(serviceName, "master", 1L, 3600L, null); + + serviceName = "app-2"; + buildApp(serviceName, ExampleAppJson.APP_JSON, "1000", "qname"); + checkApp(serviceName, "master", 1L, 1000L, "qname"); + } + + private void checkApp(String serviceName, String compName, long count, Long + lifetime, String queue) throws IOException { + Service service = ServiceApiUtil.loadService(fs, serviceName); + Assert.assertEquals(serviceName, service.getName()); + Assert.assertEquals(lifetime, service.getLifetime()); + Assert.assertEquals(queue, service.getQueue()); + List components = service.getComponents(); + for (Component component : components) { + if (component.getName().equals(compName)) { + Assert.assertEquals(count, component.getNumberOfContainers() + .longValue()); + return; + } + } + Assert.fail(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java new file mode 100644 index 00000000000..5fdd2ab0c7c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + + +import org.apache.hadoop.yarn.service.api.records.Service; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.hadoop.yarn.service.ServiceTestUtils.JSON_SER_DESER; + +/** + * Names of the example configs. 
+ */ +public final class ExampleAppJson { + + public static final String APP_JSON = "app.json"; + public static final String OVERRIDE_JSON = "app-override.json"; + public static final String DEFAULT_JSON = "default.json"; + public static final String EXTERNAL_JSON_0 = "external0.json"; + public static final String EXTERNAL_JSON_1 = "external1.json"; + public static final String EXTERNAL_JSON_2 = "external2.json"; + + public static final String PACKAGE = "/org/apache/hadoop/yarn/service/conf/examples/"; + + + private static final String[] ALL_EXAMPLES = {APP_JSON, OVERRIDE_JSON, + DEFAULT_JSON}; + + public static final List ALL_EXAMPLE_RESOURCES = new ArrayList<>(); + static { + for (String example : ALL_EXAMPLES) { + ALL_EXAMPLE_RESOURCES.add(PACKAGE + example); + } + } + + private ExampleAppJson() { + } + + public static Service loadResource(String name) throws IOException { + return JSON_SER_DESER.fromResource(PACKAGE + name); + } + + public static String resourceName(String name) { + return "target/test-classes" + PACKAGE + name; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java new file mode 100644 index 00000000000..18318aa05eb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.ServiceTestUtils; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.api.records.Configuration; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.conf.ExampleAppJson.*; + +/** + * Test global configuration resolution. 
+ */ +public class TestAppJsonResolve extends Assert { + protected static final Logger LOG = + LoggerFactory.getLogger(TestAppJsonResolve.class); + + @Test + public void testOverride() throws Throwable { + Service orig = ExampleAppJson.loadResource(OVERRIDE_JSON); + + Configuration global = orig.getConfiguration(); + assertEquals("a", global.getProperty("g1")); + assertEquals("b", global.getProperty("g2")); + assertEquals(2, global.getFiles().size()); + + Configuration simple = orig.getComponent("simple").getConfiguration(); + assertEquals(0, simple.getProperties().size()); + assertEquals(1, simple.getFiles().size()); + + Configuration master = orig.getComponent("master").getConfiguration(); + assertEquals("m", master.getProperty("name")); + assertEquals("overridden", master.getProperty("g1")); + assertEquals(0, master.getFiles().size()); + + Configuration worker = orig.getComponent("worker").getConfiguration(); + LOG.info("worker = {}", worker); + assertEquals(3, worker.getProperties().size()); + assertEquals(0, worker.getFiles().size()); + + assertEquals("worker", worker.getProperty("name")); + assertEquals("overridden-by-worker", worker.getProperty("g1")); + assertNull(worker.getProperty("g2")); + assertEquals("1000", worker.getProperty("timeout")); + + // here is the resolution + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + ServiceApiUtil.validateAndResolveService(orig, sfs, new + YarnConfiguration()); + + global = orig.getConfiguration(); + LOG.info("global = {}", global); + assertEquals("a", global.getProperty("g1")); + assertEquals("b", global.getProperty("g2")); + assertEquals(2, global.getFiles().size()); + + simple = orig.getComponent("simple").getConfiguration(); + assertEquals(2, simple.getProperties().size()); + assertEquals("a", simple.getProperty("g1")); + assertEquals("b", simple.getProperty("g2")); + assertEquals(2, simple.getFiles().size()); + + Set files = new HashSet<>(); + Map props = new HashMap<>(); + props.put("k1", "overridden"); 
+ props.put("k2", "v2"); + files.add(new ConfigFile().destFile("file1").type(ConfigFile.TypeEnum + .PROPERTIES).properties(props)); + files.add(new ConfigFile().destFile("file2").type(ConfigFile.TypeEnum + .XML).properties(Collections.singletonMap("k3", "v3"))); + assertTrue(files.contains(simple.getFiles().get(0))); + assertTrue(files.contains(simple.getFiles().get(1))); + + master = orig.getComponent("master").getConfiguration(); + LOG.info("master = {}", master); + assertEquals(3, master.getProperties().size()); + assertEquals("m", master.getProperty("name")); + assertEquals("overridden", master.getProperty("g1")); + assertEquals("b", master.getProperty("g2")); + assertEquals(2, master.getFiles().size()); + + props.put("k1", "v1"); + files.clear(); + files.add(new ConfigFile().destFile("file1").type(ConfigFile.TypeEnum + .PROPERTIES).properties(props)); + files.add(new ConfigFile().destFile("file2").type(ConfigFile.TypeEnum + .XML).properties(Collections.singletonMap("k3", "v3"))); + + assertTrue(files.contains(master.getFiles().get(0))); + assertTrue(files.contains(master.getFiles().get(1))); + + worker = orig.getComponent("worker").getConfiguration(); + LOG.info("worker = {}", worker); + assertEquals(4, worker.getProperties().size()); + + assertEquals("worker", worker.getProperty("name")); + assertEquals("overridden-by-worker", worker.getProperty("g1")); + assertEquals("b", worker.getProperty("g2")); + assertEquals("1000", worker.getProperty("timeout")); + assertEquals(2, worker.getFiles().size()); + + assertTrue(files.contains(worker.getFiles().get(0))); + assertTrue(files.contains(worker.getFiles().get(1))); + } + + @Test + public void testOverrideExternalConfiguration() throws IOException { + Service orig = ExampleAppJson.loadResource(EXTERNAL_JSON_1); + + Configuration global = orig.getConfiguration(); + assertEquals(0, global.getProperties().size()); + + assertEquals(3, orig.getComponents().size()); + + Configuration simple = 
orig.getComponent("simple").getConfiguration(); + assertEquals(0, simple.getProperties().size()); + + Configuration master = orig.getComponent("master").getConfiguration(); + assertEquals(1, master.getProperties().size()); + assertEquals("is-overridden", master.getProperty("g3")); + + Configuration other = orig.getComponent("other").getConfiguration(); + assertEquals(0, other.getProperties().size()); + + // load the external service + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + Service ext = ExampleAppJson.loadResource(APP_JSON); + ServiceApiUtil.validateAndResolveService(ext, sfs, new + YarnConfiguration()); + + // perform the resolution on original service + sfs = ServiceTestUtils.initMockFs(ext); + ServiceApiUtil.validateAndResolveService(orig, sfs, new + YarnConfiguration()); + + global = orig.getConfiguration(); + assertEquals(0, global.getProperties().size()); + + assertEquals(4, orig.getComponents().size()); + + simple = orig.getComponent("simple").getConfiguration(); + assertEquals(3, simple.getProperties().size()); + assertEquals("a", simple.getProperty("g1")); + assertEquals("b", simple.getProperty("g2")); + assertEquals("60", + simple.getProperty("yarn.service.failure-count-reset.window")); + + master = orig.getComponent("master").getConfiguration(); + assertEquals(5, master.getProperties().size()); + assertEquals("512M", master.getProperty("jvm.heapsize")); + assertEquals("overridden", master.getProperty("g1")); + assertEquals("b", master.getProperty("g2")); + assertEquals("is-overridden", master.getProperty("g3")); + assertEquals("60", + simple.getProperty("yarn.service.failure-count-reset.window")); + + Configuration worker = orig.getComponent("worker").getConfiguration(); + LOG.info("worker = {}", worker); + assertEquals(4, worker.getProperties().size()); + assertEquals("512M", worker.getProperty("jvm.heapsize")); + assertEquals("overridden-by-worker", worker.getProperty("g1")); + assertEquals("b", worker.getProperty("g2")); + 
assertEquals("60", + worker.getProperty("yarn.service.failure-count-reset.window")); + + other = orig.getComponent("other").getConfiguration(); + assertEquals(0, other.getProperties().size()); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java new file mode 100644 index 00000000000..a813da3bff1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.ServiceTestUtils; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.Arrays; +import java.util.Collection; + +import static org.apache.hadoop.yarn.service.ServiceTestUtils.JSON_SER_DESER; + +/** + * Test loading example resources. + */ +@RunWith(value = Parameterized.class) +public class TestLoadExampleAppJson extends Assert { + private String resource; + + public TestLoadExampleAppJson(String resource) { + this.resource = resource; + } + + @Parameterized.Parameters + public static Collection filenames() { + String[][] stringArray = new String[ExampleAppJson + .ALL_EXAMPLE_RESOURCES.size()][1]; + int i = 0; + for (String s : ExampleAppJson.ALL_EXAMPLE_RESOURCES) { + stringArray[i++][0] = s; + } + return Arrays.asList(stringArray); + } + + @Test + public void testLoadResource() throws Throwable { + try { + Service service = JSON_SER_DESER.fromResource(resource); + + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + + ServiceApiUtil.validateAndResolveService(service, sfs, + new YarnConfiguration()); + } catch (Exception e) { + throw new Exception("exception loading " + resource + ":" + e.toString()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java new file mode 100644 index 00000000000..d7fa9a04dbb --- 
/dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +/** + * Test cluster name validation. 
+ */ +public class TestValidateServiceNames { + + void assertValidName(String name) { + ServiceApiUtil.validateNameFormat(name, new Configuration()); + } + + void assertInvalidName(String name) { + try { + ServiceApiUtil.validateNameFormat(name, new Configuration()); + Assert.fail(); + } catch (IllegalArgumentException e) { + // + } + } + + void assertInvalid(List names) { + for (String name : names) { + assertInvalidName(name); + } + } + + void assertValid(List names) { + for (String name : names) { + assertValidName(name); + } + } + + @Test + public void testEmptyName() throws Throwable { + assertInvalidName(""); + } + + @Test + public void testSpaceName() throws Throwable { + assertInvalidName(" "); + } + + + @Test + public void testLeadingHyphen() throws Throwable { + assertInvalidName("-hyphen"); + } + + @Test + public void testTitleLetters() throws Throwable { + assertInvalidName("Title"); + } + + @Test + public void testCapitalLetters() throws Throwable { + assertInvalidName("UPPER-CASE-CLUSTER"); + } + + @Test + public void testInnerBraced() throws Throwable { + assertInvalidName("a[a"); + } + + @Test + public void testLeadingBrace() throws Throwable { + assertInvalidName("["); + } + + @Test + public void testNonalphaLeadingChars() throws Throwable { + assertInvalid(Arrays.asList( + "[a", "#", "@", "=", "*", "." 
+ )); + } + + @Test + public void testNonalphaInnerChars() throws Throwable { + assertInvalid(Arrays.asList( + "a[a", "b#", "c@", "d=", "e*", "f.", "g ", "h i" + )); + } + + @Test + public void testClusterValid() throws Throwable { + assertValidName("cluster"); + } + + @Test + public void testValidNames() throws Throwable { + assertValid(Arrays.asList( + "cluster", + "cluster1", + "very-very-very-long-cluster-name", + "c1234567890" + )); + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java new file mode 100644 index 00000000000..0e03a2c1b61 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +package org.apache.hadoop.yarn.service.monitor; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.MockServiceAM; +import org.apache.hadoop.yarn.service.ServiceTestUtils; + +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.Collections; + +public class TestServiceMonitor extends ServiceTestUtils { + + private File basedir; + YarnConfiguration conf = new YarnConfiguration(); + + @Before + public void setup() throws Exception { + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + conf.setLong(YarnServiceConf.READINESS_CHECK_INTERVAL, 2); + } + + @After + public void tearDown() throws IOException { + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + } + + // Create compa with 1 container + // Create compb with 1 container + // Verify compb dependency satisfied + // Increase compa to 2 containers + // Verify compb dependency becomes unsatisfied. 
+ @Test + public void testComponentDependency() throws Exception{ + ApplicationId applicationId = ApplicationId.newInstance(123456, 1); + Service exampleApp = new Service(); + exampleApp.setId(applicationId.toString()); + exampleApp.setName("testComponentDependency"); + exampleApp.addComponent(createComponent("compa", 1, "sleep 1000")); + Component compb = createComponent("compb", 1, "sleep 1000"); + + // Let compb depends on compa; + compb.setDependencies(Collections.singletonList("compa")); + exampleApp.addComponent(compb); + + MockServiceAM am = new MockServiceAM(exampleApp); + am.init(conf); + am.start(); + + // compa ready + Assert.assertTrue(am.getComponent("compa").areDependenciesReady()); + //compb not ready + Assert.assertFalse(am.getComponent("compb").areDependenciesReady()); + + // feed 1 container to compa, + am.feedContainerToComp(exampleApp, 1, "compa"); + // waiting for compb's dependencies are satisfied + am.waitForDependenciesSatisfied("compb"); + + // feed 1 container to compb + am.feedContainerToComp(exampleApp, 2, "compb"); + am.flexComponent("compa", 2); + am.waitForNumDesiredContainers("compa", 2); + + // compb dependencies not satisfied again. 
+ Assert.assertFalse(am.getComponent("compb").areDependenciesReady()); + am.stop(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java new file mode 100644 index 00000000000..79406e99e73 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.providers; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test the AbstractClientProvider shared methods. + */ +public class TestAbstractClientProvider { + private static final String EXCEPTION_PREFIX = "Should have thrown " + + "exception: "; + private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " + + "exception: "; + + private static class ClientProvider extends AbstractClientProvider { + @Override + public void validateArtifact(Artifact artifact, FileSystem fileSystem) + throws IOException { + } + + @Override + protected void validateConfigFile(ConfigFile configFile, + FileSystem fileSystem) throws IOException { + } + } + + @Test + public void testConfigFiles() throws IOException { + ClientProvider clientProvider = new ClientProvider(); + FileSystem mockFs = mock(FileSystem.class); + when(mockFs.exists(anyObject())).thenReturn(true); + + ConfigFile configFile = new ConfigFile(); + List configFiles = new ArrayList<>(); + configFiles.add(configFile); + + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "null file type"); + } catch (IllegalArgumentException e) { + } + + configFile.setType(ConfigFile.TypeEnum.TEMPLATE); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "empty src_file for type template"); + } catch (IllegalArgumentException e) { + } + + configFile.setSrcFile("srcfile"); + try { + clientProvider.validateConfigFiles(configFiles, 
mockFs); + Assert.fail(EXCEPTION_PREFIX + "empty dest file"); + } catch (IllegalArgumentException e) { + } + + configFile.setDestFile("destfile"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + configFile = new ConfigFile(); + configFile.setType(ConfigFile.TypeEnum.JSON); + configFile.setSrcFile(null); + configFile.setDestFile("path/destfile2"); + configFiles.add(configFile); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "dest file with multiple path elements"); + } catch (IllegalArgumentException e) { + } + + configFile.setDestFile("/path/destfile2"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + configFile.setDestFile("destfile"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "duplicate dest file"); + } catch (IllegalArgumentException e) { + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java new file mode 100644 index 00000000000..56f4555b16c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.providers; + +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultClientProvider; +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderFactory; +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderService; +import org.apache.hadoop.yarn.service.provider.docker.DockerClientProvider; +import org.apache.hadoop.yarn.service.provider.docker.DockerProviderFactory; +import org.apache.hadoop.yarn.service.provider.docker.DockerProviderService; +import org.apache.hadoop.yarn.service.provider.tarball.TarballClientProvider; +import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderFactory; +import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderService; + +import org.junit.Test; + +import static org.junit.Assert.assertTrue; + +/** + * Test provider factories. 
+ */ +public class TestProviderFactory { + @Test + public void testDockerFactory() throws Throwable { + ProviderFactory factory = ProviderFactory + .createServiceProviderFactory(new Artifact().type(TypeEnum.DOCKER)); + assertTrue(factory instanceof DockerProviderFactory); + assertTrue(factory.createClientProvider() instanceof DockerClientProvider); + assertTrue(factory.createServerProvider() instanceof DockerProviderService); + assertTrue(ProviderFactory.getProviderService(new Artifact() + .type(TypeEnum.DOCKER)) instanceof DockerProviderService); + } + + @Test + public void testTarballFactory() throws Throwable { + ProviderFactory factory = ProviderFactory + .createServiceProviderFactory(new Artifact().type(TypeEnum.TARBALL)); + assertTrue(factory instanceof TarballProviderFactory); + assertTrue(factory.createClientProvider() instanceof TarballClientProvider); + assertTrue(factory.createServerProvider() instanceof + TarballProviderService); + assertTrue(ProviderFactory.getProviderService(new Artifact() + .type(TypeEnum.TARBALL)) instanceof TarballProviderService); + } + + @Test + public void testDefaultFactory() throws Throwable { + ProviderFactory factory = ProviderFactory + .createServiceProviderFactory(null); + assertTrue(factory instanceof DefaultProviderFactory); + assertTrue(factory.createClientProvider() instanceof DefaultClientProvider); + assertTrue(factory.createServerProvider() instanceof DefaultProviderService); + assertTrue(ProviderFactory.getProviderService(null) instanceof + DefaultProviderService); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/timelineservice/TestServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/timelineservice/TestServiceTimelinePublisher.java new file mode 100644 index 
00000000000..80b4f5103a3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/timelineservice/TestServiceTimelinePublisher.java @@ -0,0 +1,291 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.timelineservice; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier; +import org.apache.hadoop.yarn.client.api.TimelineV2Client; +import org.apache.hadoop.yarn.client.api.impl.TimelineV2ClientImpl; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Container; +import org.apache.hadoop.yarn.service.api.records.ContainerState; +import org.apache.hadoop.yarn.service.api.records.PlacementPolicy; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test class for ServiceTimelinePublisher. 
+ */ +public class TestServiceTimelinePublisher { + private TimelineV2Client timelineClient; + private Configuration config; + private ServiceTimelinePublisher serviceTimelinePublisher; + private static String SERVICE_NAME = "HBASE"; + private static String SERVICEID = "application_1490093646524_0005"; + private static String ARTIFACTID = "ARTIFACTID"; + private static String COMPONENT_NAME = "DEFAULT"; + private static String CONTAINER_ID = + "container_e02_1490093646524_0005_01_000001"; + private static String CONTAINER_IP = + "localhost"; + private static String CONTAINER_HOSTNAME = + "cnl124-localhost.site"; + private static String CONTAINER_BAREHOST = + "localhost.com"; + + @Before + public void setUp() throws Exception { + config = new Configuration(); + config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f); + timelineClient = + new DummyTimelineClient(ApplicationId.fromString(SERVICEID)); + serviceTimelinePublisher = new ServiceTimelinePublisher(timelineClient); + serviceTimelinePublisher.init(config); + serviceTimelinePublisher.start(); + } + + @After + public void tearDown() throws Exception { + if (serviceTimelinePublisher != null) { + serviceTimelinePublisher.stop(); + } + if (timelineClient != null) { + timelineClient.stop(); + } + } + + @Test + public void testServiceAttemptEntity() { + Service service = createMockApplication(); + serviceTimelinePublisher + .serviceAttemptRegistered(service, new YarnConfiguration()); + + Collection lastPublishedEntities = + ((DummyTimelineClient) timelineClient).getLastPublishedEntities(); + // 2 entities because during registration component also registered. 
+ assertEquals(2, lastPublishedEntities.size()); + for (TimelineEntity timelineEntity : lastPublishedEntities) { + if (timelineEntity.getType() == ServiceTimelineEntityType.COMPONENT + .toString()) { + verifyComponentTimelineEntity(timelineEntity); + } else { + verifyServiceAttemptTimelineEntity(timelineEntity, null, true); + } + } + + ServiceContext context = new ServiceContext(); + context.attemptId = ApplicationAttemptId + .newInstance(ApplicationId.fromString(service.getId()), 1); + String exitDiags = "service killed"; + serviceTimelinePublisher.serviceAttemptUnregistered(context, exitDiags); + lastPublishedEntities = + ((DummyTimelineClient) timelineClient).getLastPublishedEntities(); + for (TimelineEntity timelineEntity : lastPublishedEntities) { + if (timelineEntity.getType() == ServiceTimelineEntityType.SERVICE_ATTEMPT + .toString()) { + verifyServiceAttemptTimelineEntity(timelineEntity, exitDiags, + false); + } + } + } + + @Test + public void testComponentInstanceEntity() { + Container container = new Container(); + container.id(CONTAINER_ID).ip(CONTAINER_IP).bareHost(CONTAINER_BAREHOST) + .hostname(CONTAINER_HOSTNAME).state(ContainerState.RUNNING_BUT_UNREADY) + .launchTime(new Date()); + ComponentInstanceId id = new ComponentInstanceId(0, COMPONENT_NAME); + ComponentInstance instance = mock(ComponentInstance.class); + when(instance.getCompName()).thenReturn(COMPONENT_NAME); + when(instance.getCompInstanceName()).thenReturn("comp_instance_name"); + serviceTimelinePublisher.componentInstanceStarted(container, + instance); + + Collection lastPublishedEntities = + ((DummyTimelineClient) timelineClient).getLastPublishedEntities(); + assertEquals(1, lastPublishedEntities.size()); + TimelineEntity entity = lastPublishedEntities.iterator().next(); + + assertEquals(1, entity.getEvents().size()); + assertEquals(CONTAINER_ID, entity.getId()); + assertEquals(CONTAINER_BAREHOST, + entity.getInfo().get(ServiceTimelineMetricsConstants.BARE_HOST)); + 
assertEquals(COMPONENT_NAME, + entity.getInfo().get(ServiceTimelineMetricsConstants.COMPONENT_NAME)); + assertEquals(ContainerState.RUNNING_BUT_UNREADY.toString(), + entity.getInfo().get(ServiceTimelineMetricsConstants.STATE)); + + // updated container state + container.setState(ContainerState.READY); + serviceTimelinePublisher.componentInstanceIPHostUpdated(container); + lastPublishedEntities = + ((DummyTimelineClient) timelineClient).getLastPublishedEntities(); + assertEquals(1, lastPublishedEntities.size()); + entity = lastPublishedEntities.iterator().next(); + assertEquals(2, entity.getEvents().size()); + assertEquals(ContainerState.READY.toString(), + entity.getInfo().get(ServiceTimelineMetricsConstants.STATE)); + + } + + private void verifyServiceAttemptTimelineEntity(TimelineEntity timelineEntity, + String message, boolean isRegistedEntity) { + assertEquals(SERVICEID, timelineEntity.getId()); + assertEquals(SERVICE_NAME, + timelineEntity.getInfo().get(ServiceTimelineMetricsConstants.NAME)); + if (isRegistedEntity) { + assertEquals(ServiceState.STARTED.toString(), + timelineEntity.getInfo().get(ServiceTimelineMetricsConstants.STATE)); + assertEquals(ServiceTimelineEvent.SERVICE_ATTEMPT_REGISTERED.toString(), + timelineEntity.getEvents().iterator().next().getId()); + } else { + assertEquals("ENDED", + timelineEntity.getInfo().get(ServiceTimelineMetricsConstants.STATE).toString()); + assertEquals(message, timelineEntity.getInfo() + .get(ServiceTimelineMetricsConstants.DIAGNOSTICS_INFO)); + assertEquals(2, timelineEntity.getEvents().size()); + assertEquals(ServiceTimelineEvent.SERVICE_ATTEMPT_UNREGISTERED.toString(), + timelineEntity.getEvents().iterator().next().getId()); + } + } + + private void verifyComponentTimelineEntity(TimelineEntity entity) { + Map info = entity.getInfo(); + assertEquals("DEFAULT", entity.getId()); + assertEquals(ARTIFACTID, + info.get(ServiceTimelineMetricsConstants.ARTIFACT_ID)); + assertEquals("DOCKER", + 
info.get(ServiceTimelineMetricsConstants.ARTIFACT_TYPE)); + assertEquals("medium", + info.get(ServiceTimelineMetricsConstants.RESOURCE_PROFILE)); + assertEquals(1, info.get(ServiceTimelineMetricsConstants.RESOURCE_CPU)); + assertEquals("1024", + info.get(ServiceTimelineMetricsConstants.RESOURCE_MEMORY)); + assertEquals("sleep 1", + info.get(ServiceTimelineMetricsConstants.LAUNCH_COMMAND)); + assertEquals("false", + info.get(ServiceTimelineMetricsConstants.RUN_PRIVILEGED_CONTAINER)); + assertEquals("label", + info.get(ServiceTimelineMetricsConstants.PLACEMENT_POLICY)); + } + + private static Service createMockApplication() { + Service service = mock(Service.class); + + when(service.getId()).thenReturn(SERVICEID); + when(service.getLaunchTime()).thenReturn(new Date()); + when(service.getState()).thenReturn(ServiceState.STARTED); + when(service.getName()).thenReturn(SERVICE_NAME); + when(service.getConfiguration()).thenReturn( + new org.apache.hadoop.yarn.service.api.records.Configuration()); + + Component component = mock(Component.class); + Artifact artifact = new Artifact(); + artifact.setId(ARTIFACTID); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory(1024 + ""); + resource.setProfile("medium"); + when(component.getArtifact()).thenReturn(artifact); + when(component.getName()).thenReturn(COMPONENT_NAME); + when(component.getResource()).thenReturn(resource); + when(component.getLaunchCommand()).thenReturn("sleep 1"); + PlacementPolicy placementPolicy = new PlacementPolicy(); + placementPolicy.setLabel("label"); + when(component.getPlacementPolicy()).thenReturn(placementPolicy); + when(component.getConfiguration()).thenReturn( + new org.apache.hadoop.yarn.service.api.records.Configuration()); + List components = new ArrayList(); + components.add(component); + + when(service.getComponents()).thenReturn(components); + return service; + } + + protected static class DummyTimelineClient extends TimelineV2ClientImpl { + private Map 
lastPublishedEntities = + new HashMap<>(); + + public DummyTimelineClient(ApplicationId appId) { + super(appId); + } + + @Override + public void putEntitiesAsync(TimelineEntity... entities) + throws IOException, YarnException { + putEntities(entities); + } + + @Override + public void putEntities(TimelineEntity... entities) + throws IOException, YarnException { + for (TimelineEntity timelineEntity : entities) { + TimelineEntity entity = + lastPublishedEntities.get(timelineEntity.getIdentifier()); + if (entity == null) { + lastPublishedEntities.put(timelineEntity.getIdentifier(), + timelineEntity); + } else { + entity.addMetrics(timelineEntity.getMetrics()); + entity.addEvents(timelineEntity.getEvents()); + entity.addInfo(timelineEntity.getInfo()); + entity.addConfigs(timelineEntity.getConfigs()); + entity.addRelatesToEntities(timelineEntity.getRelatesToEntities()); + entity + .addIsRelatedToEntities(timelineEntity.getIsRelatedToEntities()); + } + } + } + + public Collection getLastPublishedEntities() { + return lastPublishedEntities.values(); + } + + public void reset() { + lastPublishedEntities = null; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/example-app.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/example-app.json new file mode 100644 index 00000000000..5dfbd64c1ad --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/example-app.json @@ -0,0 +1,15 @@ +{ + "name": "example-app", + "components" : + [ + { + "name": "simple", + "number_of_containers": 1, + "launch_command": "sleep 2", + "resource": { + "cpus": 1, + "memory": "128" + } + } + ] +} \ No newline at end of file diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app-override.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app-override.json new file mode 100644 index 00000000000..30fa0eb8db2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app-override.json @@ -0,0 +1,76 @@ +{ + "name": "app-1", + "lifetime": "3600", + "configuration": { + "properties": { + "g1": "a", + "g2": "b" + }, + "files": [ + { + "type": "PROPERTIES", + "dest_file": "file1", + "properties": { + "k1": "v1", + "k2": "v2" + } + }, + { + "type": "XML", + "dest_file": "file2", + "properties": { + "k3": "v3" + } + } + ] + }, + "resource": { + "cpus": 1, + "memory": "512" + }, + "components": [ + { + "name": "simple", + "launch_command": "sleep 3600", + "number_of_containers": 2, + "configuration": { + "files": [ + { + "type": "PROPERTIES", + "dest_file": "file1", + "properties": { + "k1": "overridden" + } + } + ] + } + }, + { + "name": "master", + "launch_command": "sleep 3600", + "number_of_containers": 2, + "configuration": { + "properties": { + "name": "m", + "g1": "overridden" + } + } + }, + { + "name": "worker", + "number_of_containers": 2, + "launch_command": "sleep 3600", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "properties": { + "name": "worker", + "g1": "overridden-by-worker", + "timeout": "1000" + } + } + } + ] +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json new file mode 100644 index 00000000000..2eb477f2740 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json @@ -0,0 +1,50 @@ +{ + "name": "app-1", + "id" : "application_1503358878042_0011", + "lifetime": "3600", + "configuration": { + "properties": { + "g1": "a", + "g2": "b", + "yarn.service.failure-count-reset.window": "60" + } + }, + "resource": { + "cpus": 1, + "memory": "512" + }, + "components": [ + { + "name": "simple", + "number_of_containers": 2, + "launch_command": "sleep 3600" + }, + { + "name": "master", + "number_of_containers": 1, + "launch_command": "sleep 3600", + "configuration": { + "properties": { + "g1": "overridden", + "g3": "will-be-overridden", + "jvm.heapsize": "512M" + } + } + }, + { + "name": "worker", + "number_of_containers": 5, + "launch_command": "sleep 3600", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "properties": { + "g1": "overridden-by-worker", + "jvm.heapsize": "512M" + } + } + } + ] +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/default.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/default.json new file mode 100644 index 00000000000..73d4e7b28b2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/default.json @@ -0,0 +1,16 @@ +{ + "name": "default-app-1", + 
"lifetime": "3600", + "components" : + [ + { + "name": "sleep", + "number_of_containers": 1, + "launch_command": "sleep 3600", + "resource": { + "cpus": 2, + "memory": "256" + } + } + ] +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external0.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external0.json new file mode 100644 index 00000000000..f0163bc6905 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external0.json @@ -0,0 +1,15 @@ +{ + "name": "external-0", + "lifetime": "3600", + + "components" : [ + { + "name" : "comp1", + "artifact": { + "type": "SERVICE", + "id": "app-1" + } + } + ] + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external1.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external1.json new file mode 100644 index 00000000000..4afdb8b23a6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external1.json @@ -0,0 +1,30 @@ +{ + "name": "external-1", + "lifetime": "3600", + "components": [ + { + "name": "simple", + "artifact": { + "type": "SERVICE", + "id": "app-1" + } + }, + { + "name": "master", + "configuration": { + "properties": { + "g3": "is-overridden" + } + } + }, + { + "name": "other", + "launch_command": "sleep 3600", + "number_of_containers": 2, + 
"resource": { + "cpus": 1, + "memory": "512" + } + } + ] +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external2.json new file mode 100644 index 00000000000..0df8e0aac72 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external2.json @@ -0,0 +1,22 @@ +{ + "name": "external-2", + "lifetime": "3600", + "components": [ + { + "name": "ext", + "artifact": { + "type": "SERVICE", + "id": "external-1" + } + }, + { + "name": "another", + "launch_command": "sleep 3600", + "number_of_containers": 1, + "resource": { + "cpus": 1, + "memory": "512" + } + } + ] +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/yarn-site.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/yarn-site.xml new file mode 100644 index 00000000000..266caa9e184 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/yarn-site.xml @@ -0,0 +1,19 @@ + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml new file mode 100644 index 00000000000..716fdb70cf0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml @@ -0,0 +1,38 @@ + + + + + hadoop-yarn-applications + org.apache.hadoop + 3.1.0-SNAPSHOT + + 4.0.0 + 
hadoop-yarn-services + Apache Hadoop YARN Services + pom + + + ${basedir}/../../../../hadoop-common-project/hadoop-common/target + + + + + + hadoop-yarn-services-core + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml index 644a1dc43ff..4fb579c02fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml @@ -36,6 +36,8 @@ hadoop-yarn-applications-distributedshell hadoop-yarn-applications-unmanaged-am-launcher + hadoop-yarn-services + hadoop-yarn-services-api diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java new file mode 100644 index 00000000000..6310178568a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.client.api; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; + +import java.io.IOException; +import java.util.Map; + +/** + * Client for managing applications. + */ +@Public +@Unstable +public abstract class AppAdminClient extends CompositeService { + public static final String YARN_APP_ADMIN_CLIENT_PREFIX = "yarn" + + ".application.admin.client.class."; + public static final String DEFAULT_TYPE = "yarn-service"; + public static final String DEFAULT_CLASS_NAME = "org.apache.hadoop.yarn" + + ".service.client.ServiceClient"; + + @Private + protected AppAdminClient() { + super(AppAdminClient.class.getName()); + } + + /** + *

+ * Create a new instance of AppAdminClient. + *

+ * + * @param appType application type + * @param conf configuration + * @return app admin client + */ + @Public + @Unstable + public static AppAdminClient createAppAdminClient(String appType, + Configuration conf) { + Map clientClassMap = + conf.getPropsWithPrefix(YARN_APP_ADMIN_CLIENT_PREFIX); + if (!clientClassMap.containsKey(DEFAULT_TYPE)) { + clientClassMap.put(DEFAULT_TYPE, DEFAULT_CLASS_NAME); + } + if (!clientClassMap.containsKey(appType)) { + throw new IllegalArgumentException("App admin client class name not " + + "specified for type " + appType); + } + String clientClassName = clientClassMap.get(appType); + Class clientClass; + try { + clientClass = (Class) Class.forName( + clientClassName); + } catch (ClassNotFoundException e) { + throw new YarnRuntimeException("Invalid app admin client class", e); + } + + AppAdminClient appAdminClient = ReflectionUtils.newInstance(clientClass, + conf); + appAdminClient.init(conf); + appAdminClient.start(); + return appAdminClient; + } + + /** + *
<p>
+ * Launch a new YARN application. + *
</p>
+ * + * @param fileName specification of application + * @param appName name of the application + * @param lifetime lifetime of the application + * @param queue queue of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionLaunch(String fileName, String appName, Long + lifetime, String queue) throws IOException, YarnException; + + /** + *
<p>
+ * Stop a YARN application (attempt to stop gracefully before killing the + * application). In the case of a long-running service, the service may be + * restarted later. + *
</p>
+ * + * @param appName the name of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionStop(String appName) throws IOException, + YarnException; + + /** + *
<p>
+ * Start a YARN application from a previously saved specification. In the + * case of a long-running service, the service must have been previously + * launched/started and then stopped, or previously saved but not started. + *
</p>
+ * + * @param appName the name of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionStart(String appName) throws IOException, + YarnException; + + /** + *
<p>
+ * Save the specification for a YARN application / long-running service. + * The application may be started later. + *
</p>
+ * + * @param fileName specification of application to save + * @param appName name of the application + * @param lifetime lifetime of the application + * @param queue queue of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionSave(String fileName, String appName, Long + lifetime, String queue) throws IOException, YarnException; + + /** + *
<p>
+ * Remove the specification and all application data for a YARN application. + * The application cannot be running. + *
</p>
+ * + * @param appName the name of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionDestroy(String appName) throws IOException, + YarnException; + + /** + *
<p>
+ * Change the number of running containers for a component of a YARN + * application / long-running service. + *
</p>
+ * + * @param appName the name of the application + * @param componentCounts map of component name to new component count or + * amount to change existing component count (e.g. + * 5, +5, -5) + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionFlex(String appName, Map + componentCounts) throws IOException, YarnException; + + /** + *
<p>
+ * Upload AM dependencies to HDFS. This makes future application launches + * faster since the dependencies do not have to be uploaded on each launch. + *
</p>
+ * + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int enableFastLaunch() throws IOException, + YarnException; + + /** + *
<p>
+ * Get detailed status string for a YARN application. + *
</p>
+ * + * @param applicationId application id + * @return status string + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract String getStatusString(String applicationId) throws + IOException, YarnException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index 2a9b3bcd925..fb08fcd4a65 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -23,12 +23,7 @@ import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.nio.charset.Charset; import java.text.DecimalFormat; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumSet; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; @@ -54,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.client.api.AppAdminClient; import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; @@ -85,6 +81,7 @@ public class ApplicationCLI extends YarnCLI { "%30s\t%20s\t%20s\t%20s\t%20s\t%20s\t%35s" + System.getProperty("line.separator"); + public static final String APP = "app"; public static final String 
APPLICATION = "application"; public static final String APPLICATION_ATTEMPT = "applicationattempt"; public static final String CONTAINER = "container"; @@ -93,22 +90,52 @@ public class ApplicationCLI extends YarnCLI { public static final String UPDATE_LIFETIME = "updateLifetime"; public static final String CHANGE_APPLICATION_QUEUE = "changeQueue"; + // app admin options + public static final String LAUNCH_CMD = "launch"; + public static final String STOP_CMD = "stop"; + public static final String START_CMD = "start"; + public static final String SAVE_CMD = "save"; + public static final String DESTROY_CMD = "destroy"; + public static final String FLEX_CMD = "flex"; + public static final String COMPONENT = "component"; + public static final String ENABLE_FAST_LAUNCH = "enableFastLaunch"; + + private static String firstArg = null; + private boolean allAppStates; public static void main(String[] args) throws Exception { ApplicationCLI cli = new ApplicationCLI(); cli.setSysOutPrintStream(System.out); cli.setSysErrPrintStream(System.err); - int res = ToolRunner.run(cli, args); + int res = ToolRunner.run(cli, preProcessArgs(args)); cli.stop(); System.exit(res); } + @VisibleForTesting + public static String[] preProcessArgs(String[] args) { + if (args.length > 0) { + // first argument (app|application|applicationattempt|container) must + // be stripped off for GenericOptionsParser to work + firstArg = args[0]; + return Arrays.copyOfRange(args, 1, args.length); + } else { + return args; + } + } + @Override public int run(String[] args) throws Exception { Options opts = new Options(); String title = null; - if (args.length > 0 && args[0].equalsIgnoreCase(APPLICATION)) { + if (firstArg != null) { + title = firstArg; + } else if (args.length > 0) { + title = args[0]; + } + if (title != null && (title.equalsIgnoreCase(APPLICATION) || title + .equalsIgnoreCase(APP))) { title = APPLICATION; opts.addOption(STATUS_CMD, true, "Prints the status of the application."); @@ -168,8 
+195,52 @@ public class ApplicationCLI extends YarnCLI { opts.getOption(UPDATE_PRIORITY).setArgName("Priority"); opts.getOption(UPDATE_LIFETIME).setArgName("Timeout"); opts.getOption(CHANGE_APPLICATION_QUEUE).setArgName("Queue Name"); - } else if (args.length > 0 && args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { - title = APPLICATION_ATTEMPT; + opts.addOption(LAUNCH_CMD, true, "Launches application from " + + "specification file (saves specification and starts application). " + + "Options -updateLifetime and -changeQueue can be specified to alter" + + " the values provided in the file. Supports -appTypes option to " + + "specify which client implementation to use."); + opts.addOption(STOP_CMD, true, "Stops application gracefully (may be " + + "started again later). If name is provided, appType must be " + + "provided unless it is the default yarn-service. If ID is provided," + + " the appType will be looked up. Supports -appTypes option to " + + "specify which client implementation to use."); + opts.addOption(START_CMD, true, "Starts a previously saved " + + "application. Supports -appTypes option to specify which client " + + "implementation to use."); + opts.addOption(SAVE_CMD, true, "Saves specification file for " + + "an application. Options -updateLifetime and -changeQueue can be " + + "specified to alter the values provided in the file. Supports " + + "-appTypes option to specify which client implementation to use."); + opts.addOption(DESTROY_CMD, true, "Destroys a saved application " + + "specification and removes all application data permanently. " + + "Supports -appTypes option to specify which client implementation " + + "to use."); + opts.addOption(FLEX_CMD, true, "Changes number of " + + "running containers for a component of an application / " + + "long-running service. Requires -component option. If name is " + + "provided, appType must be provided unless it is the default " + + "yarn-service. If ID is provided, the appType will be looked up. 
" + + "Supports -appTypes option to specify which client implementation " + + "to use."); + opts.addOption(COMPONENT, true, "Works with -flex option to change " + + "the number of components/containers running for an application / " + + "long-running service. Supports absolute or relative changes, such " + + "as +1, 2, or -3."); + opts.addOption(ENABLE_FAST_LAUNCH, false, "Uploads AM dependencies " + + "to HDFS to make future launches faster. Supports -appTypes option" + + " to specify which client implementation to use."); + opts.getOption(LAUNCH_CMD).setArgName("Application Name> 0 && args[0].equalsIgnoreCase(CONTAINER)) { - title = CONTAINER; + } else if (title != null && title.equalsIgnoreCase(CONTAINER)) { opts.addOption(STATUS_CMD, true, "Prints the status of the container."); opts.addOption(LIST_CMD, true, @@ -205,23 +275,53 @@ public class ApplicationCLI extends YarnCLI { printUsage(title, opts); return exitCode; } - - if (cliParser.hasOption(STATUS_CMD)) { - if (args.length != 3) { + String[] unparsedArgs = cliParser.getArgs(); + if (firstArg == null) { + if (unparsedArgs.length != 1) { printUsage(title, opts); return exitCode; } - if (args[0].equalsIgnoreCase(APPLICATION)) { - exitCode = printApplicationReport(cliParser.getOptionValue(STATUS_CMD)); - } else if (args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { + } else { + if (unparsedArgs.length != 0) { + printUsage(title, opts); + return exitCode; + } + } + + if (cliParser.hasOption(STATUS_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, STATUS_CMD)) { + printUsage(title, opts); + return exitCode; + } + if (title.equalsIgnoreCase(APPLICATION) || + title.equalsIgnoreCase(APP)) { + ApplicationReport report = printApplicationReport(cliParser + .getOptionValue(STATUS_CMD)); + if (report == null) { + exitCode = -1; + } else { + exitCode = 0; + String appType = report.getApplicationType(); + try { + AppAdminClient client = AppAdminClient.createAppAdminClient(appType, + getConf()); + 
sysout.println("Detailed Application Status :"); + sysout.println(client.getStatusString(cliParser.getOptionValue( + STATUS_CMD))); + } catch (IllegalArgumentException e) { + // app type does not have app admin client implementation + } + } + } else if (title.equalsIgnoreCase(APPLICATION_ATTEMPT)) { exitCode = printApplicationAttemptReport(cliParser .getOptionValue(STATUS_CMD)); - } else if (args[0].equalsIgnoreCase(CONTAINER)) { + } else if (title.equalsIgnoreCase(CONTAINER)) { exitCode = printContainerReport(cliParser.getOptionValue(STATUS_CMD)); } return exitCode; } else if (cliParser.hasOption(LIST_CMD)) { - if (args[0].equalsIgnoreCase(APPLICATION)) { + if (title.equalsIgnoreCase(APPLICATION) || + title.equalsIgnoreCase(APP)) { allAppStates = false; Set appTypes = new HashSet(); if (cliParser.hasOption(APP_TYPE_CMD)) { @@ -272,21 +372,21 @@ public class ApplicationCLI extends YarnCLI { } } listApplications(appTypes, appStates, appTags); - } else if (args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { - if (args.length != 3) { + } else if (title.equalsIgnoreCase(APPLICATION_ATTEMPT)) { + if (hasAnyOtherCLIOptions(cliParser, opts, LIST_CMD)) { printUsage(title, opts); return exitCode; } listApplicationAttempts(cliParser.getOptionValue(LIST_CMD)); - } else if (args[0].equalsIgnoreCase(CONTAINER)) { - if (args.length != 3) { + } else if (title.equalsIgnoreCase(CONTAINER)) { + if (hasAnyOtherCLIOptions(cliParser, opts, LIST_CMD)) { printUsage(title, opts); return exitCode; } listContainers(cliParser.getOptionValue(LIST_CMD)); } } else if (cliParser.hasOption(KILL_CMD)) { - if (args.length < 3 || hasAnyOtherCLIOptions(cliParser, opts, KILL_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, KILL_CMD)) { printUsage(title, opts); return exitCode; } @@ -299,7 +399,7 @@ public class ApplicationCLI extends YarnCLI { moveApplicationAcrossQueues(cliParser.getOptionValue(MOVE_TO_QUEUE_CMD), cliParser.getOptionValue(QUEUE_CMD)); } else if (cliParser.hasOption(FAIL_CMD)) { - if 
(!args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { + if (!title.equalsIgnoreCase(APPLICATION_ATTEMPT)) { printUsage(title, opts); return exitCode; } @@ -314,6 +414,103 @@ public class ApplicationCLI extends YarnCLI { } updateApplicationPriority(cliParser.getOptionValue(APP_ID), cliParser.getOptionValue(UPDATE_PRIORITY)); + } else if (cliParser.hasOption(SIGNAL_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, SIGNAL_CMD)) { + printUsage(title, opts); + return exitCode; + } + final String[] signalArgs = cliParser.getOptionValues(SIGNAL_CMD); + final String containerId = signalArgs[0]; + SignalContainerCommand command = + SignalContainerCommand.OUTPUT_THREAD_DUMP; + if (signalArgs.length == 2) { + command = SignalContainerCommand.valueOf(signalArgs[1]); + } + signalToContainer(containerId, command); + } else if (cliParser.hasOption(LAUNCH_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, LAUNCH_CMD, APP_TYPE_CMD, + UPDATE_LIFETIME, CHANGE_APPLICATION_QUEUE)) { + printUsage(title, opts); + return exitCode; + } + String appType = getSingleAppTypeFromCLI(cliParser); + Long lifetime = null; + if (cliParser.hasOption(UPDATE_LIFETIME)) { + lifetime = Long.parseLong(cliParser.getOptionValue(UPDATE_LIFETIME)); + } + String queue = null; + if (cliParser.hasOption(CHANGE_APPLICATION_QUEUE)) { + queue = cliParser.getOptionValue(CHANGE_APPLICATION_QUEUE); + } + String[] nameAndFile = cliParser.getOptionValues(LAUNCH_CMD); + return AppAdminClient.createAppAdminClient(appType, getConf()) + .actionLaunch(nameAndFile[1], nameAndFile[0], lifetime, queue); + } else if (cliParser.hasOption(STOP_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, STOP_CMD, APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + String[] appNameAndType = getAppNameAndType(cliParser, STOP_CMD); + return AppAdminClient.createAppAdminClient(appNameAndType[1], getConf()) + .actionStop(appNameAndType[0]); + } else if (cliParser.hasOption(START_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, 
opts, START_CMD, APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + String appType = getSingleAppTypeFromCLI(cliParser); + return AppAdminClient.createAppAdminClient(appType, getConf()) + .actionStart(cliParser.getOptionValue(START_CMD)); + } else if (cliParser.hasOption(SAVE_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, SAVE_CMD, APP_TYPE_CMD, + UPDATE_LIFETIME, CHANGE_APPLICATION_QUEUE)) { + printUsage(title, opts); + return exitCode; + } + String appType = getSingleAppTypeFromCLI(cliParser); + Long lifetime = null; + if (cliParser.hasOption(UPDATE_LIFETIME)) { + lifetime = Long.parseLong(cliParser.getOptionValue(UPDATE_LIFETIME)); + } + String queue = null; + if (cliParser.hasOption(CHANGE_APPLICATION_QUEUE)) { + queue = cliParser.getOptionValue(CHANGE_APPLICATION_QUEUE); + } + String[] nameAndFile = cliParser.getOptionValues(SAVE_CMD); + return AppAdminClient.createAppAdminClient(appType, getConf()) + .actionSave(nameAndFile[1], nameAndFile[0], lifetime, queue); + } else if (cliParser.hasOption(DESTROY_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, DESTROY_CMD, APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + String appType = getSingleAppTypeFromCLI(cliParser); + return AppAdminClient.createAppAdminClient(appType, getConf()) + .actionDestroy(cliParser.getOptionValue(DESTROY_CMD)); + } else if (cliParser.hasOption(FLEX_CMD)) { + if (!cliParser.hasOption(COMPONENT) || + hasAnyOtherCLIOptions(cliParser, opts, FLEX_CMD, COMPONENT, + APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + String[] rawCounts = cliParser.getOptionValues(COMPONENT); + Map counts = new HashMap<>(rawCounts.length/2); + for (int i = 0; i < rawCounts.length - 1; i+=2) { + counts.put(rawCounts[i], rawCounts[i+1]); + } + String[] appNameAndType = getAppNameAndType(cliParser, FLEX_CMD); + return AppAdminClient.createAppAdminClient(appNameAndType[1], getConf()) + .actionFlex(appNameAndType[0], counts); + } else if 
(cliParser.hasOption(ENABLE_FAST_LAUNCH)) { + String appType = getSingleAppTypeFromCLI(cliParser); + if (hasAnyOtherCLIOptions(cliParser, opts, ENABLE_FAST_LAUNCH, + APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + return AppAdminClient.createAppAdminClient(appType, getConf()) + .enableFastLaunch(); } else if (cliParser.hasOption(UPDATE_LIFETIME)) { if (!cliParser.hasOption(APP_ID)) { printUsage(title, opts); @@ -332,19 +529,6 @@ public class ApplicationCLI extends YarnCLI { } moveApplicationAcrossQueues(cliParser.getOptionValue(APP_ID), cliParser.getOptionValue(CHANGE_APPLICATION_QUEUE)); - } else if (cliParser.hasOption(SIGNAL_CMD)) { - if (args.length < 3 || args.length > 4) { - printUsage(title, opts); - return exitCode; - } - final String[] signalArgs = cliParser.getOptionValues(SIGNAL_CMD); - final String containerId = signalArgs[0]; - SignalContainerCommand command = - SignalContainerCommand.OUTPUT_THREAD_DUMP; - if (signalArgs.length == 2) { - command = SignalContainerCommand.valueOf(signalArgs[1]); - } - signalToContainer(containerId, command); } else { syserr.println("Invalid Command Usage : "); printUsage(title, opts); @@ -352,6 +536,47 @@ public class ApplicationCLI extends YarnCLI { return 0; } + private ApplicationReport getApplicationReport(ApplicationId applicationId) + throws IOException, YarnException { + ApplicationReport appReport = null; + try { + appReport = client.getApplicationReport(applicationId); + } catch (ApplicationNotFoundException e) { + throw new YarnException("Application with id '" + applicationId + + "' doesn't exist in RM or Timeline Server."); + } + return appReport; + } + + private String[] getAppNameAndType(CommandLine cliParser, String option) + throws IOException, YarnException { + String applicationIdOrName = cliParser.getOptionValue(option); + try { + ApplicationId id = ApplicationId.fromString(applicationIdOrName); + ApplicationReport report = getApplicationReport(id); + return new 
String[]{report.getName(), report.getApplicationType()}; + } catch (IllegalArgumentException e) { + // assume CLI option provided the app name + // and read appType from command line since id wasn't provided + String appType = getSingleAppTypeFromCLI(cliParser); + return new String[]{applicationIdOrName, appType}; + } + } + + private static String getSingleAppTypeFromCLI(CommandLine cliParser) { + if (cliParser.hasOption(APP_TYPE_CMD)) { + String[] types = cliParser.getOptionValues(APP_TYPE_CMD); + if (types != null) { + for (String type : types) { + if (!type.trim().isEmpty()) { + return StringUtils.toLowerCase(type).trim(); + } + } + } + } + return AppAdminClient.DEFAULT_TYPE; + } + private void updateApplicationTimeout(String applicationId, ApplicationTimeoutType timeoutType, long timeoutInSec) throws YarnException, IOException { @@ -572,7 +797,7 @@ public class ApplicationCLI extends YarnCLI { /** * Kills applications with the application id as appId * - * @param Array of applicationIds + * @param applicationIds Array of applicationIds * @return errorCode * @throws YarnException * @throws IOException @@ -663,10 +888,10 @@ public class ApplicationCLI extends YarnCLI { * Prints the application report for an application id. * * @param applicationId - * @return exitCode + * @return ApplicationReport * @throws YarnException */ - private int printApplicationReport(String applicationId) + private ApplicationReport printApplicationReport(String applicationId) throws YarnException, IOException { ApplicationReport appReport = null; try { @@ -675,7 +900,7 @@ public class ApplicationCLI extends YarnCLI { } catch (ApplicationNotFoundException e) { sysout.println("Application with id '" + applicationId + "' doesn't exist in RM or Timeline Server."); - return -1; + return null; } // Use PrintWriter.println, which uses correct platform line ending. 
ByteArrayOutputStream baos = new ByteArrayOutputStream(); @@ -739,11 +964,11 @@ public class ApplicationCLI extends YarnCLI { + "' doesn't exist in RM."); appReportStr.close(); sysout.println(baos.toString("UTF-8")); - return -1; + return null; } appReportStr.close(); sysout.println(baos.toString("UTF-8")); - return 0; + return appReport; } private void printResourceUsage(PrintWriter appReportStr, @@ -856,11 +1081,12 @@ public class ApplicationCLI extends YarnCLI { @SuppressWarnings("unchecked") private boolean hasAnyOtherCLIOptions(CommandLine cliParser, Options opts, - String excludeOption) { + String... excludeOptions) { Collection
+ + org.apache.hadoop + hadoop-yarn-services-core +