entry : conf) {
+ if (entry.getKey().startsWith(prefix)) {
+ define(entry.getKey(), entry.getValue());
+ copied++;
+ }
+ }
+ return copied;
+ }
+
+ /**
+ * Add a configuration option to the command line of the application
+ * @param conf configuration
+ * @param key key
+ * @param defVal default value
+ * @return the resolved configuration option
+ * @throws IllegalArgumentException if key is null or the looked up value
+ * is null (that is: the argument is missing and defVal was null).
+ */
+ public String addConfOptionToCLI(Configuration conf,
+ String key,
+ String defVal) {
+ Preconditions.checkArgument(key != null, "null key");
+ String val = conf.get(key, defVal);
+ define(key, val);
+ return val;
+ }
+
+ /**
+ * Add a <code>-D key=val</code>
+ * command to the CLI. This is very Hadoop API
+ * @param key key
+ * @param val value
+ * @throws IllegalArgumentException if either argument is null
+ */
+ public void define(String key, String val) {
+ Preconditions.checkArgument(key != null, "null key");
+ Preconditions.checkArgument(val != null, "null value");
+ add("-D", key + "=" + val);
+ }
+
+ /**
+ * Add a <code>-D key=val</code>
+ * command to the CLI if <code>val</code>
+ * is not null
+ * @param key key
+ * @param val value
+ */
+ public boolean defineIfSet(String key, String val) {
+ Preconditions.checkArgument(key != null, "null key");
+ if (val != null) {
+ define(key, val);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /**
+ * Add a mandatory config option
+ * @param conf configuration
+ * @param key key
+ * @throws BadConfigException if the key is missing
+ */
+ public void addMandatoryConfOption(Configuration conf,
+ String key) throws BadConfigException {
+ if (!addConfOption(conf, key)) {
+ throw new BadConfigException("Missing configuration option: " + key);
+ }
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadClusterStateException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadClusterStateException.java
new file mode 100644
index 00000000000..db9de7a1c26
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadClusterStateException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+import org.apache.hadoop.yarn.service.exceptions.SliderException;
+
+/**
+ * The system is in a bad state
+ */
+public class BadClusterStateException extends SliderException {
+ public BadClusterStateException(String message,
+ Object... args) {
+ super(EXIT_BAD_STATE, message, args);
+ }
+
+ public BadClusterStateException(Throwable throwable,
+ String message, Object... args) {
+ super(EXIT_BAD_STATE, throwable, message, args);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadCommandArgumentsException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadCommandArgumentsException.java
new file mode 100644
index 00000000000..41e325159d2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadCommandArgumentsException.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+public class BadCommandArgumentsException extends SliderException {
+ public BadCommandArgumentsException(String s, Object... args) {
+ super(EXIT_COMMAND_ARGUMENT_ERROR, s, args);
+ }
+
+ public BadCommandArgumentsException(Throwable throwable, String message,
+ Object... args) {
+ super(EXIT_COMMAND_ARGUMENT_ERROR, throwable, message, args);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadConfigException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadConfigException.java
new file mode 100644
index 00000000000..8199c3c17ed
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadConfigException.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+/**
+ * An exception to raise on a bad configuration
+ */
+public class BadConfigException extends SliderException {
+
+ public BadConfigException(String s) {
+ super(EXIT_BAD_CONFIGURATION, s);
+ }
+
+ public BadConfigException(String message, Object... args) {
+ super(EXIT_BAD_CONFIGURATION, message, args);
+ }
+
+ public BadConfigException(
+ Throwable throwable,
+ String message, Object... args) {
+ super(EXIT_BAD_CONFIGURATION, throwable, message, args);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java
new file mode 100644
index 00000000000..83658c89ea2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+public interface ErrorStrings {
+
+ String PRINTF_E_INSTANCE_ALREADY_EXISTS = "Service Instance \"%s\" already exists and is defined in %s";
+ String PRINTF_E_INSTANCE_DIR_ALREADY_EXISTS = "Service Instance dir already exists: %s";
+
+ /**
+ * ERROR Strings
+ */
+ String ERROR_NO_ACTION = "No action specified";
+ String ERROR_UNKNOWN_ACTION = "Unknown command: ";
+ String ERROR_NOT_ENOUGH_ARGUMENTS =
+ "Not enough arguments for action: ";
+ String ERROR_PARSE_FAILURE =
+ "Failed to parse ";
+ /**
+ * All the remaining values after argument processing
+ */
+ String ERROR_TOO_MANY_ARGUMENTS =
+ "Too many arguments";
+ String ERROR_DUPLICATE_ENTRY = "Duplicate entry for ";
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ExitCodeProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ExitCodeProvider.java
new file mode 100644
index 00000000000..d66b86030eb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ExitCodeProvider.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+/**
+ * Get the exit code of an exception. Making it an interface allows
+ * us to retrofit exit codes onto existing classes
+ */
+public interface ExitCodeProvider {
+
+ /**
+ * Method to get the exit code
+ * @return the exit code
+ */
+ int getExitCode();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/LauncherExitCodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/LauncherExitCodes.java
new file mode 100644
index 00000000000..483fb48d465
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/LauncherExitCodes.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+/*
+ * Common Exit codes
+ *
+ * Exit codes from 64 up are service specific.
+ *
+ * Many of the exit codes are designed to resemble HTTP error codes,
+ * squashed into a single byte. e.g 44 , "not found" is the equivalent
+ * of 404
+ *
+ * 0-10: general command issues
+ * 30-39: equivalent to the 3XX responses, where those responses are
+ * considered errors by the service.
+ * 40-49: request-related errors
+ * 50-59: server-side problems. These may be triggered by the request.
+ * 64- : service specific error codes
+ *
+ */
+public interface LauncherExitCodes {
+
+ /**
+ * 0: success
+ */
+ int EXIT_SUCCESS = 0;
+
+ /**
+ * -1: generic "false" response. The operation worked but
+ * the result was not true
+ */
+ int EXIT_FALSE = -1;
+
+ /**
+ * Exit code when a client requested service termination: {@value}
+ */
+ int EXIT_CLIENT_INITIATED_SHUTDOWN = 1;
+
+ /**
+ * Exit code when targets could not be launched: {@value}
+ */
+ int EXIT_TASK_LAUNCH_FAILURE = 2;
+
+ /**
+ * Exit code when a control-C, kill -3, signal was picked up: {@value}
+ */
+ int EXIT_INTERRUPTED = 3;
+
+ /**
+ * Exit code when a usage message was printed: {@value}
+ */
+ int EXIT_USAGE = 4;
+
+ /**
+ * Exit code when something happened but we can't be specific: {@value}
+ */
+ int EXIT_OTHER_FAILURE = 5;
+
+ /**
+ * Exit code on connectivity problems: {@value}
+ */
+ int EXIT_MOVED = 31;
+
+ /**
+ * found: {@value}.
+ *
+ * This is low value as in HTTP it is normally a success/redirect;
+ * whereas on the command line 0 is the sole success code.
+ *
+ * 302 Found
+ */
+ int EXIT_FOUND = 32;
+
+ /**
+ * Exit code on a request where the destination has not changed
+ * and (somehow) the command specified that this is an error.
+ * That is, this exit code is somehow different from a "success"
+ * : {@value}
+ *
+ * 304 Not Modified
+ */
+ int EXIT_NOT_MODIFIED = 34;
+
+ /**
+ * Exit code when the command line doesn't parse: {@value}, or
+ * when it is otherwise invalid.
+ *
+ * 400 BAD REQUEST
+ */
+ int EXIT_COMMAND_ARGUMENT_ERROR = 40;
+
+ /**
+ * The request requires user authentication: {@value}
+ *
+ * 401 Unauthorized
+ */
+ int EXIT_UNAUTHORIZED = 41;
+
+ /**
+ * Forbidden action: {@value}
+ *
+ * 403: Forbidden
+ */
+ int EXIT_FORBIDDEN = 43;
+
+ /**
+ * Something was not found: {@value}
+ *
+ * 404: NOT FOUND
+ */
+ int EXIT_NOT_FOUND = 44;
+
+ /**
+ * The operation is not allowed: {@value}
+ *
+ * 405: NOT ALLOWED
+ */
+ int EXIT_OPERATION_NOT_ALLOWED = 45;
+
+ /**
+ * The command is somehow not acceptable: {@value}
+ *
+ * 406: NOT ACCEPTABLE
+ */
+ int EXIT_NOT_ACCEPTABLE = 46;
+
+ /**
+ * Exit code on connectivity problems: {@value}
+ *
+ * 408: Request Timeout
+ */
+ int EXIT_CONNECTIVITY_PROBLEM = 48;
+
+ /**
+ * The request could not be completed due to a conflict with the current
+ * state of the resource. {@value}
+ *
+ * 409: conflict
+ */
+ int EXIT_CONFLICT = 49;
+
+ /**
+ * internal error: {@value}
+ *
+ * 500 Internal Server Error
+ */
+ int EXIT_INTERNAL_ERROR = 50;
+
+ /**
+ * Unimplemented feature: {@value}
+ *
+ * 501: Not Implemented
+ */
+ int EXIT_UNIMPLEMENTED = 51;
+
+ /**
+ * Service Unavailable; it may be available later: {@value}
+ *
+ * 503 Service Unavailable
+ */
+ int EXIT_SERVICE_UNAVAILABLE = 53;
+
+ /**
+ * The service does not support, or refuses to support this version: {@value}.
+ * If raised, this is expected to be raised server-side and likely due
+ * to client/server version incompatibilities.
+ *
+ * 505: Version Not Supported
+ */
+ int EXIT_UNSUPPORTED_VERSION = 55;
+
+ /**
+ * Exit code when an exception was thrown from the service: {@value}
+ *
+ * 5XX
+ */
+ int EXIT_EXCEPTION_THROWN = 56;
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
new file mode 100644
index 00000000000..ef22b578051
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+public interface RestApiErrorMessages {
+ String ERROR_APPLICATION_NAME_INVALID =
+ "Service name is either empty or not provided";
+ String ERROR_APPLICATION_NAME_INVALID_FORMAT =
+ "Service name %s is not valid - only lower case letters, digits, " +
+ "and hyphen are allowed, and the name must be no more " +
+ "than 63 characters";
+ String ERROR_COMPONENT_NAME_INVALID =
+ "Component name must be no more than %s characters: %s";
+ String ERROR_USER_NAME_INVALID =
+ "User name must be no more than 63 characters";
+
+ String ERROR_APPLICATION_NOT_RUNNING = "Service not running";
+ String ERROR_APPLICATION_DOES_NOT_EXIST = "Service not found";
+ String ERROR_APPLICATION_IN_USE = "Service already exists in started"
+ + " state";
+ String ERROR_APPLICATION_INSTANCE_EXISTS = "Service already exists in"
+ + " stopped/failed state (either restart with PUT or destroy with DELETE"
+ + " before creating a new one)";
+
+ String ERROR_SUFFIX_FOR_COMPONENT =
+ " for component %s (nor at the global level)";
+ String ERROR_ARTIFACT_INVALID = "Artifact is not provided";
+ String ERROR_ARTIFACT_FOR_COMP_INVALID =
+ ERROR_ARTIFACT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+ String ERROR_ARTIFACT_ID_INVALID =
+ "Artifact id (like docker image name) is either empty or not provided";
+ String ERROR_ARTIFACT_ID_FOR_COMP_INVALID =
+ ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+
+ String ERROR_RESOURCE_INVALID = "Resource is not provided";
+ String ERROR_RESOURCE_FOR_COMP_INVALID =
+ ERROR_RESOURCE_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+ String ERROR_RESOURCE_MEMORY_INVALID =
+ "Service resource or memory not provided";
+ String ERROR_RESOURCE_CPUS_INVALID =
+ "Service resource or cpus not provided";
+ String ERROR_RESOURCE_CPUS_INVALID_RANGE =
+ "Unacceptable no of cpus specified, either zero or negative";
+ String ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID =
+ ERROR_RESOURCE_MEMORY_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+ String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID =
+ ERROR_RESOURCE_CPUS_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+ String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE =
+ ERROR_RESOURCE_CPUS_INVALID_RANGE
+ + " for component %s (or at the global level)";
+ String ERROR_CONTAINERS_COUNT_INVALID =
+ "Invalid no of containers specified";
+ String ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID =
+ ERROR_CONTAINERS_COUNT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+ String ERROR_DEPENDENCY_INVALID = "Dependency %s for component %s is " +
+ "invalid, does not exist as a component";
+ String ERROR_DEPENDENCY_CYCLE = "Invalid dependencies, a cycle may " +
+ "exist: %s";
+
+ String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED =
+ "Cannot specify" + " cpus/memory along with profile";
+ String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED =
+ ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED
+ + " for component %s";
+ String ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET =
+ "Resource profile is not " + "supported yet. Please specify cpus/memory.";
+
+ String ERROR_NULL_ARTIFACT_ID =
+ "Artifact Id can not be null if artifact type is none";
+ String ERROR_ABSENT_NUM_OF_INSTANCE =
+ "Num of instances should appear either globally or per component";
+ String ERROR_ABSENT_LAUNCH_COMMAND =
+ "Launch_command is required when type is not DOCKER";
+
+ String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at"
+ + " component level, needs corresponding values set at service level";
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ServiceLaunchException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ServiceLaunchException.java
new file mode 100644
index 00000000000..e83ccbe5973
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ServiceLaunchException.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * A service launch exception that includes an exit code;
+ * when caught by the ServiceLauncher, it will convert that
+ * into a process exit code.
+ */
+public class ServiceLaunchException extends YarnException
+ implements ExitCodeProvider, LauncherExitCodes {
+
+ private final int exitCode;
+
+ /**
+ * Create an exception with the specific exit code
+ * @param exitCode exit code
+ * @param cause cause of the exception
+ */
+ public ServiceLaunchException(int exitCode, Throwable cause) {
+ super(cause);
+ this.exitCode = exitCode;
+ }
+
+ /**
+ * Create an exception with the specific exit code and text
+ * @param exitCode exit code
+ * @param message message to use in exception
+ */
+ public ServiceLaunchException(int exitCode, String message) {
+ super(message);
+ this.exitCode = exitCode;
+ }
+
+ /**
+ * Create an exception with the specific exit code, text and cause
+ * @param exitCode exit code
+ * @param message message to use in exception
+ * @param cause cause of the exception
+ */
+ public ServiceLaunchException(int exitCode, String message, Throwable cause) {
+ super(message, cause);
+ this.exitCode = exitCode;
+ }
+
+ /**
+ * Get the exit code
+ * @return the exit code
+ */
+ @Override
+ public int getExitCode() {
+ return exitCode;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/SliderException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/SliderException.java
new file mode 100644
index 00000000000..5b74b80e298
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/SliderException.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+import org.apache.hadoop.yarn.service.conf.SliderExitCodes;
+
+public class SliderException extends ServiceLaunchException implements
+ SliderExitCodes {
+ public SliderException() {
+ super(EXIT_EXCEPTION_THROWN, "SliderException");
+ }
+
+ public SliderException(int code, String message) {
+ super(code, message);
+ }
+
+ public SliderException(String s) {
+ super(EXIT_EXCEPTION_THROWN, s);
+ }
+
+ public SliderException(String s, Throwable throwable) {
+ super(EXIT_EXCEPTION_THROWN, s, throwable);
+ }
+
+ /**
+ * Format the exception as you create it
+ * @param code exit code
+ * @param message exception message -sprintf formatted
+ * @param args arguments for the formatting
+ */
+ public SliderException(int code, String message, Object... args) {
+ super(code, String.format(message, args));
+ }
+
+ /**
+ * Format the exception, include a throwable.
+ * The throwable comes before the message so that it is out of the varargs
+ * @param code exit code
+ * @param throwable thrown
+ * @param message message
+ * @param args arguments
+ */
+ public SliderException(int code,
+ Throwable throwable,
+ String message,
+ Object... args) {
+ super(code, String.format(message, args), throwable);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/UsageException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/UsageException.java
new file mode 100644
index 00000000000..3a9fa25507d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/UsageException.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.exceptions;
+
+/**
+ * Used to raise a usage exception ... this has the exit code
+ * {@link #EXIT_USAGE}
+ */
+public class UsageException extends SliderException {
+ public UsageException(String s, Object... args) {
+ super(EXIT_USAGE, s, args);
+ }
+
+ public UsageException(Throwable throwable, String message,
+ Object... args) {
+ super(EXIT_USAGE, throwable, message, args);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java
new file mode 100644
index 00000000000..33e33a62269
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.impl.pb.client;
+
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.service.ClientAMProtocol;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto;
+import org.apache.hadoop.yarn.service.impl.pb.service.ClientAMProtocolPB;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto;
+
+public class ClientAMProtocolPBClientImpl // client-side adapter: translates ClientAMProtocol calls into protobuf RPC invocations
+ implements ClientAMProtocol, Closeable {
+
+ private ClientAMProtocolPB proxy; // RPC proxy to the service AM; released in close()
+
+ public ClientAMProtocolPBClientImpl(long clientVersion,
+ InetSocketAddress addr, Configuration conf) throws IOException {
+ RPC.setProtocolEngine(conf, ClientAMProtocolPB.class, // register protobuf as the wire engine before creating the proxy
+ ProtobufRpcEngine.class);
+ proxy = RPC.getProxy(ClientAMProtocolPB.class, clientVersion, addr, conf);
+
+ }
+
+ @Override public FlexComponentsResponseProto flexComponents(
+ FlexComponentsRequestProto request) throws IOException, YarnException {
+ try {
+ return proxy.flexComponents(null, request); // null RpcController: not used by the Hadoop protobuf engine
+ } catch (ServiceException e) {
+ RPCUtil.unwrapAndThrowException(e); // rethrows the wrapped IOException/YarnException; believed to always throw -- confirm
+ }
+ return null; // NOTE(review): appears unreachable, present only to satisfy the compiler
+ }
+
+ @Override
+ public GetStatusResponseProto getStatus(GetStatusRequestProto request)
+ throws IOException, YarnException {
+ try {
+ return proxy.getStatus(null, request);
+ } catch (ServiceException e) {
+ RPCUtil.unwrapAndThrowException(e); // same unwrap-and-rethrow pattern as flexComponents
+ }
+ return null;
+ }
+
+ @Override
+ public StopResponseProto stop(StopRequestProto requestProto)
+ throws IOException, YarnException {
+ try {
+ return proxy.stop(null, requestProto);
+ } catch (ServiceException e) {
+ RPCUtil.unwrapAndThrowException(e);
+ }
+ return null;
+ }
+
+ @Override public void close() { // idempotent: safe to call when the proxy was never created
+ if (this.proxy != null) {
+ RPC.stopProxy(this.proxy);
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPB.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPB.java
new file mode 100644
index 00000000000..6a9cd3785eb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPB.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.impl.pb.service;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol;
+
+@ProtocolInfo(
+ protocolName = "org.apache.hadoop.yarn.service.ClientAMProtocol",
+ protocolVersion = 1)
+public interface ClientAMProtocolPB extends // marker interface: binds the generated protobuf blocking service to Hadoop RPC under the declared protocol name/version
+ ClientAMProtocol.ClientAMProtocolService.BlockingInterface {
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPBServiceImpl.java
new file mode 100644
index 00000000000..710078112a0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPBServiceImpl.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.impl.pb.service;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto;
+import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto;
+import org.apache.hadoop.yarn.service.ClientAMProtocol;
+
+import java.io.IOException;
+
+public class ClientAMProtocolPBServiceImpl implements ClientAMProtocolPB { // server-side adapter: unwraps protobuf RPC calls onto the real ClientAMProtocol implementation
+
+ private ClientAMProtocol real; // the actual protocol implementation this PB shim delegates to
+
+ public ClientAMProtocolPBServiceImpl(ClientAMProtocol impl) {
+ this.real = impl;
+ }
+
+ @Override
+ public FlexComponentsResponseProto flexComponents(RpcController controller,
+ FlexComponentsRequestProto request) throws ServiceException {
+ try {
+ return real.flexComponents(request);
+ } catch (IOException | YarnException e) {
+ throw new ServiceException(e); // wrap so the RPC layer can marshal the failure back to the client
+ }
+ }
+
+ @Override public GetStatusResponseProto getStatus(RpcController controller,
+ GetStatusRequestProto request) throws ServiceException {
+ try {
+ return real.getStatus(request);
+ } catch (IOException | YarnException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
+ public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto stop(
+ RpcController controller,
+ org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request)
+ throws ServiceException {
+ try {
+ return real.stop(request);
+ } catch (IOException | YarnException e) {
+ throw new ServiceException(e);
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java
new file mode 100644
index 00000000000..982448ad713
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.monitor;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.service.ServiceContext;
+import org.apache.hadoop.yarn.service.component.Component;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.apache.hadoop.yarn.service.component.ComponentEvent;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
+import org.apache.hadoop.yarn.service.component.ComponentState;
+import org.apache.hadoop.yarn.service.monitor.probe.ProbeStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.STARTED;
+import static org.apache.hadoop.yarn.service.component.ComponentEventType.FLEX;
+import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_NOT_READY;
+import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_READY;
+import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.READY;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_WINDOW;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_INTERVAL;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.READINESS_CHECK_INTERVAL;
+
+public class ServiceMonitor extends AbstractService {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ServiceMonitor.class);
+
+ public ScheduledExecutorService executorService;
+ private Map liveInstances = null;
+ private ServiceContext context;
+ private Configuration conf;
+
+ public ServiceMonitor(String name, ServiceContext context) {
+ super(name);
+ liveInstances = context.scheduler.getLiveInstances();
+ this.context = context;
+ }
+
+ @Override
+ public void serviceInit(Configuration conf) throws Exception {
+ executorService = Executors.newScheduledThreadPool(1);
+ this.conf = conf;
+ super.serviceInit(conf);
+ }
+
+ @Override
+ public void serviceStart() throws Exception {
+ long readinessCheckInterval = YarnServiceConf
+ .getLong(READINESS_CHECK_INTERVAL, DEFAULT_READINESS_CHECK_INTERVAL,
+ context.service.getConfiguration(), conf);
+
+ executorService
+ .scheduleAtFixedRate(new ReadinessChecker(), readinessCheckInterval,
+ readinessCheckInterval, TimeUnit.SECONDS);
+
+ // Default 6 hours.
+ long failureResetInterval = YarnServiceConf
+ .getLong(CONTAINER_FAILURE_WINDOW, 21600,
+ context.service.getConfiguration(), conf);
+
+ executorService
+ .scheduleAtFixedRate(new ContainerFailureReset(), failureResetInterval,
+ failureResetInterval, TimeUnit.SECONDS);
+ }
+
+ @Override
+ public void serviceStop() throws Exception {
+ if (executorService != null) {
+ executorService.shutdownNow();
+ }
+ }
+
+ private class ReadinessChecker implements Runnable {
+
+ @Override
+ public void run() {
+
+ // check if the comp instance are ready
+ for (Map.Entry entry : liveInstances
+ .entrySet()) {
+ ComponentInstance instance = entry.getValue();
+
+ ProbeStatus status = instance.ping();
+ if (status.isSuccess()) {
+ if (instance.getState() == STARTED) {
+ // synchronously update the state.
+ instance.handle(
+ new ComponentInstanceEvent(entry.getKey(), BECOME_READY));
+ }
+ } else {
+ if (instance.getState() == READY) {
+ instance.handle(
+ new ComponentInstanceEvent(entry.getKey(), BECOME_NOT_READY));
+ }
+ }
+ }
+
+ for (Component component : context.scheduler.getAllComponents()
+ .values()) {
+ // If comp hasn't started yet and its dependencies are satisfied
+ if (component.getState() == ComponentState.INIT && component
+ .areDependenciesReady()) {
+ LOG.info("[COMPONENT {}]: Dependencies satisfied, ramping up.",
+ component.getName());
+ ComponentEvent event = new ComponentEvent(component.getName(), FLEX)
+ .setDesired(component.getComponentSpec().getNumberOfContainers());
+ component.handle(event);
+ }
+ }
+ }
+ }
+
+ private class ContainerFailureReset implements Runnable {
+ @Override
+ public void run() {
+ for (Component component : context.scheduler.getAllComponents().values()) {
+ component.resetCompFailureCount();
+ }
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java
new file mode 100644
index 00000000000..1ed13a9c360
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.Map;
+
+public class HttpProbe extends Probe {
+ protected static final Logger log = LoggerFactory.getLogger(HttpProbe.class);
+
+ private static final String HOST_TOKEN = "${THIS_HOST}";
+
+ private final String urlString;
+ private final int timeout;
+ private final int min, max;
+
+
+ public HttpProbe(String url, int timeout, int min, int max, Configuration
+ conf) {
+ super("Http probe of " + url + " [" + min + "-" + max + "]", conf);
+ this.urlString = url;
+ this.timeout = timeout;
+ this.min = min;
+ this.max = max;
+ }
+
+ public static HttpProbe create(Map<String, String> props)
+ throws IOException {
+ String urlString = getProperty(props, WEB_PROBE_URL, null);
+ new URL(urlString); // fail fast on a malformed URL; the constructed URL is intentionally discarded
+ int timeout = getPropertyInt(props, WEB_PROBE_CONNECT_TIMEOUT,
+ WEB_PROBE_CONNECT_TIMEOUT_DEFAULT);
+ int minSuccess = getPropertyInt(props, WEB_PROBE_MIN_SUCCESS,
+ WEB_PROBE_MIN_SUCCESS_DEFAULT);
+ int maxSuccess = getPropertyInt(props, WEB_PROBE_MAX_SUCCESS,
+ WEB_PROBE_MAX_SUCCESS_DEFAULT);
+ return new HttpProbe(urlString, timeout, minSuccess, maxSuccess, null);
+ }
+
+
+ private static HttpURLConnection getConnection(URL url, int timeout) throws
+ IOException {
+ HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+ connection.setInstanceFollowRedirects(true);
+ connection.setConnectTimeout(timeout);
+ return connection;
+ }
+
+ @Override
+ public ProbeStatus ping(ComponentInstance instance) {
+ ProbeStatus status = new ProbeStatus();
+ ContainerStatus containerStatus = instance.getContainerStatus();
+ if (containerStatus == null || ServiceUtils.isEmpty(containerStatus.getIPs())
+ || StringUtils.isEmpty(containerStatus.getHost())) {
+ status.fail(this, new IOException("IP is not available yet"));
+ return status;
+ }
+
+ String ip = containerStatus.getIPs().get(0);
+ HttpURLConnection connection = null;
+ try {
+ URL url = new URL(urlString.replace(HOST_TOKEN, ip)); // substitute the container IP into the probe URL template
+ connection = getConnection(url, this.timeout);
+ int rc = connection.getResponseCode();
+ if (rc < min || rc > max) {
+ String error = "Probe " + url + " error code: " + rc;
+ log.info(error);
+ status.fail(this,
+ new IOException(error));
+ } else {
+ status.succeed(this);
+ }
+ } catch (Throwable e) {
+ String error = "Probe " + urlString + " failed for IP " + ip + ": " + e;
+ log.info(error, e);
+ status.fail(this,
+ new IOException(error, e));
+ } finally {
+ if (connection != null) {
+ connection.disconnect();
+ }
+ }
+ return status;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/LogEntryBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/LogEntryBuilder.java
new file mode 100644
index 00000000000..9ad86fe61bb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/LogEntryBuilder.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+/**
+ * Build up log entries for ease of splunk
+ */
+public class LogEntryBuilder { // accumulates comma-separated name="value" pairs into a single log line
+
+ private final StringBuilder builder = new StringBuilder();
+
+ public LogEntryBuilder() {
+ }
+
+ public LogEntryBuilder(String text) { // start the entry with a bare text element
+ elt(text);
+ }
+
+
+ public LogEntryBuilder(String name, Object value) { // start the entry with a name="value" pair
+ entry(name, value);
+ }
+
+ public LogEntryBuilder elt(String text) { // append a bare element; returns this for chaining
+ addComma();
+ builder.append(text);
+ return this;
+ }
+
+ public LogEntryBuilder elt(String name, Object value) { // append a name="value" pair; returns this for chaining
+ addComma();
+ entry(name, value);
+ return this;
+ }
+
+ private void addComma() { // separator is only added between elements, never before the first
+ if (!isEmpty()) {
+ builder.append(", ");
+ }
+ }
+
+ private void entry(String name, Object value) { // null values are rendered unquoted as the literal text null
+ builder.append(name).append('=');
+ if (value != null) {
+ builder.append('"').append(value.toString()).append('"');
+ } else {
+ builder.append("null");
+ }
+ }
+
+ @Override
+ public String toString() {
+ return builder.toString();
+ }
+
+ private boolean isEmpty() {
+ return builder.length() == 0;
+ }
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java
new file mode 100644
index 00000000000..55b55f68eec
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+/**
+ * Config keys for monitoring
+ */
+public interface MonitorKeys {
+
+ /**
+ * Port probing key : port to attempt to create a TCP connection to {@value}.
+ */
+ String PORT_PROBE_PORT = "port";
+ /**
+ * Port probing key : timeout for the connection attempt {@value}.
+ */
+ String PORT_PROBE_CONNECT_TIMEOUT = "timeout";
+ /**
+ * Port probing default : timeout for the connection attempt {@value}.
+ */
+ int PORT_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000;
+
+ /**
+ * Web probing key : URL {@value}.
+ */
+ String WEB_PROBE_URL = "url";
+ /**
+ * Web probing key : min success code {@value}.
+ */
+ String WEB_PROBE_MIN_SUCCESS = "min.success";
+ /**
+ * Web probing key : max success code {@value}.
+ */
+ String WEB_PROBE_MAX_SUCCESS = "max.success";
+ /**
+ * Web probing default : min successful response code {@value}.
+ */
+ int WEB_PROBE_MIN_SUCCESS_DEFAULT = 200;
+ /**
+ * Web probing default : max successful response code {@value}.
+ */
+ int WEB_PROBE_MAX_SUCCESS_DEFAULT = 299;
+ /**
+ * Web probing key : timeout for the connection attempt {@value}.
+ */
+ String WEB_PROBE_CONNECT_TIMEOUT = "timeout";
+ /**
+ * Web probing default : timeout for the connection attempt {@value}.
+ */
+ int WEB_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000;
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java
new file mode 100644
index 00000000000..c4f63aee7ae
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+import org.apache.hadoop.yarn.service.api.records.ReadinessCheck;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Formatter;
+import java.util.Locale;
+
+/**
+ * Various utils to work with the monitor
+ */
+public final class MonitorUtils {
+ protected static final Logger LOG = LoggerFactory.getLogger(MonitorUtils
+ .class);
+
+ private MonitorUtils() { // utility class: no instances
+ }
+
+ public static String toPlural(int val) {
+ return val != 1 ? "s" : "";
+ }
+
+ /**
+ * Convert milliseconds to human time -the exact format is unspecified
+ * @param milliseconds a time in milliseconds
+ * @return a time that is converted to human intervals
+ */
+ public static String millisToHumanTime(long milliseconds) {
+ StringBuilder sb = new StringBuilder();
+ // Send all output to the Appendable object sb
+ Formatter formatter = new Formatter(sb, Locale.US);
+
+ long s = Math.abs(milliseconds / 1000);
+ long m = Math.abs(milliseconds % 1000);
+ if (milliseconds > 0) {
+ formatter.format("%d.%03ds", s, m);
+ } else if (milliseconds == 0) {
+ formatter.format("0");
+ } else {
+ formatter.format("-%d.%03ds", s, m);
+ }
+ return sb.toString();
+ }
+
+ public static Probe getProbe(ReadinessCheck readinessCheck) {
+ if (readinessCheck == null) {
+ return null;
+ }
+ if (readinessCheck.getType() == null) {
+ return null;
+ }
+ try {
+ switch (readinessCheck.getType()) {
+ case HTTP:
+ return HttpProbe.create(readinessCheck.getProperties());
+ case PORT:
+ return PortProbe.create(readinessCheck.getProperties());
+ default:
+ return null;
+ }
+ } catch (Throwable t) {
+ throw new IllegalArgumentException("Error creating readiness check " +
+ t, t); // pass t as the cause so the original stack trace is preserved
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java
new file mode 100644
index 00000000000..85569f86d4a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.util.Map;
+
+/**
+ * Probe for a port being open.
+ */
+public class PortProbe extends Probe {
+ protected static final Logger log = LoggerFactory.getLogger(PortProbe.class);
+ private final int port;
+ private final int timeout;
+
+ public PortProbe(int port, int timeout) {
+ super("Port probe of " + port + " for " + timeout + "ms", null);
+ this.port = port;
+ this.timeout = timeout;
+ }
+
+ public static PortProbe create(Map<String, String> props)
+ throws IOException {
+ int port = getPropertyInt(props, PORT_PROBE_PORT, null);
+
+ if (port <= 0 || port >= 65536) { // reject non-positive ports too, not just values above the TCP range
+ throw new IOException(PORT_PROBE_PORT + " " + port + " is out of " +
+ "range");
+ }
+
+ int timeout = getPropertyInt(props, PORT_PROBE_CONNECT_TIMEOUT,
+ PORT_PROBE_CONNECT_TIMEOUT_DEFAULT);
+
+ return new PortProbe(port, timeout);
+ }
+
+ /**
+ * Try to connect to the (host,port); a failure to connect within
+ * the specified timeout is a failure.
+ * @param instance role instance
+ * @return the outcome
+ */
+ @Override
+ public ProbeStatus ping(ComponentInstance instance) {
+ ProbeStatus status = new ProbeStatus();
+
+ if (instance.getContainerStatus() == null || ServiceUtils
+ .isEmpty(instance.getContainerStatus().getIPs())) {
+ status.fail(this, new IOException(
+ instance.getCompInstanceName() + ": IP is not available yet"));
+ return status;
+ }
+
+ String ip = instance.getContainerStatus().getIPs().get(0);
+ InetSocketAddress sockAddr = new InetSocketAddress(ip, port);
+ Socket socket = new Socket();
+ try {
+ if (log.isDebugEnabled()) {
+ log.debug(instance.getCompInstanceName() + ": Connecting " + sockAddr
+ .toString() + ", timeout=" + MonitorUtils
+ .millisToHumanTime(timeout));
+ }
+ socket.connect(sockAddr, timeout);
+ status.succeed(this);
+ } catch (Throwable e) {
+ String error =
+ instance.getCompInstanceName() + ": Probe " + sockAddr + " failed";
+ log.debug(error, e);
+ status.fail(this, new IOException(error, e));
+ } finally {
+ IOUtils.closeSocket(socket); // always release the socket, success or failure
+ }
+ return status;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java
new file mode 100644
index 00000000000..3237a2bd499
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Base class of all probes.
+ */
+public abstract class Probe implements MonitorKeys {
+
+ protected final Configuration conf;
+ private String name;
+
+ /**
+ * Create a probe of a specific name
+ *
+ * @param name probe name
+ * @param conf configuration being stored.
+ */
+ public Probe(String name, Configuration conf) {
+ this.name = name;
+ this.conf = conf;
+ }
+
+
+ protected void setName(String name) {
+ this.name = name;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+
+ @Override
+ public String toString() {
+ return getName();
+ }
+
+ public static String getProperty(Map props, String name,
+ String defaultValue) throws IOException {
+ String value = props.get(name);
+ if (StringUtils.isEmpty(value)) {
+ if (defaultValue == null) {
+ throw new IOException(name + " not specified");
+ }
+ return defaultValue;
+ }
+ return value;
+ }
+
+ public static int getPropertyInt(Map props, String name,
+ Integer defaultValue) throws IOException {
+ String value = props.get(name);
+ if (StringUtils.isEmpty(value)) {
+ if (defaultValue == null) {
+ throw new IOException(name + " not specified");
+ }
+ return defaultValue;
+ }
+ return Integer.parseInt(value);
+ }
+
+ /**
+ * perform any prelaunch initialization
+ */
+ public void init() throws IOException {
+
+ }
+
+ /**
+ * Ping the endpoint. All exceptions must be caught and included in the
+ * (failure) status.
+ *
+ * @param instance instance to ping
+ * @return the status
+ */
+ public abstract ProbeStatus ping(ComponentInstance instance);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/ProbeStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/ProbeStatus.java
new file mode 100644
index 00000000000..bc62dcd0c1c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/ProbeStatus.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+import java.io.Serializable;
+import java.util.Date;
+
+/**
+ * Status message of a probe. This is designed to be sent over the wire, though the exception
+ * Had better be unserializable at the far end if that is to work.
+ */
+public final class ProbeStatus implements Serializable {
+ private static final long serialVersionUID = 165468L;
+
+ private long timestamp;
+ private String timestampText;
+ private boolean success;
+ private boolean realOutcome;
+ private String message;
+ private Throwable thrown;
+ private transient Probe originator;
+
+ public ProbeStatus() {
+ }
+
+ public ProbeStatus(long timestamp, String message, Throwable thrown) {
+ this.success = false;
+ this.message = message;
+ this.thrown = thrown;
+ setTimestamp(timestamp);
+ }
+
+ public ProbeStatus(long timestamp, String message) {
+ this.success = true;
+ setTimestamp(timestamp);
+ this.message = message;
+ this.thrown = null;
+ }
+
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ public void setTimestamp(long timestamp) {
+ this.timestamp = timestamp;
+ timestampText = new Date(timestamp).toString();
+ }
+
+ public boolean isSuccess() {
+ return success;
+ }
+
+ /**
+ * Set both the success and the real outcome bits to the same value
+ * @param success the new value
+ */
+ public void setSuccess(boolean success) {
+ this.success = success;
+ realOutcome = success;
+ }
+
+ public String getTimestampText() {
+ return timestampText;
+ }
+
+ public boolean getRealOutcome() {
+ return realOutcome;
+ }
+
+ public String getMessage() {
+ return message;
+ }
+
+ public void setMessage(String message) {
+ this.message = message;
+ }
+
+ public Throwable getThrown() {
+ return thrown;
+ }
+
+ public void setThrown(Throwable thrown) {
+ this.thrown = thrown;
+ }
+
+ /**
+ * Get the probe that generated this result. May be null
+ * @return a possibly null reference to a probe
+ */
+ public Probe getOriginator() {
+ return originator;
+ }
+
+ /**
+ * The probe has succeeded -capture the current timestamp, set
+ * success to true, and record any other data needed.
+ * @param probe probe
+ */
+ public void succeed(Probe probe) {
+ finish(probe, true, probe.getName(), null);
+ }
+
+ /**
+ * A probe has failed either because the test returned false, or an exception
+ * was thrown. The {@link #success} field is set to false, any exception
+ * thrown is recorded.
+ * @param probe probe that failed
+ * @param thrown an exception that was thrown.
+ */
+ public void fail(Probe probe, Throwable thrown) {
+ finish(probe, false, "Failure in " + probe, thrown);
+ }
+
+ public void finish(Probe probe, boolean succeeded, String text, Throwable thrown) {
+ setTimestamp(System.currentTimeMillis());
+ setSuccess(succeeded);
+ originator = probe;
+ message = text;
+ this.thrown = thrown;
+ }
+
+ @Override
+ public String toString() {
+ LogEntryBuilder builder = new LogEntryBuilder("Probe Status");
+ builder.elt("time", timestampText)
+ .elt("outcome", (success ? "success" : "failure"));
+
+ if (success != realOutcome) {
+ builder.elt("originaloutcome", (realOutcome ? "success" : "failure"));
+ }
+ builder.elt("message", message);
+ if (thrown != null) {
+ builder.elt("exception", thrown);
+ }
+
+ return builder.toString();
+ }
+
+ /**
+ * Flip the success bit on while the real outcome bit is kept false
+ */
+ public void markAsSuccessful() {
+ success = true;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
new file mode 100644
index 00000000000..fc8953c02de
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.provider;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
+import org.apache.hadoop.yarn.service.api.records.ConfigFile;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.text.MessageFormat;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConstants.CONTENT;
+
+public abstract class AbstractClientProvider {
+
+ public AbstractClientProvider() {
+ }
+
+ /**
+ * Generates a fixed format of application tags given one or more of
+ * application name, version and description. This allows subsequent query for
+ * an application with a name only, version only or description only or any
+ * combination of those as filters.
+ *
+ * @param appName name of the application
+ * @param appVersion version of the application
+ * @param appDescription brief description of the application
+ * @return
+ */
+ public static final Set createApplicationTags(String appName,
+ String appVersion, String appDescription) {
+ Set tags = new HashSet<>();
+ tags.add(ServiceUtils.createNameTag(appName));
+ if (appVersion != null) {
+ tags.add(ServiceUtils.createVersionTag(appVersion));
+ }
+ if (appDescription != null) {
+ tags.add(ServiceUtils.createDescriptionTag(appDescription));
+ }
+ return tags;
+ }
+
+ /**
+ * Validate the artifact.
+ * @param artifact
+ */
+ public abstract void validateArtifact(Artifact artifact, FileSystem
+ fileSystem) throws IOException;
+
+ protected abstract void validateConfigFile(ConfigFile configFile, FileSystem
+ fileSystem) throws IOException;
+
+ /**
+ * Validate the config files.
+ * @param configFiles config file list
+ * @param fs file system
+ */
+ public void validateConfigFiles(List configFiles,
+ FileSystem fs) throws IOException {
+ Set destFileSet = new HashSet<>();
+
+ for (ConfigFile file : configFiles) {
+ if (file.getType() == null) {
+ throw new IllegalArgumentException("File type is empty");
+ }
+
+ if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE)) {
+ if (StringUtils.isEmpty(file.getSrcFile()) &&
+ !file.getProperties().containsKey(CONTENT)) {
+ throw new IllegalArgumentException(MessageFormat.format("For {0} " +
+ "format, either src_file must be specified in ConfigFile," +
+ " or the \"{1}\" key must be specified in " +
+ "the 'properties' field of ConfigFile. ",
+ ConfigFile.TypeEnum.TEMPLATE, CONTENT));
+ }
+ }
+ if (!StringUtils.isEmpty(file.getSrcFile())) {
+ Path p = new Path(file.getSrcFile());
+ if (!fs.exists(p)) {
+ throw new IllegalArgumentException(
+ "Src_file does not exist for config file: " + file
+ .getSrcFile());
+ }
+ }
+
+ if (StringUtils.isEmpty(file.getDestFile())) {
+ throw new IllegalArgumentException("Dest_file is empty.");
+ }
+
+ if (destFileSet.contains(file.getDestFile())) {
+ throw new IllegalArgumentException(
+ "Duplicated ConfigFile exists: " + file.getDestFile());
+ }
+ destFileSet.add(file.getDestFile());
+
+ java.nio.file.Path destPath = Paths.get(file.getDestFile());
+ if (!destPath.isAbsolute() && destPath.getNameCount() > 1) {
+ throw new IllegalArgumentException("Non-absolute dest_file has more " +
+ "than one path element");
+ }
+
+ // provider-specific validation
+ validateConfigFile(file, fs);
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
new file mode 100644
index 00000000000..6d7406199ad
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+import org.apache.hadoop.yarn.service.exceptions.SliderException;
+import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
+import org.apache.hadoop.yarn.service.containerlaunch.CommandLineBuilder;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.ServiceContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_INTERVAL;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_MAX;
+import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.$;
+
+public abstract class AbstractProviderService implements ProviderService,
+ YarnServiceConstants {
+
+ protected static final Logger log =
+ LoggerFactory.getLogger(AbstractProviderService.class);
+
+ public abstract void processArtifact(AbstractLauncher launcher,
+ ComponentInstance compInstance, SliderFileSystem fileSystem,
+ Service service)
+ throws IOException;
+
+ public void buildContainerLaunchContext(AbstractLauncher launcher,
+ Service service, ComponentInstance instance,
+ SliderFileSystem fileSystem, Configuration yarnConf)
+ throws IOException, SliderException {
+ Component component = instance.getComponent().getComponentSpec();;
+ processArtifact(launcher, instance, fileSystem, service);
+
+ ServiceContext context =
+ instance.getComponent().getScheduler().getContext();
+ // Generate tokens (key-value pair) for config substitution.
+ // Get pre-defined tokens
+ Map globalTokens =
+ instance.getComponent().getScheduler().globalTokens;
+ Map tokensForSubstitution = ProviderUtils
+ .initCompTokensForSubstitute(instance);
+ tokensForSubstitution.putAll(globalTokens);
+ // Set the environment variables in launcher
+ launcher.putEnv(ServiceUtils
+ .buildEnvMap(component.getConfiguration(), tokensForSubstitution));
+ launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$());
+ launcher.setEnv("LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
+ if (System.getenv(HADOOP_USER_NAME) != null) {
+ launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME));
+ }
+ launcher.setEnv("LANG", "en_US.UTF-8");
+ launcher.setEnv("LC_ALL", "en_US.UTF-8");
+ launcher.setEnv("LANGUAGE", "en_US.UTF-8");
+
+ for (Entry entry : launcher.getEnv().entrySet()) {
+ tokensForSubstitution.put($(entry.getKey()), entry.getValue());
+ }
+ //TODO add component host tokens?
+// ProviderUtils.addComponentHostTokens(tokensForSubstitution, amState);
+
+ // create config file on hdfs and add local resource
+ ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
+ component, tokensForSubstitution, instance, context);
+
+ // substitute launch command
+ String launchCommand = component.getLaunchCommand();
+ // docker container may have empty commands
+ if (!StringUtils.isEmpty(launchCommand)) {
+ launchCommand = ProviderUtils
+ .substituteStrWithTokens(launchCommand, tokensForSubstitution);
+ CommandLineBuilder operation = new CommandLineBuilder();
+ operation.add(launchCommand);
+ operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
+ launcher.addCommand(operation.build());
+ }
+
+ // By default retry forever every 30 seconds
+ launcher.setRetryContext(YarnServiceConf
+ .getInt(CONTAINER_RETRY_MAX, -1, service.getConfiguration(),
+ yarnConf), YarnServiceConf
+ .getInt(CONTAINER_RETRY_INTERVAL, 30000, service.getConfiguration(),
+ yarnConf));
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderFactory.java
new file mode 100644
index 00000000000..0f949e0bace
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderFactory.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.provider;
+
+import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderFactory;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
+import org.apache.hadoop.yarn.service.provider.docker.DockerProviderFactory;
+import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base class for factories.
+ */
+public abstract class ProviderFactory {
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(ProviderFactory.class);
+
+ protected ProviderFactory() {}
+
+ public abstract AbstractClientProvider createClientProvider();
+
+ public abstract ProviderService createServerProvider();
+
+ public static synchronized ProviderService getProviderService(Artifact
+ artifact) {
+ return createServiceProviderFactory(artifact).createServerProvider();
+ }
+
+ public static synchronized AbstractClientProvider getClientProvider(Artifact
+ artifact) {
+ return createServiceProviderFactory(artifact).createClientProvider();
+ }
+
+ /**
+ * Create a provider for a specific service
+ * @param artifact artifact
+ * @return provider factory
+ */
+ public static synchronized ProviderFactory createServiceProviderFactory(
+ Artifact artifact) {
+ if (artifact == null || artifact.getType() == null) {
+ LOG.debug("Loading service provider type default");
+ return DefaultProviderFactory.getInstance();
+ }
+ LOG.debug("Loading service provider type {}", artifact.getType());
+ switch (artifact.getType()) {
+ // TODO add handling for custom types?
+ // TODO handle service
+ case DOCKER:
+ return DockerProviderFactory.getInstance();
+ case TARBALL:
+ return TarballProviderFactory.getInstance();
+ default:
+ throw new IllegalArgumentException(String.format("Resolution error, " +
+ "%s should not be passed to createServiceProviderFactory",
+ artifact.getType()));
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java
new file mode 100644
index 00000000000..eb721b4a4f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.provider;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
+import org.apache.hadoop.yarn.service.exceptions.SliderException;
+import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+
+import java.io.IOException;
+
/**
 * A provider service builds the container launch context for a component
 * instance on the AM side.
 */
public interface ProviderService {

  /**
   * Set up the entire container launch context
   * @param containerLauncher launcher to populate
   * @param service service definition
   * @param instance component instance being launched
   * @param sliderFileSystem filesystem used for localized resources
   * @param yarnConf YARN configuration
   * @throws IOException on filesystem failures
   * @throws SliderException on service configuration problems
   */
  void buildContainerLaunchContext(AbstractLauncher containerLauncher,
      Service service, ComponentInstance instance,
      SliderFileSystem sliderFileSystem, Configuration yarnConf)
      throws IOException, SliderException;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
new file mode 100644
index 00000000000..e074dd756ae
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
@@ -0,0 +1,408 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.provider;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.service.ServiceContext;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.ConfigFile;
+import org.apache.hadoop.yarn.service.api.records.ConfigFormat;
+import org.apache.hadoop.yarn.service.api.records.Configuration;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
+import org.apache.hadoop.yarn.service.exceptions.BadCommandArgumentsException;
+import org.apache.hadoop.yarn.service.exceptions.SliderException;
+import org.apache.hadoop.yarn.service.utils.PublishedConfiguration;
+import org.apache.hadoop.yarn.service.utils.PublishedConfigurationOutputter;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+
+import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*;
+
+/**
+ * This is a factoring out of methods handy for providers. It's bonded to a log
+ * at construction time.
+ */
+public class ProviderUtils implements YarnServiceConstants {
+
+ protected static final Logger log =
+ LoggerFactory.getLogger(ProviderUtils.class);
+
+
+ /**
+ * Add oneself to the classpath. This does not work
+ * on minicluster test runs where the JAR is not built up.
+ * @param providerResources map of provider resources to add these entries to
+ * @param providerClass provider to add
+ * @param jarName name of the jar to use
+ * @param sliderFileSystem target filesystem
+ * @param tempPath path in the cluster FS for temp files
+ * @param libdir relative directory to place resources
+ * @param miniClusterTestRun true if minicluster is being used
+ * @return true if the class was found in a JAR
+ *
+ * @throws FileNotFoundException if the JAR was not found and this is NOT
+ * a mini cluster test run
+ * @throws IOException IO problems
+ * @throws SliderException any Slider problem
+ */
+ public static boolean addProviderJar(
+ Map providerResources,
+ Class providerClass,
+ String jarName,
+ SliderFileSystem sliderFileSystem,
+ Path tempPath,
+ String libdir,
+ boolean miniClusterTestRun) throws
+ IOException,
+ SliderException {
+ try {
+ ServiceUtils.putJar(providerResources,
+ sliderFileSystem,
+ providerClass,
+ tempPath,
+ libdir,
+ jarName);
+ return true;
+ } catch (FileNotFoundException e) {
+ if (miniClusterTestRun) {
+ return false;
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ /**
+ * Loads all dependency jars from the default path.
+ * @param providerResources map of provider resources to add these entries to
+ * @param sliderFileSystem target filesystem
+ * @param tempPath path in the cluster FS for temp files
+ * @param libDir relative directory to place resources
+ * @param libLocalSrcDir explicitly supplied local libs dir
+ * @throws IOException trouble copying to HDFS
+ * @throws SliderException trouble copying to HDFS
+ */
+ public static void addAllDependencyJars(
+ Map providerResources,
+ SliderFileSystem sliderFileSystem,
+ Path tempPath,
+ String libDir,
+ String libLocalSrcDir)
+ throws IOException, SliderException {
+ if (ServiceUtils.isSet(libLocalSrcDir)) {
+ File file = new File(libLocalSrcDir);
+ if (!file.exists() || !file.isDirectory()) {
+ throw new BadCommandArgumentsException(
+ "Supplied lib src dir %s is not valid", libLocalSrcDir);
+ }
+ }
+ ServiceUtils.putAllJars(providerResources, sliderFileSystem, tempPath,
+ libDir, libLocalSrcDir);
+ }
+
+ public static String substituteStrWithTokens(String content,
+ Map tokensForSubstitution) {
+ for (Map.Entry token : tokensForSubstitution.entrySet()) {
+ content =
+ content.replaceAll(Pattern.quote(token.getKey()), token.getValue());
+ }
+ return content;
+ }
+
+ // configs will be substituted by corresponding env in tokenMap
+ public static void substituteMapWithTokens(Map configs,
+ Map tokenMap) {
+ for (Map.Entry entry : configs.entrySet()) {
+ String value = entry.getValue();
+ if (tokenMap != null) {
+ for (Map.Entry token : tokenMap.entrySet()) {
+ value =
+ value.replaceAll(Pattern.quote(token.getKey()), token.getValue());
+ }
+ }
+ entry.setValue(value);
+ }
+ }
+
+ /**
+ * Localize the service keytabs for the service.
+ * @param launcher container launcher
+ * @param fileSystem file system
+ * @throws IOException trouble uploading to HDFS
+ */
+ public void localizeServiceKeytabs(AbstractLauncher launcher,
+ SliderFileSystem fileSystem, Service service) throws IOException {
+
+ Configuration conf = service.getConfiguration();
+ String keytabPathOnHost =
+ conf.getProperty(YarnServiceConf.KEY_AM_KEYTAB_LOCAL_PATH);
+ if (ServiceUtils.isUnset(keytabPathOnHost)) {
+ String amKeytabName =
+ conf.getProperty(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_NAME);
+ String keytabDir =
+ conf.getProperty(YarnServiceConf.KEY_HDFS_KEYTAB_DIR);
+ // we need to localize the keytab files in the directory
+ Path keytabDirPath = fileSystem.buildKeytabPath(keytabDir, null,
+ service.getName());
+ boolean serviceKeytabsDeployed = false;
+ if (fileSystem.getFileSystem().exists(keytabDirPath)) {
+ FileStatus[] keytabs = fileSystem.getFileSystem().listStatus(
+ keytabDirPath);
+ LocalResource keytabRes;
+ for (FileStatus keytab : keytabs) {
+ if (!amKeytabName.equals(keytab.getPath().getName())
+ && keytab.getPath().getName().endsWith(".keytab")) {
+ serviceKeytabsDeployed = true;
+ log.info("Localizing keytab {}", keytab.getPath().getName());
+ keytabRes = fileSystem.createAmResource(keytab.getPath(),
+ LocalResourceType.FILE);
+ launcher.addLocalResource(KEYTAB_DIR + "/" +
+ keytab.getPath().getName(),
+ keytabRes);
+ }
+ }
+ }
+ if (!serviceKeytabsDeployed) {
+ log.warn("No service keytabs for the service have been localized. "
+ + "If the service requires keytabs for secure operation, "
+ + "please ensure that the required keytabs have been uploaded "
+ + "to the folder {}", keytabDirPath);
+ }
+ }
+ }
+
+ public static Path initCompInstanceDir(SliderFileSystem fs,
+ ComponentInstance instance) {
+ Path compDir = new Path(new Path(fs.getAppDir(), "components"),
+ instance.getCompName());
+ Path compInstanceDir = new Path(compDir, instance.getCompInstanceName());
+ instance.setCompInstanceDir(compInstanceDir);
+ return compInstanceDir;
+ }
+
  // 1. Create all config files for a component on hdfs for localization
  // 2. Add the config file to localResource
  public static synchronized void createConfigFileAndAddLocalResource(
      AbstractLauncher launcher, SliderFileSystem fs, Component component,
      Map tokensForSubstitution, ComponentInstance instance,
      ServiceContext context) throws IOException {
    // Per-instance HDFS dir where all resolved config files are written.
    Path compInstanceDir = initCompInstanceDir(fs, instance);
    if (!fs.getFileSystem().exists(compInstanceDir)) {
      log.info(instance.getCompInstanceId() + ": Creating dir on hdfs: " + compInstanceDir);
      // Owner-only permissions: resolved configs may carry secrets.
      fs.getFileSystem().mkdirs(compInstanceDir,
          new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
    } else {
      log.info("Component instance conf dir already exists: " + compInstanceDir);
    }

    if (log.isDebugEnabled()) {
      log.debug("Tokens substitution for component instance: " + instance
          .getCompInstanceName() + System.lineSeparator()
          + tokensForSubstitution);
    }

    for (ConfigFile originalFile : component.getConfiguration().getFiles()) {
      // Work on a copy so the service spec's ConfigFile is never mutated.
      ConfigFile configFile = originalFile.copy();
      // NOTE(review): fileName is taken from dest_file BEFORE token
      // substitution, so the HDFS copy and the symlink keep the raw name
      // even when dest_file contains tokens — confirm this is intended.
      String fileName = new Path(configFile.getDestFile()).getName();

      // substitute file name
      for (Map.Entry token : tokensForSubstitution.entrySet()) {
        configFile.setDestFile(configFile.getDestFile()
            .replaceAll(Pattern.quote(token.getKey()), token.getValue()));
      }

      Path remoteFile = new Path(compInstanceDir, fileName);
      // Write each file at most once; subsequent launches reuse the copy.
      if (!fs.getFileSystem().exists(remoteFile)) {
        log.info("Saving config file on hdfs for component " + instance
            .getCompInstanceName() + ": " + configFile);

        if (configFile.getSrcFile() != null) {
          // Load config file template
          switch (configFile.getType()) {
          case HADOOP_XML:
            // Hadoop_xml_template
            resolveHadoopXmlTemplateAndSaveOnHdfs(fs.getFileSystem(),
                tokensForSubstitution, configFile, remoteFile, context);
            break;
          case TEMPLATE:
            // plain-template
            resolvePlainTemplateAndSaveOnHdfs(fs.getFileSystem(),
                tokensForSubstitution, configFile, remoteFile, context);
            break;
          default:
            log.info("Not supporting loading src_file for " + configFile);
            break;
          }
        } else {
          // If src_file is not specified
          resolvePropsInConfigFileAndSaveOnHdfs(fs, tokensForSubstitution,
              instance, configFile, fileName, remoteFile);
        }
      }

      // Add resource for localization
      LocalResource configResource =
          fs.createAmResource(remoteFile, LocalResourceType.FILE);
      File destFile = new File(configFile.getDestFile());
      String symlink = APP_CONF_DIR + "/" + fileName;
      if (destFile.isAbsolute()) {
        // Absolute dest_file: localize under the conf dir, then mount at
        // the requested absolute path inside the container.
        launcher.addLocalResource(symlink, configResource,
            configFile.getDestFile());
        log.info("Add config file for localization: " + symlink + " -> "
            + configResource.getResource().getFile() + ", dest mount path: "
            + configFile.getDestFile());
      } else {
        launcher.addLocalResource(symlink, configResource);
        log.info("Add config file for localization: " + symlink + " -> "
            + configResource.getResource().getFile());
      }
    }
  }
+
  /**
   * Substitute tokens into a config file's inline properties and persist
   * the result to HDFS in the file's declared format. Used when the
   * ConfigFile declares no src_file template.
   *
   * @param fs file system used for the write
   * @param tokensForSubstitution token -> replacement map
   * @param instance component instance (used for log context only)
   * @param configFile config file whose properties are resolved in place
   * @param fileName base name recorded in the published configuration
   * @param remoteFile HDFS destination; skipped if it already exists
   * @throws IOException on failure writing to HDFS
   */
  private static void resolvePropsInConfigFileAndSaveOnHdfs(SliderFileSystem fs,
      Map tokensForSubstitution, ComponentInstance instance,
      ConfigFile configFile, String fileName, Path remoteFile)
      throws IOException {
    // substitute non-template configs
    substituteMapWithTokens(configFile.getProperties(), tokensForSubstitution);

    // write configs onto hdfs
    PublishedConfiguration publishedConfiguration =
        new PublishedConfiguration(fileName,
            configFile.getProperties().entrySet());
    if (!fs.getFileSystem().exists(remoteFile)) {
      // Outputter serializes in the format implied by the file type
      // (json/properties/yaml/...).
      PublishedConfigurationOutputter configurationOutputter =
          PublishedConfigurationOutputter.createOutputter(
              ConfigFormat.resolve(configFile.getType().toString()),
              publishedConfiguration);
      try (FSDataOutputStream os = fs.getFileSystem().create(remoteFile)) {
        configurationOutputter.save(os);
        os.flush();
      }
    } else {
      log.info("Component instance = " + instance.getCompInstanceName()
          + ", config file already exists: " + remoteFile);
    }
  }
+
+ // 1. substitute config template - only handle hadoop_xml format
+ // 2. save on hdfs
+ @SuppressWarnings("unchecked")
+ private static void resolveHadoopXmlTemplateAndSaveOnHdfs(FileSystem fs,
+ Map tokensForSubstitution, ConfigFile configFile,
+ Path remoteFile, ServiceContext context) throws IOException {
+ Map conf;
+ try {
+ conf = (Map) context.configCache.get(configFile);
+ } catch (ExecutionException e) {
+ log.info("Failed to load config file: " + configFile, e);
+ return;
+ }
+ // make a copy for substitution
+ org.apache.hadoop.conf.Configuration confCopy =
+ new org.apache.hadoop.conf.Configuration(false);
+ for (Map.Entry entry : conf.entrySet()) {
+ confCopy.set(entry.getKey(), entry.getValue());
+ }
+ // substitute properties
+ for (Map.Entry entry : configFile.getProperties().entrySet()) {
+ confCopy.set(entry.getKey(), entry.getValue());
+ }
+ // substitute env variables
+ for (Map.Entry entry : confCopy) {
+ String val = entry.getValue();
+ if (val != null) {
+ for (Map.Entry token : tokensForSubstitution
+ .entrySet()) {
+ val = val.replaceAll(Pattern.quote(token.getKey()), token.getValue());
+ confCopy.set(entry.getKey(), val);
+ }
+ }
+ }
+ // save on hdfs
+ try (OutputStream output = fs.create(remoteFile)) {
+ confCopy.writeXml(output);
+ log.info("Reading config from: " + configFile.getSrcFile()
+ + ", writing to: " + remoteFile);
+ }
+ }
+
+ // 1) read the template as a string
+ // 2) do token substitution
+ // 3) save on hdfs
+ private static void resolvePlainTemplateAndSaveOnHdfs(FileSystem fs,
+ Map tokensForSubstitution, ConfigFile configFile,
+ Path remoteFile, ServiceContext context) {
+ String content;
+ try {
+ content = (String) context.configCache.get(configFile);
+ } catch (ExecutionException e) {
+ log.info("Failed to load config file: " + configFile, e);
+ return;
+ }
+ // substitute tokens
+ content = substituteStrWithTokens(content, tokensForSubstitution);
+
+ try (OutputStream output = fs.create(remoteFile)) {
+ org.apache.commons.io.IOUtils.write(content, output);
+ } catch (IOException e) {
+ log.info("Failed to create " + remoteFile);
+ }
+ }
+
+ /**
+ * Get initial component token map to be substituted into config values.
+ * @return tokens to replace
+ */
+ public static Map initCompTokensForSubstitute(
+ ComponentInstance instance) {
+ Map tokens = new HashMap<>();
+ tokens.put(COMPONENT_NAME, instance.getCompSpec().getName());
+ tokens
+ .put(COMPONENT_NAME_LC, instance.getCompSpec().getName().toLowerCase());
+ tokens.put(COMPONENT_INSTANCE_NAME, instance.getCompInstanceName());
+ tokens.put(CONTAINER_ID, instance.getContainer().getId().toString());
+ tokens.put(COMPONENT_ID,
+ String.valueOf(instance.getCompInstanceId().getId()));
+ tokens.putAll(instance.getComponent().getDependencyHostIpTokens());
+ return tokens;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java
new file mode 100644
index 00000000000..0920a9ce114
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.defaultImpl;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
+import org.apache.hadoop.yarn.service.api.records.ConfigFile;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+
+public class DefaultClientProvider extends AbstractClientProvider {
+
+ public DefaultClientProvider() {
+ }
+
+ @Override
+ public void validateArtifact(Artifact artifact, FileSystem fileSystem) {
+ }
+
+ @Override
+ protected void validateConfigFile(ConfigFile configFile, FileSystem
+ fileSystem) throws IOException {
+ // validate dest_file is not absolute
+ if (Paths.get(configFile.getDestFile()).isAbsolute()) {
+ throw new IllegalArgumentException(
+ "Dest_file must not be absolute path: " + configFile.getDestFile());
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderFactory.java
new file mode 100644
index 00000000000..868bba8f8dc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderFactory.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.defaultImpl;
+
+import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
+import org.apache.hadoop.yarn.service.provider.ProviderService;
+import org.apache.hadoop.yarn.service.provider.ProviderFactory;
+
+public final class DefaultProviderFactory extends ProviderFactory {
+ private static final ProviderFactory FACTORY = new
+ DefaultProviderFactory();
+
+ private DefaultProviderFactory() {}
+
+ private static class Client {
+ static final AbstractClientProvider PROVIDER = new DefaultClientProvider();
+ }
+
+ private static class Server {
+ static final ProviderService PROVIDER = new DefaultProviderService();
+ }
+
+ @Override
+ public AbstractClientProvider createClientProvider() {
+ return Client.PROVIDER;
+ }
+
+ @Override
+ public ProviderService createServerProvider() {
+ return Server.PROVIDER;
+ }
+
+ public static ProviderFactory getInstance() {
+ return FACTORY;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderService.java
new file mode 100644
index 00000000000..a3a0c1f96ee
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderService.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.defaultImpl;
+
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.provider.AbstractProviderService;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
+import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
+
+import java.io.IOException;
+
+public class DefaultProviderService extends AbstractProviderService {
+
+ @Override
+ public void processArtifact(AbstractLauncher launcher,
+ ComponentInstance compInstance, SliderFileSystem fileSystem,
+ Service service)
+ throws IOException {
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java
new file mode 100644
index 00000000000..d4a2254ff42
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.docker;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
+import org.apache.hadoop.yarn.service.api.records.ConfigFile;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
+import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
+import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages;
+
+import java.io.IOException;
+
+public class DockerClientProvider extends AbstractClientProvider
+ implements YarnServiceConstants {
+
+ public DockerClientProvider() {
+ super();
+ }
+
+ @Override
+ public void validateArtifact(Artifact artifact, FileSystem fileSystem) {
+ if (artifact == null) {
+ throw new IllegalArgumentException(
+ RestApiErrorMessages.ERROR_ARTIFACT_INVALID);
+ }
+ if (StringUtils.isEmpty(artifact.getId())) {
+ throw new IllegalArgumentException(
+ RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
+ }
+ }
+
+ @Override
+ protected void validateConfigFile(ConfigFile configFile, FileSystem
+ fileSystem) throws IOException {
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerKeys.java
new file mode 100644
index 00000000000..f30c002c612
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerKeys.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.docker;
+
/**
 * Configuration keys (and their defaults) recognized by the docker
 * provider. All keys live under the {@code docker.} prefix.
 */
public interface DockerKeys {
  /** Provider name used to select the docker provider. */
  String PROVIDER_DOCKER = "docker";
  /** Common prefix for all docker configuration keys. */
  String DOCKER_PREFIX = "docker.";
  /** Docker image name. */
  String DOCKER_IMAGE = DOCKER_PREFIX + "image";
  /** Docker network to attach the container to. */
  String DOCKER_NETWORK = DOCKER_PREFIX + "network";
  /** Whether to run the container in privileged mode. */
  String DOCKER_USE_PRIVILEGED = DOCKER_PREFIX + "usePrivileged";
  /** Command executed when the container starts. */
  String DOCKER_START_COMMAND = DOCKER_PREFIX + "startCommand";

  /** Network used when {@link #DOCKER_NETWORK} is not set. */
  String DEFAULT_DOCKER_NETWORK = "bridge";
  /** Privileged mode is disabled unless explicitly requested. */
  Boolean DEFAULT_DOCKER_USE_PRIVILEGED = false;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderFactory.java
new file mode 100644
index 00000000000..57330ab6ad0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.docker;
+
+import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
+import org.apache.hadoop.yarn.service.provider.ProviderService;
+import org.apache.hadoop.yarn.service.provider.ProviderFactory;
+
+public class DockerProviderFactory extends ProviderFactory {
+ private static final ProviderFactory FACTORY = new
+ DockerProviderFactory();
+
+ private DockerProviderFactory() {
+ }
+
+ private static class Client {
+ static final AbstractClientProvider PROVIDER = new DockerClientProvider();
+ }
+
+ private static class Server {
+ static final ProviderService PROVIDER = new DockerProviderService();
+ }
+
+ @Override
+ public AbstractClientProvider createClientProvider() {
+ return Client.PROVIDER;
+ }
+
+ @Override
+ public ProviderService createServerProvider() {
+ return Server.PROVIDER;
+ }
+
+ public static ProviderFactory getInstance() {
+ return FACTORY;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
new file mode 100644
index 00000000000..0741947d4af
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.docker;
+
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.provider.AbstractProviderService;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
+import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
+
+import java.io.IOException;
+import java.text.MessageFormat;
+
+public class DockerProviderService extends AbstractProviderService
+ implements DockerKeys {
+
+ public void processArtifact(AbstractLauncher launcher,
+ ComponentInstance compInstance, SliderFileSystem fileSystem,
+ Service service) throws IOException{
+ launcher.setYarnDockerMode(true);
+ launcher.setDockerImage(compInstance.getCompSpec().getArtifact().getId());
+ launcher.setDockerNetwork(compInstance.getCompSpec().getConfiguration()
+ .getProperty(DOCKER_NETWORK, DEFAULT_DOCKER_NETWORK));
+ String domain = compInstance.getComponent().getScheduler().getConfig()
+ .get(RegistryConstants.KEY_DNS_DOMAIN);
+ String hostname;
+ if (domain == null || domain.isEmpty()) {
+ hostname = MessageFormat
+ .format("{0}.{1}.{2}", compInstance.getCompInstanceName(),
+ service.getName(), RegistryUtils.currentUser());
+ } else {
+ hostname = MessageFormat
+ .format("{0}.{1}.{2}.{3}", compInstance.getCompInstanceName(),
+ service.getName(), RegistryUtils.currentUser(), domain);
+ }
+ launcher.setDockerHostname(hostname);
+ launcher.setRunPrivilegedContainer(
+ compInstance.getCompSpec().getRunPrivilegedContainer());
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java
new file mode 100644
index 00000000000..01f7b209aae
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.tarball;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
+import org.apache.hadoop.yarn.service.api.records.ConfigFile;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
+import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
+import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+
+public class TarballClientProvider extends AbstractClientProvider
+ implements YarnServiceConstants {
+
+ public TarballClientProvider() {
+ }
+
+ @Override
+ public void validateArtifact(Artifact artifact, FileSystem fs)
+ throws IOException {
+ if (artifact == null) {
+ throw new IllegalArgumentException(
+ RestApiErrorMessages.ERROR_ARTIFACT_INVALID);
+ }
+ if (StringUtils.isEmpty(artifact.getId())) {
+ throw new IllegalArgumentException(
+ RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
+ }
+ Path p = new Path(artifact.getId());
+ if (!fs.exists(p)) {
+ throw new IllegalArgumentException( "Artifact tarball does not exist "
+ + artifact.getId());
+ }
+ }
+
+ @Override
+ protected void validateConfigFile(ConfigFile configFile, FileSystem
+ fileSystem) throws IOException {
+ // validate dest_file is not absolute
+ if (Paths.get(configFile.getDestFile()).isAbsolute()) {
+ throw new IllegalArgumentException(
+ "Dest_file must not be absolute path: " + configFile.getDestFile());
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderFactory.java
new file mode 100644
index 00000000000..9d81f66e6b2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.tarball;
+
+import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
+import org.apache.hadoop.yarn.service.provider.ProviderService;
+import org.apache.hadoop.yarn.service.provider.ProviderFactory;
+
+public class TarballProviderFactory extends ProviderFactory {
+ private static final ProviderFactory FACTORY = new
+ TarballProviderFactory();
+
+ private TarballProviderFactory() {
+ }
+
+ private static class Client {
+ static final AbstractClientProvider PROVIDER = new TarballClientProvider();
+ }
+
+ private static class Server {
+ static final ProviderService PROVIDER = new TarballProviderService();
+ }
+
+ @Override
+ public AbstractClientProvider createClientProvider() {
+ return Client.PROVIDER;
+ }
+
+ @Override
+ public ProviderService createServerProvider() {
+ return Server.PROVIDER;
+ }
+
+ public static ProviderFactory getInstance() {
+ return FACTORY;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderService.java
new file mode 100644
index 00000000000..9f29c8be31b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderService.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.provider.tarball;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.provider.AbstractProviderService;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
+import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
+
+import java.io.IOException;
+
+public class TarballProviderService extends AbstractProviderService {
+
+  /**
+   * Localize the component's tarball artifact into the container as an
+   * ARCHIVE local resource under the application lib directory.
+   * @param launcher launcher to add the local resource to
+   * @param instance component instance whose artifact id names the package
+   * @param fileSystem filesystem used to check for and package the artifact
+   * @param service service definition (unused here; part of the contract)
+   * @throws IOException if the artifact path does not exist as a file
+   */
+  @Override
+  public void processArtifact(AbstractLauncher launcher,
+      ComponentInstance instance, SliderFileSystem fileSystem,
+      Service service)
+      throws IOException {
+    Path artifact = new Path(instance.getCompSpec().getArtifact().getId());
+    if (!fileSystem.isFile(artifact)) {
+      // String concatenation invokes toString() implicitly
+      throw new IOException(
+          "Package doesn't exist as a resource: " + artifact);
+    }
+    // The {} placeholder converts the Path lazily; no explicit toString()
+    log.info("Adding resource {}", artifact);
+    LocalResource packageResource = fileSystem
+        .createAmResource(artifact, LocalResourceType.ARCHIVE);
+    launcher.addLocalResource(APP_LIB_DIR, packageResource);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/CustomRegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/CustomRegistryConstants.java
new file mode 100644
index 00000000000..56634f678c9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/CustomRegistryConstants.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.registry;
+
+/**
+ * These are constants unique to the Slider AM
+ */
+public class CustomRegistryConstants {
+
+ public static final String MANAGEMENT_REST_API =
+ "classpath:org.apache.slider.management";
+
+ public static final String REGISTRY_REST_API =
+ "classpath:org.apache.slider.registry";
+
+ public static final String PUBLISHER_REST_API =
+ "classpath:org.apache.slider.publisher";
+
+ public static final String PUBLISHER_CONFIGURATIONS_API =
+ "classpath:org.apache.slider.publisher.configurations";
+
+ public static final String PUBLISHER_EXPORTS_API =
+ "classpath:org.apache.slider.publisher.exports";
+
+ public static final String PUBLISHER_DOCUMENTS_API =
+ "classpath:org.apache.slider.publisher.documents";
+
+ public static final String AGENT_SECURE_REST_API =
+ "classpath:org.apache.slider.agents.secure";
+
+ public static final String AGENT_ONEWAY_REST_API =
+ "classpath:org.apache.slider.agents.oneway";
+
+ public static final String AM_IPC_PROTOCOL =
+ "classpath:org.apache.slider.appmaster.ipc";
+
+ public static final String AM_REST_BASE =
+ "classpath:org.apache.slider.client.rest";
+
+ public static final String WEB_UI = "http://";
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java
new file mode 100644
index 00000000000..cecca5f6cf2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.registry;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.registry.client.api.BindFlags;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.join;
+
+/**
+ * Registry view for providers. This tracks where the service
+ * is registered, offers access to the record and other things.
+ */
+public class YarnRegistryViewForProviders {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(YarnRegistryViewForProviders.class);
+
+  private final RegistryOperations registryOperations;
+  private final String user;
+  private final String serviceClass;
+  private final String instanceName;
+  /**
+   * Record used where the service registered itself.
+   * Null until the service is registered.
+   */
+  private ServiceRecord selfRegistration;
+
+  /**
+   * Path where record was registered.
+   * Null until the service is registered.
+   */
+  private String selfRegistrationPath;
+
+  public YarnRegistryViewForProviders(RegistryOperations registryOperations,
+      String user,
+      String serviceClass,
+      String instanceName,
+      ApplicationAttemptId applicationAttemptId) {
+    Preconditions.checkArgument(registryOperations != null,
+        "null registry operations");
+    Preconditions.checkArgument(user != null, "null user");
+    Preconditions.checkArgument(ServiceUtils.isSet(serviceClass),
+        "unset service class");
+    Preconditions.checkArgument(ServiceUtils.isSet(instanceName),
+        "unset instance name");
+    Preconditions.checkArgument(applicationAttemptId != null,
+        "null applicationAttemptId");
+    // NOTE(review): applicationAttemptId is validated but not retained;
+    // confirm whether it should be stored for later use.
+    this.registryOperations = registryOperations;
+    this.user = user;
+    this.serviceClass = serviceClass;
+    this.instanceName = instanceName;
+  }
+
+  public String getUser() {
+    return user;
+  }
+
+  private void setSelfRegistration(ServiceRecord selfRegistration) {
+    this.selfRegistration = selfRegistration;
+  }
+
+  /**
+   * Get the record the service registered itself with.
+   * Null until the service is registered.
+   * @return the self-registration record
+   */
+  public ServiceRecord getSelfRegistration() {
+    return selfRegistration;
+  }
+
+  /**
+   * Get the path to where the service has registered itself.
+   * Null until the service is registered.
+   * @return the service registration path.
+   */
+  public String getSelfRegistrationPath() {
+    return selfRegistrationPath;
+  }
+
+  /**
+   * Get the absolute path to where the service has registered itself.
+   * This includes the base registry path.
+   * Null until the service is registered.
+   * @return the service registration path.
+   */
+  public String getAbsoluteSelfRegistrationPath() {
+    if (selfRegistrationPath == null) {
+      return null;
+    }
+    String root = registryOperations.getConfig().getTrimmed(
+        RegistryConstants.KEY_REGISTRY_ZK_ROOT,
+        RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
+    return RegistryPathUtils.join(root, selfRegistrationPath);
+  }
+
+  /**
+   * Add a component under this instance's service class/name entry.
+   * @param componentName component name
+   * @param record record to put
+   * @throws IOException on registry failures
+   */
+  public void putComponent(String componentName,
+      ServiceRecord record) throws
+      IOException {
+    putComponent(serviceClass, instanceName,
+        componentName,
+        record);
+  }
+
+  /**
+   * Add a component.
+   * @param serviceClass service class to use under ~user
+   * @param serviceName name of the service
+   * @param componentName component name
+   * @param record record to put
+   * @throws IOException on registry failures
+   */
+  public void putComponent(String serviceClass,
+      String serviceName,
+      String componentName,
+      ServiceRecord record) throws IOException {
+    String path = RegistryUtils.componentPath(
+        user, serviceClass, serviceName, componentName);
+    // ensure the parent path exists before binding the record
+    registryOperations.mknode(RegistryPathUtils.parentOf(path), true);
+    registryOperations.bind(path, record, BindFlags.OVERWRITE);
+  }
+
+  /**
+   * Get a component.
+   * @param componentName component name
+   * @return the service record
+   * @throws IOException on registry failures
+   */
+  public ServiceRecord getComponent(String componentName) throws IOException {
+    String path = RegistryUtils.componentPath(
+        user, serviceClass, instanceName, componentName);
+    LOG.info("Resolving path {}", path);
+    return registryOperations.resolve(path);
+  }
+
+  /**
+   * List components.
+   * @return a list of component names
+   * @throws IOException on registry failures
+   */
+  public List<String> listComponents() throws IOException {
+    String path = RegistryUtils.componentListPath(
+        user, serviceClass, instanceName);
+    return registryOperations.list(path);
+  }
+
+  /**
+   * Add a service under a path, optionally purging any history.
+   * @param username user
+   * @param serviceClass service class to use under ~user
+   * @param serviceName name of the service
+   * @param record service record
+   * @param deleteTreeFirst perform recursive delete of the path first.
+   * @return the path the service was created at
+   * @throws IOException on registry failures
+   */
+  public String putService(String username,
+      String serviceClass,
+      String serviceName,
+      ServiceRecord record,
+      boolean deleteTreeFirst) throws IOException {
+    String path = RegistryUtils.servicePath(
+        username, serviceClass, serviceName);
+    if (deleteTreeFirst) {
+      registryOperations.delete(path, true);
+    }
+    registryOperations.mknode(RegistryPathUtils.parentOf(path), true);
+    registryOperations.bind(path, record, BindFlags.OVERWRITE);
+    return path;
+  }
+
+  /**
+   * Add a service under a path for the current user.
+   * @param record service record
+   * @param deleteTreeFirst perform recursive delete of the path first
+   * @return the path the service was created at
+   * @throws IOException on registry failures
+   */
+  public String registerSelf(
+      ServiceRecord record,
+      boolean deleteTreeFirst) throws IOException {
+    selfRegistrationPath =
+        putService(user, serviceClass, instanceName, record, deleteTreeFirst);
+    setSelfRegistration(record);
+    return selfRegistrationPath;
+  }
+
+  /**
+   * Delete a component.
+   * @param instanceId component instance id, used for logging
+   * @param containerId container id used as the component path element
+   * @throws IOException on registry failures
+   */
+  public void deleteComponent(ComponentInstanceId instanceId,
+      String containerId) throws IOException {
+    String path = RegistryUtils.componentPath(
+        user, serviceClass, instanceName,
+        containerId);
+    LOG.info("{}: Deleting registry path {}", instanceId, path);
+    registryOperations.delete(path, false);
+  }
+
+  /**
+   * Delete the children of a path -but not the path itself.
+   * It is not an error if the path does not exist.
+   * @param path path to delete
+   * @param recursive flag to request recursive deletes
+   * @throws IOException IO problems
+   */
+  public void deleteChildren(String path, boolean recursive)
+      throws IOException {
+    List<String> childNames;
+    try {
+      childNames = registryOperations.list(path);
+    } catch (PathNotFoundException e) {
+      // missing path is explicitly not an error here
+      return;
+    }
+    for (String childName : childNames) {
+      String child = join(path, childName);
+      registryOperations.delete(child, recursive);
+    }
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java
new file mode 100644
index 00000000000..cf4e836c01c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.timelineservice;
+
+import org.apache.commons.configuration2.SubsetConfiguration;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Writes metrics to ATSv2. Generally, this class is instantiated via
+ * hadoop-metrics2 property files. Specifically, you would create this class
+ * by adding the following property:
+ * <pre>
+ * [prefix].sink.[some instance name].class
+ *     = org.apache.hadoop.yarn.service.timelineservice.ServiceMetricsSink
+ * </pre>
+ * where {@code prefix} is "atsv2" and "some instance name" is
+ * just any unique name, so properties can be differentiated if there are
+ * multiple sinks of the same type created
+ */
+public class ServiceMetricsSink implements MetricsSink {
+
+  private static final Logger log =
+      LoggerFactory.getLogger(ServiceMetricsSink.class);
+
+  // May be null when this sink is instantiated reflectively from a
+  // hadoop-metrics2 property file via the no-arg constructor.
+  private ServiceTimelinePublisher serviceTimelinePublisher;
+
+  public ServiceMetricsSink() {
+
+  }
+
+  public ServiceMetricsSink(ServiceTimelinePublisher publisher) {
+    serviceTimelinePublisher = publisher;
+  }
+
+  /**
+   * Publishes service and component metrics to ATS.
+   * Records tagged {@code type=service} (carrying an {@code appId} tag) are
+   * published as SERVICE_ATTEMPT entities; records tagged
+   * {@code type=component} as COMPONENT entities. Other records are ignored.
+   */
+  @Override
+  public void putMetrics(MetricsRecord record) {
+    if (serviceTimelinePublisher == null) {
+      // No publisher wired in (no-arg constructor path): drop the record
+      // instead of throwing an NPE.
+      log.warn("No ServiceTimelinePublisher set. Dropping metrics record.");
+      return;
+    }
+    if (serviceTimelinePublisher.isStopped()) {
+      log.warn("ServiceTimelinePublisher has stopped. "
+          + "Not publishing any more metrics to ATS.");
+      return;
+    }
+
+    boolean isServiceMetrics = false;
+    boolean isComponentMetrics = false;
+    String appId = null;
+    for (MetricsTag tag : record.tags()) {
+      if (tag.name().equals("type") && tag.value().equals("service")) {
+        isServiceMetrics = true;
+      } else if (tag.name().equals("type") && tag.value().equals("component")) {
+        isComponentMetrics = true;
+        break; // if component metrics, no more information required from tag so
+               // break the loop
+      } else if (tag.name().equals("appId")) {
+        appId = tag.value();
+      }
+    }
+
+    if (isServiceMetrics && appId != null) {
+      // parameterized logging avoids the isDebugEnabled() guard
+      log.debug("Publishing service metrics. {}", record);
+      serviceTimelinePublisher.publishMetrics(record.metrics(), appId,
+          ServiceTimelineEntityType.SERVICE_ATTEMPT.toString(),
+          record.timestamp());
+    } else if (isComponentMetrics) {
+      log.debug("Publishing Component metrics. {}", record);
+      serviceTimelinePublisher.publishMetrics(record.metrics(), record.name(),
+          ServiceTimelineEntityType.COMPONENT.toString(), record.timestamp());
+    }
+  }
+
+  @Override
+  public void init(SubsetConfiguration conf) {
+  }
+
+  @Override
+  public void flush() {
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEntityType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEntityType.java
new file mode 100644
index 00000000000..d5c95394aa4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEntityType.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.timelineservice;
+
+/**
+ * Slider entities that are published to ATS.
+ */
+public enum ServiceTimelineEntityType {
+  /**
+   * Used for publishing service entity information.
+   * The enum name (via toString()) is used as the ATS entity type string.
+   */
+  SERVICE_ATTEMPT,
+
+  /**
+   * Used for publishing component entity information.
+   */
+  COMPONENT,
+
+  /**
+   * Used for publishing component instance entity information.
+   */
+  COMPONENT_INSTANCE
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEvent.java
new file mode 100644
index 00000000000..6c3428a748d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEvent.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.timelineservice;
+
+/**
+ * Events that are used to store in ATS.
+ */
+public enum ServiceTimelineEvent {
+  /** Service attempt registered with ATS. */
+  SERVICE_ATTEMPT_REGISTERED,
+
+  /** Service attempt finished/unregistered. */
+  SERVICE_ATTEMPT_UNREGISTERED,
+
+  /** Component instance container started. */
+  COMPONENT_INSTANCE_REGISTERED,
+
+  /** Component instance container finished. */
+  COMPONENT_INSTANCE_UNREGISTERED,
+
+  /** Component instance IP/hostname updated. */
+  COMPONENT_INSTANCE_IP_HOST_UPDATE,
+
+  /** Component instance reached the READY state. */
+  COMPONENT_INSTANCE_BECOME_READY
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineMetricsConstants.java
new file mode 100644
index 00000000000..78a71713d90
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineMetricsConstants.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.timelineservice;
+
+/**
+ * Constants which are stored as key in ATS
+ */
+public final class ServiceTimelineMetricsConstants {
+
+ public static final String URI = "URI";
+
+ public static final String NAME = "NAME";
+
+ public static final String STATE = "STATE";
+
+ public static final String EXIT_STATUS_CODE = "EXIT_STATUS_CODE";
+
+ public static final String EXIT_REASON = "EXIT_REASON";
+
+ public static final String DIAGNOSTICS_INFO = "DIAGNOSTICS_INFO";
+
+ public static final String LAUNCH_TIME = "LAUNCH_TIME";
+
+ public static final String QUICK_LINKS = "QUICK_LINKS";
+
+ public static final String LAUNCH_COMMAND = "LAUNCH_COMMAND";
+
+ public static final String TOTAL_CONTAINERS = "NUMBER_OF_CONTAINERS";
+
+ public static final String RUNNING_CONTAINERS =
+ "NUMBER_OF_RUNNING_CONTAINERS";
+
+ /**
+ * Artifacts constants.
+ */
+ public static final String ARTIFACT_ID = "ARTIFACT_ID";
+
+ public static final String ARTIFACT_TYPE = "ARTIFACT_TYPE";
+
+ public static final String ARTIFACT_URI = "ARTIFACT_URI";
+
+ /**
+ * Resource constants.
+ */
+ public static final String RESOURCE_CPU = "RESOURCE_CPU";
+
+ public static final String RESOURCE_MEMORY = "RESOURCE_MEMORY";
+
+ public static final String RESOURCE_PROFILE = "RESOURCE_PROFILE";
+
+ /**
+ * component instance constants.
+ */
+ public static final String IP = "IP";
+
+ public static final String HOSTNAME = "HOSTNAME";
+
+ public static final String BARE_HOST = "BARE_HOST";
+
+ public static final String COMPONENT_NAME = "COMPONENT_NAME";
+
+ public static final String COMPONENT_INSTANCE_NAME = "COMPONENT_INSTANCE_NAME";
+
+ /**
+ * component constants.
+ */
+ public static final String DEPENDENCIES = "DEPENDENCIES";
+
+ public static final String DESCRIPTION = "DESCRIPTION";
+
+ public static final String RUN_PRIVILEGED_CONTAINER =
+ "RUN_PRIVILEGED_CONTAINER";
+
+ public static final String PLACEMENT_POLICY = "PLACEMENT_POLICY";
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelinePublisher.java
new file mode 100644
index 00000000000..c5229868005
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelinePublisher.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.timelineservice;
+
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
+import org.apache.hadoop.yarn.service.ServiceContext;
+import org.apache.hadoop.yarn.service.api.records.*;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.service.api.records.ContainerState.READY;
+import static org.apache.hadoop.yarn.service.api.records.ContainerState.STOPPED;
+import static org.apache.hadoop.yarn.service.timelineservice.ServiceTimelineMetricsConstants.DIAGNOSTICS_INFO;
+
+/**
+ * A single service that publishes all the Timeline Entities.
+ */
+public class ServiceTimelinePublisher extends CompositeService {
+
+ // Number of bytes of config which can be published in one shot to ATSv2.
+ public static final int ATS_CONFIG_PUBLISH_SIZE_BYTES = 10 * 1024;
+
+ private TimelineV2Client timelineClient;
+
+ private volatile boolean stopped = false;
+
+ private static final Logger log =
+ LoggerFactory.getLogger(ServiceTimelinePublisher.class);
+
+ @Override
+ protected void serviceInit(org.apache.hadoop.conf.Configuration configuration)
+ throws Exception {
+ addService(timelineClient);
+ super.serviceInit(configuration);
+ }
+
+
+ @Override
+ protected void serviceStop() throws Exception {
+ stopped = true;
+ super.serviceStop();
+ }
+
+ public boolean isStopped() {
+ return stopped;
+ }
+
+ public ServiceTimelinePublisher(TimelineV2Client client) {
+ super(ServiceTimelinePublisher.class.getName());
+ timelineClient = client;
+ }
+
+ public void serviceAttemptRegistered(Service service,
+ org.apache.hadoop.conf.Configuration systemConf) {
+ long currentTimeMillis = service.getLaunchTime() == null
+ ? System.currentTimeMillis() : service.getLaunchTime().getTime();
+
+ TimelineEntity entity = createServiceAttemptEntity(service.getId());
+ entity.setCreatedTime(currentTimeMillis);
+
+ // create info keys
+ Map entityInfos = new HashMap();
+ entityInfos.put(ServiceTimelineMetricsConstants.NAME, service.getName());
+ entityInfos.put(ServiceTimelineMetricsConstants.STATE,
+ ServiceState.STARTED.toString());
+ entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_TIME,
+ currentTimeMillis);
+ entity.addInfo(ServiceTimelineMetricsConstants.QUICK_LINKS,
+ service.getQuicklinks());
+ entity.addInfo(entityInfos);
+
+ // add an event
+ TimelineEvent startEvent = new TimelineEvent();
+ startEvent.setId(ServiceTimelineEvent.SERVICE_ATTEMPT_REGISTERED.toString());
+ startEvent.setTimestamp(currentTimeMillis);
+ entity.addEvent(startEvent);
+
+ // publish before configurations published
+ putEntity(entity);
+
+ // publish system config - YarnConfiguration
+ populateTimelineEntity(systemConf.iterator(), service.getId(),
+ ServiceTimelineEntityType.SERVICE_ATTEMPT.toString());
+ // publish container conf
+ publishContainerConf(service.getConfiguration(), service.getId(),
+ ServiceTimelineEntityType.SERVICE_ATTEMPT.toString());
+
+ // publish component as separate entity.
+ publishComponents(service.getComponents());
+ }
+
+ public void serviceAttemptUpdated(Service service) {
+ TimelineEntity entity = createServiceAttemptEntity(service.getId());
+ entity.addInfo(ServiceTimelineMetricsConstants.QUICK_LINKS,
+ service.getQuicklinks());
+ putEntity(entity);
+ }
+
+ public void serviceAttemptUnregistered(ServiceContext context,
+ String diagnostics) {
+ TimelineEntity entity = createServiceAttemptEntity(
+ context.attemptId.getApplicationId().toString());
+ Map entityInfos = new HashMap();
+ entityInfos.put(ServiceTimelineMetricsConstants.STATE,
+ FinalApplicationStatus.ENDED);
+ entityInfos.put(DIAGNOSTICS_INFO, diagnostics);
+ entity.addInfo(entityInfos);
+
+ // add an event
+ TimelineEvent finishEvent = new TimelineEvent();
+ finishEvent
+ .setId(ServiceTimelineEvent.SERVICE_ATTEMPT_UNREGISTERED.toString());
+ finishEvent.setTimestamp(System.currentTimeMillis());
+ entity.addEvent(finishEvent);
+
+ putEntity(entity);
+ }
+
+ public void componentInstanceStarted(Container container,
+ ComponentInstance instance) {
+
+ TimelineEntity entity = createComponentInstanceEntity(container.getId());
+ entity.setCreatedTime(container.getLaunchTime().getTime());
+
+ // create info keys
+ Map entityInfos = new HashMap();
+ entityInfos.put(ServiceTimelineMetricsConstants.BARE_HOST,
+ container.getBareHost());
+ entityInfos.put(ServiceTimelineMetricsConstants.STATE,
+ container.getState().toString());
+ entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_TIME,
+ container.getLaunchTime().getTime());
+ entityInfos.put(ServiceTimelineMetricsConstants.COMPONENT_NAME,
+ instance.getCompName());
+ entityInfos.put(ServiceTimelineMetricsConstants.COMPONENT_INSTANCE_NAME,
+ instance.getCompInstanceName());
+ entity.addInfo(entityInfos);
+
+ // add an event
+ TimelineEvent startEvent = new TimelineEvent();
+ startEvent
+ .setId(ServiceTimelineEvent.COMPONENT_INSTANCE_REGISTERED.toString());
+ startEvent.setTimestamp(container.getLaunchTime().getTime());
+ entity.addEvent(startEvent);
+
+ putEntity(entity);
+ }
+
+ public void componentInstanceFinished(ComponentInstance instance,
+ int exitCode, String diagnostics) {
+ TimelineEntity entity = createComponentInstanceEntity(
+ instance.getContainer().getId().toString());
+
+ // create info keys
+ Map entityInfos = new HashMap();
+ entityInfos.put(ServiceTimelineMetricsConstants.EXIT_STATUS_CODE,
+ exitCode);
+ entityInfos.put(DIAGNOSTICS_INFO, diagnostics);
+ entityInfos.put(ServiceTimelineMetricsConstants.STATE, STOPPED);
+ entity.addInfo(entityInfos);
+
+ // add an event
+ TimelineEvent startEvent = new TimelineEvent();
+ startEvent
+ .setId(ServiceTimelineEvent.COMPONENT_INSTANCE_UNREGISTERED.toString());
+ startEvent.setTimestamp(System.currentTimeMillis());
+ entity.addEvent(startEvent);
+
+ putEntity(entity);
+ }
+
+ public void componentInstanceIPHostUpdated(Container container) {
+ TimelineEntity entity = createComponentInstanceEntity(container.getId());
+
+ // create info keys
+ Map entityInfos = new HashMap();
+ entityInfos.put(ServiceTimelineMetricsConstants.IP, container.getIp());
+ entityInfos.put(ServiceTimelineMetricsConstants.HOSTNAME,
+ container.getHostname());
+ entityInfos.put(ServiceTimelineMetricsConstants.STATE,
+ container.getState().toString());
+ entity.addInfo(entityInfos);
+
+ TimelineEvent updateEvent = new TimelineEvent();
+ updateEvent.setId(ServiceTimelineEvent.COMPONENT_INSTANCE_IP_HOST_UPDATE
+ .toString());
+ updateEvent.setTimestamp(System.currentTimeMillis());
+ entity.addEvent(updateEvent);
+
+ putEntity(entity);
+ }
+
+ public void componentInstanceBecomeReady(Container container) {
+ TimelineEntity entity = createComponentInstanceEntity(container.getId());
+ Map entityInfo = new HashMap<>();
+ entityInfo.put(ServiceTimelineMetricsConstants.STATE, READY);
+ entity.addInfo(entityInfo);
+ TimelineEvent updateEvent = new TimelineEvent();
+ updateEvent.setId(ServiceTimelineEvent.COMPONENT_INSTANCE_BECOME_READY
+ .toString());
+ updateEvent.setTimestamp(System.currentTimeMillis());
+ entity.addEvent(updateEvent);
+ putEntity(entity);
+ }
+
+ private void publishComponents(List components) {
+ long currentTimeMillis = System.currentTimeMillis();
+ for (Component component : components) {
+ TimelineEntity entity = createComponentEntity(component.getName());
+ entity.setCreatedTime(currentTimeMillis);
+
+ // create info keys
+ Map entityInfos = new HashMap();
+ if (component.getArtifact() != null) {
+ entityInfos.put(ServiceTimelineMetricsConstants.ARTIFACT_ID,
+ component.getArtifact().getId());
+ entityInfos.put(ServiceTimelineMetricsConstants.ARTIFACT_TYPE,
+ component.getArtifact().getType().toString());
+ }
+
+ if (component.getResource() != null) {
+ entityInfos.put(ServiceTimelineMetricsConstants.RESOURCE_CPU,
+ component.getResource().getCpus());
+ entityInfos.put(ServiceTimelineMetricsConstants.RESOURCE_MEMORY,
+ component.getResource().getMemory());
+ if (component.getResource().getProfile() != null) {
+ entityInfos.put(ServiceTimelineMetricsConstants.RESOURCE_PROFILE,
+ component.getResource().getProfile());
+ }
+ }
+
+ if (component.getLaunchCommand() != null) {
+ entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_COMMAND,
+ component.getLaunchCommand());
+ }
+ entityInfos.put(ServiceTimelineMetricsConstants.RUN_PRIVILEGED_CONTAINER,
+ component.getRunPrivilegedContainer().toString());
+ if (component.getPlacementPolicy() != null) {
+ entityInfos.put(ServiceTimelineMetricsConstants.PLACEMENT_POLICY,
+ component.getPlacementPolicy().getLabel());
+ }
+ entity.addInfo(entityInfos);
+
+ putEntity(entity);
+
+ // publish container specific configurations
+ publishContainerConf(component.getConfiguration(), component.getName(),
+ ServiceTimelineEntityType.COMPONENT.toString());
+ }
+ }
+
+ private void publishContainerConf(Configuration configuration,
+ String entityId, String entityType) {
+ populateTimelineEntity(configuration.getEnv().entrySet().iterator(),
+ entityId, entityType);
+
+ for (ConfigFile configFile : configuration.getFiles()) {
+ populateTimelineEntity(configFile.getProperties().entrySet().iterator(),
+ entityId, entityType);
+ }
+ }
+
+ private void populateTimelineEntity(Iterator> iterator,
+ String entityId, String entityType) {
+ int configSize = 0;
+ TimelineEntity entity = createTimelineEntity(entityId, entityType);
+ while (iterator.hasNext()) {
+ Entry entry = iterator.next();
+ int size = entry.getKey().length() + entry.getValue().length();
+ configSize += size;
+ // Configs are split into multiple entities if they exceed 100kb in size.
+ if (configSize > ATS_CONFIG_PUBLISH_SIZE_BYTES) {
+ if (entity.getConfigs().size() > 0) {
+ putEntity(entity);
+ entity = createTimelineEntity(entityId, entityType);
+ }
+ configSize = size;
+ }
+ entity.addConfig(entry.getKey(), entry.getValue());
+ }
+ if (configSize > 0) {
+ putEntity(entity);
+ }
+ }
+
+ /**
+ * Called from ServiceMetricsSink at regular interval of time.
+ * @param metrics of service or components
+ * @param entityId Id of entity
+ * @param entityType Type of entity
+ * @param timestamp
+ */
+ public void publishMetrics(Iterable metrics, String entityId,
+ String entityType, long timestamp) {
+ TimelineEntity entity = createTimelineEntity(entityId, entityType);
+ Set entityMetrics = new HashSet();
+ for (AbstractMetric metric : metrics) {
+ TimelineMetric timelineMetric = new TimelineMetric();
+ timelineMetric.setId(metric.name());
+ timelineMetric.addValue(timestamp, metric.value());
+ entityMetrics.add(timelineMetric);
+ }
+ entity.setMetrics(entityMetrics);
+ putEntity(entity);
+ }
+
+ private TimelineEntity createServiceAttemptEntity(String serviceId) {
+ TimelineEntity entity = createTimelineEntity(serviceId,
+ ServiceTimelineEntityType.SERVICE_ATTEMPT.toString());
+ return entity;
+ }
+
+ private TimelineEntity createComponentInstanceEntity(String instanceId) {
+ TimelineEntity entity = createTimelineEntity(instanceId,
+ ServiceTimelineEntityType.COMPONENT_INSTANCE.toString());
+ return entity;
+ }
+
+ private TimelineEntity createComponentEntity(String componentId) {
+ TimelineEntity entity = createTimelineEntity(componentId,
+ ServiceTimelineEntityType.COMPONENT.toString());
+ return entity;
+ }
+
+ private TimelineEntity createTimelineEntity(String entityId,
+ String entityType) {
+ TimelineEntity entity = new TimelineEntity();
+ entity.setId(entityId);
+ entity.setType(entityType);
+ return entity;
+ }
+
+ private void putEntity(TimelineEntity entity) {
+ try {
+ if (log.isDebugEnabled()) {
+ log.debug("Publishing the entity " + entity + ", JSON-style content: "
+ + TimelineUtils.dumpTimelineRecordtoJSON(entity));
+ }
+ if (timelineClient != null) {
+ timelineClient.putEntitiesAsync(entity);
+ } else {
+ log.error("Seems like client has been removed before the entity "
+ + "could be published for " + entity);
+ }
+ } catch (Exception e) {
+ log.error("Error when publishing entity " + entity, e);
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/package-info.java
new file mode 100644
index 00000000000..72f7842b836
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Timeline service (ATS v2) publishing support for YARN services:
+ * lifecycle events, configuration and metrics of services, components
+ * and component instances.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.service.timelineservice;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java
new file mode 100644
index 00000000000..2607c084be8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.map.JsonMappingException;
+
+import java.io.IOException;
+
+/**
+ * Persistence of {@link SerializedApplicationReport}
+ *
+ */
+public class ApplicationReportSerDeser
+ extends JsonSerDeser {
+ public ApplicationReportSerDeser() {
+ super(SerializedApplicationReport.class);
+ }
+
+
+ private static final ApplicationReportSerDeser
+ staticinstance = new ApplicationReportSerDeser();
+
+ /**
+ * Convert an instance to a JSON string -sync access to a shared ser/deser
+ * object instance
+ * @param instance object to convert
+ * @return a JSON string description
+ * @throws JsonParseException parse problems
+ * @throws JsonMappingException O/J mapping problems
+ */
+ public static String toString(SerializedApplicationReport instance)
+ throws IOException, JsonGenerationException, JsonMappingException {
+ synchronized (staticinstance) {
+ return staticinstance.toJson(instance);
+ }
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java
new file mode 100644
index 00000000000..86896b2b694
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
+import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants;
+import org.apache.hadoop.registry.client.types.Endpoint;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.encodeForRegistry;
+import static org.apache.hadoop.registry.client.binding.RegistryUtils.convertUsername;
+import static org.apache.hadoop.registry.client.binding.RegistryUtils.getCurrentUsernameUnencoded;
+import static org.apache.hadoop.registry.client.binding.RegistryUtils.servicePath;
+
+/**
+ * Generic code to get the URLs for clients via the registry
+ */
+public class ClientRegistryBinder {
+ private static final Logger log =
+ LoggerFactory.getLogger(ClientRegistryBinder.class);
+
+ private final RegistryOperations operations;
+
+ public ClientRegistryBinder(RegistryOperations operations) {
+ this.operations = operations;
+ }
+
+ /**
+ * Buld the user path -switches to the system path if the user is "".
+ * It also cross-converts the username to ascii via punycode
+ * @param username username or ""
+ * @return the path to the user
+ */
+ public static String homePathForUser(String username) {
+ Preconditions.checkArgument(username != null, "null user");
+
+ // catch recursion
+ if (username.startsWith(RegistryConstants.PATH_USERS)) {
+ return username;
+ }
+
+ if (username.isEmpty()) {
+ return RegistryConstants.PATH_SYSTEM_SERVICES;
+ }
+
+ // convert username to registry name
+ String convertedName = convertUsername(username);
+
+ return RegistryPathUtils.join(RegistryConstants.PATH_USERS,
+ encodeForRegistry(convertedName));
+ }
+
+ /**
+ * Get the current username, before any encoding has been applied.
+ * @return the current user from the kerberos identity, falling back
+ * to the user and/or env variables.
+ */
+ public static String currentUsernameUnencoded() {
+ String env_hadoop_username = System.getenv(
+ RegistryInternalConstants.HADOOP_USER_NAME);
+ return getCurrentUsernameUnencoded(env_hadoop_username);
+ }
+
+ /**
+ * Qualify a user.
+ *
+ * -
"~"
maps to user home path home
+ * -
"~user"
maps to /users/$user
+ * -
"/"
maps to /services/
+ *
+ * @param user the username
+ * @return the base path
+ */
+ public static String qualifyUser(String user) {
+ // qualify the user
+ String t = user.trim();
+ if (t.startsWith("/")) {
+ // already resolved
+ return t;
+ } else if (t.equals("~")) {
+ // self
+ return currentUsernameUnencoded();
+ } else if (t.startsWith("~")) {
+ // another user
+ // convert username to registry name
+ String convertedName = convertUsername(t.substring(1));
+
+ return RegistryPathUtils.join(RegistryConstants.PATH_USERS,
+ encodeForRegistry(convertedName));
+ } else {
+ return "/" + t;
+ }
+ }
+
+ /**
+ * Look up an external REST API
+ * @param user user which will be qualified as per {@link #qualifyUser(String)}
+ * @param serviceClass service class
+ * @param instance instance name
+ * @param api API
+ * @return the API, or an exception is raised.
+ * @throws IOException
+ */
+ public String lookupExternalRestAPI(String user,
+ String serviceClass,
+ String instance,
+ String api)
+ throws IOException {
+ String qualified = qualifyUser(user);
+ String path = servicePath(qualified, serviceClass, instance);
+ String restAPI = resolveExternalRestAPI(api, path);
+ if (restAPI == null) {
+ throw new PathNotFoundException(path + " API " + api);
+ }
+ return restAPI;
+ }
+
+ /**
+ * Resolve a service record then return an external REST API exported it.
+ *
+ * @param api API to resolve
+ * @param path path of the service record
+ * @return null if the record exists but the API is absent or it has no
+ * REST endpoints.
+ * @throws IOException resolution problems, as covered in
+ * {@link RegistryOperations#resolve(String)}
+ */
+ protected String resolveExternalRestAPI(String api, String path) throws
+ IOException {
+ ServiceRecord record = operations.resolve(path);
+ return lookupRestAPI(record, api, true);
+ }
+
+ /**
+ * Look up an external REST API endpoint
+ * @param record service record
+ * @param api URI of api
+ * @param external flag to indicate this is an external record
+ * @return the first endpoint of the implementation, or null if there
+ * is no entry for the API, implementation or it's the wrong type.
+ */
+ public static String lookupRestAPI(ServiceRecord record,
+ String api, boolean external) throws InvalidRecordException {
+ try {
+ String url = null;
+ Endpoint endpoint = getEndpoint(record, api, external);
+ List addresses =
+ RegistryTypeUtils.retrieveAddressesUriType(endpoint);
+ if (addresses != null && !addresses.isEmpty()) {
+ url = addresses.get(0);
+ }
+ return url;
+ } catch (InvalidRecordException e) {
+ log.debug("looking for API {}", api, e);
+ return null;
+ }
+ }
+
+ /**
+ * Get an endpont by API
+ * @param record service record
+ * @param api API
+ * @param external flag to indicate this is an external record
+ * @return the endpoint or null
+ */
+ public static Endpoint getEndpoint(ServiceRecord record,
+ String api,
+ boolean external) {
+ return external ? record.getExternalEndpoint(api)
+ : record.getInternalEndpoint(api);
+ }
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Comparators.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Comparators.java
new file mode 100644
index 00000000000..9f0e5d40a57
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Comparators.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+/**
+ * Some general comparators
+ */
+public class Comparators {
+
+ public static class LongComparator implements Comparator, Serializable {
+ @Override
+ public int compare(Long o1, Long o2) {
+ return o1.compareTo(o2);
+ }
+ }
+
+ public static class InvertedLongComparator
+ implements Comparator, Serializable {
+ @Override
+ public int compare(Long o1, Long o2) {
+ return o2.compareTo(o1);
+ }
+ }
+
+ /**
+ * Little template class to reverse any comparator
+ * @param <T> the type that is being compared
+ */
+ public static class ComparatorReverser implements Comparator,
+ Serializable {
+
+ final Comparator