From fd9d0d4ead503faa6a9d04ad2d7d304f1e3b05b4 Mon Sep 17 00:00:00 2001
From: Jian He
Date: Thu, 30 Mar 2017 16:07:18 +0800
Subject: [PATCH] YARN-6400. Remove some unneeded code after YARN-6255. Contributed by Jian He

---
 .../apache/slider/api/ClusterDescription.java | 795 ------------------
 .../slider/api/ClusterDescriptionKeys.java | 25 -
 .../api/ClusterDescriptionOperations.java | 93 --
 .../slider/api/SliderApplicationApi.java | 159 ----
 .../slider/api/SliderClusterProtocol.java | 14 -
 .../types/ApplicationLivenessInformation.java | 3 -
 .../slider/api/types/RestTypeMarshalling.java | 27 -
 .../apache/slider/client/SliderClient.java | 437 +++-------
 .../ipc/SliderApplicationIpcClient.java | 234 ------
 .../client/ipc/SliderClusterOperations.java | 127 +--
 .../slider/client/rest/RestClientFactory.java | 89 --
 .../rest/SliderApplicationApiRestClient.java | 289 -------
 .../AbstractClusterBuildingActionArgs.java | 11 -
 .../slider/common/tools/CoreFileSystem.java | 74 --
 .../slider/common/tools/SliderUtils.java | 116 ---
 .../core/buildutils/InstanceBuilder.java | 520 ------------
 .../slider/core/buildutils/InstanceIO.java | 83 --
 .../AbstractInputPropertiesValidator.java | 49 --
 .../slider/core/conf/AggregateConf.java | 198 -----
 .../org/apache/slider/core/conf/ConfTree.java | 101 ---
 .../slider/core/conf/ConfTreeOperations.java | 527 ------------
 .../core/conf/InputPropertiesValidator.java | 27 -
 .../ResourcesInputPropertiesValidator.java | 41 -
 .../TemplateInputPropertiesValidator.java | 38 -
 .../slider/core/launch/AbstractLauncher.java | 55 --
 .../core/persist/AggregateConfSerDeser.java | 55 --
 .../slider/core/persist/ConfPersister.java | 286 -------
 .../slider/core/persist/ConfTreeSerDeser.java | 54 --
 .../persist/LockAcquireFailedException.java | 40 -
 .../providers/AbstractClientProvider.java | 167 +---
 .../slider/providers/ProviderService.java | 11 -
 .../docker/DockerClientProvider.java | 18 -
 .../server/appmaster/SliderAppMaster.java | 85 +-
 .../appmaster/actions/ActionFlexCluster.java | 1 -
 .../rpc/SliderClusterProtocolPBImpl.java | 69 --
 .../rpc/SliderClusterProtocolProxy.java | 63 --
 .../appmaster/rpc/SliderIPCService.java | 68 --
 .../security/SecurityConfiguration.java | 237 +++---
 .../server/appmaster/state/AppState.java | 63 --
 .../appmaster/web/rest/AMWebServices.java | 9 +-
 .../resources/AggregateModelRefresher.java | 43 -
 .../resources/AppconfRefresher.java | 52 --
 .../resources/LiveComponentsRefresher.java | 39 -
 .../resources/LiveContainersRefresher.java | 52 --
 .../resources/LiveNodesRefresher.java | 41 -
 .../rest/management/ManagementResource.java | 94 ---
 .../management/resources/ActionsResource.java | 22 -
 .../resources/AggregateConfResource.java | 90 --
 .../resources/ComponentResource.java | 53 --
 .../resources/ConfTreeResource.java | 69 --
 .../management/resources/ResourceFactory.java | 47 --
 .../web/rest/publisher/PublisherResource.java | 7 +-
 .../main/proto/SliderClusterProtocol.proto | 28 -
 53 files changed, 238 insertions(+), 5757 deletions(-)
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/RestClientFactory.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/SliderApplicationApiRestClient.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/InstanceBuilder.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/InstanceIO.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/AbstractInputPropertiesValidator.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/AggregateConf.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ConfTree.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ConfTreeOperations.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/InputPropertiesValidator.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ResourcesInputPropertiesValidator.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/TemplateInputPropertiesValidator.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AggregateConfSerDeser.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfPersister.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfTreeSerDeser.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/LockAcquireFailedException.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveComponentsRefresher.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveContainersRefresher.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveNodesRefresher.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/ManagementResource.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ActionsResource.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/AggregateConfResource.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
 delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ResourceFactory.java

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
deleted file mode 100644
index f8e5e7cd8d7..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
+++ /dev/null
@@ -1,795 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.api; - -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.slider.api.types.ApplicationLivenessInformation; -import org.apache.slider.common.tools.SliderUtils; -import org.apache.slider.core.exceptions.BadConfigException; -import org.apache.slider.providers.SliderProviderFactory; -import org.codehaus.jackson.JsonGenerationException; -import org.codehaus.jackson.JsonParseException; -import org.codehaus.jackson.annotate.JsonIgnore; -import org.codehaus.jackson.annotate.JsonIgnoreProperties; -import org.codehaus.jackson.map.JsonMappingException; -import org.codehaus.jackson.map.ObjectMapper; -import org.codehaus.jackson.map.SerializationConfig; -import org.codehaus.jackson.map.annotate.JsonSerialize; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_HOME; -import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_IMAGE_PATH; -import static org.apache.slider.api.OptionKeys.ZOOKEEPER_PATH; -import static org.apache.slider.api.OptionKeys.ZOOKEEPER_QUORUM; - -/** - * Represents a cluster specification; designed to be sendable over the wire - * and persisted in JSON by way of Jackson. - * - * When used in cluster status operations the info - * and statistics maps contain information about the cluster. - * - * As a wire format it is less efficient in both xfer and ser/deser than - * a binary format, but by having one unified format for wire and persistence, - * the code paths are simplified. - * - * This was the original single-file specification/model used in the Hoya - * precursor to Slider. Its now retained primarily as a way to publish - * the current state of the application, or at least a fraction thereof ... - * the larger set of information from the REST API is beyond the scope of - * this structure. - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL) - -public class ClusterDescription implements Cloneable { - protected static final Logger - log = LoggerFactory.getLogger(ClusterDescription.class); - - private static final String UTF_8 = "UTF-8"; - - /** - * version counter - */ - public String version = "1.0"; - - /** - * Name of the cluster - */ - public String name; - - /** - * Type of cluster - */ - public String type = SliderProviderFactory.DEFAULT_CLUSTER_TYPE; - - /** - * State of the cluster - */ - public int state; - - /* - State list for both clusters and nodes in them. Ordered so that destroyed follows - stopped. 
- - Some of the states are only used for recording - the persistent state of the cluster and are not - seen in node descriptions - */ - - /** - * Specification is incomplete & cannot - * be used: {@value} - */ - public static final int STATE_INCOMPLETE = StateValues.STATE_INCOMPLETE; - - /** - * Spec has been submitted: {@value} - */ - public static final int STATE_SUBMITTED = StateValues.STATE_SUBMITTED; - /** - * Cluster created: {@value} - */ - public static final int STATE_CREATED = StateValues.STATE_CREATED; - /** - * Live: {@value} - */ - public static final int STATE_LIVE = StateValues.STATE_LIVE; - /** - * Stopped - */ - public static final int STATE_STOPPED = StateValues.STATE_STOPPED; - /** - * destroyed - */ - public static final int STATE_DESTROYED = StateValues.STATE_DESTROYED; - - /** - * When was the cluster specification created? - * This is not the time a cluster was thawed; that will - * be in the info section. - */ - public long createTime; - - /** - * When was the cluster specification last updated - */ - public long updateTime; - - /** - * URL path to the original configuration - * files; these are re-read when - * restoring a cluster - */ - - public String originConfigurationPath; - - /** - * URL path to the generated configuration - */ - public String generatedConfigurationPath; - - /** - * This is where the data goes - */ - public String dataPath; - - /** - * cluster-specific options -to control both - * the Slider AM and the application that it deploys - */ - public Map options = new HashMap<>(); - - /** - * cluster information - * This is only valid when querying the cluster status. - */ - public Map info = new HashMap<>(); - - /** - * Statistics. This is only relevant when querying the cluster status - */ - public Map> statistics = new HashMap<>(); - - /** - * Instances: role->count - */ - public Map> instances = new HashMap<>(); - - /** - * Role options, - * role -> option -> value - */ - public Map> roles = new HashMap<>(); - - - /** - * List of key-value pairs to add to a client config to set up the client - */ - public Map clientProperties = new HashMap<>(); - - /** - * Status information - */ - public Map status; - - /** - * Liveness information; the same as returned - * on the live/liveness/ URL - */ - public ApplicationLivenessInformation liveness; - - /** - * Creator. - */ - public ClusterDescription() { - } - - @Override - public String toString() { - try { - return toJsonString(); - } catch (Exception e) { - log.debug("Failed to convert CD to JSON ", e); - return super.toString(); - } - } - - /** - * Shallow clone - * @return a shallow clone - * @throws CloneNotSupportedException - */ - @Override - public Object clone() throws CloneNotSupportedException { - return super.clone(); - } - - /** - * A deep clone of the spec. 
This is done inefficiently with a ser/derser - * @return the cluster description - */ - public ClusterDescription deepClone() { - try { - return fromJson(toJsonString()); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - - /** - * Save a cluster description to a hadoop filesystem - * @param fs filesystem - * @param path path - * @param overwrite should any existing file be overwritten - * @throws IOException IO exception - */ - public void save(FileSystem fs, Path path, boolean overwrite) throws - IOException { - FSDataOutputStream dataOutputStream = fs.create(path, overwrite); - writeJsonAsBytes(dataOutputStream); - } - - /** - * Save a cluster description to the local filesystem - * @param file file - * @throws IOException IO excpetion - */ - public void save(File file) throws IOException { - log.debug("Saving to {}", file.getAbsolutePath()); - if (!file.getParentFile().mkdirs()) { - log.warn("Failed to mkdirs for {}", file.getParentFile()); - } - DataOutputStream dataOutputStream = new DataOutputStream(new FileOutputStream(file)); - writeJsonAsBytes(dataOutputStream); - } - - /** - * Write the json as bytes -then close the file - * @param dataOutputStream an outout stream that will always be closed - * @throws IOException any failure - */ - private void writeJsonAsBytes(DataOutputStream dataOutputStream) - throws IOException { - try { - String json = toJsonString(); - byte[] b = json.getBytes(UTF_8); - dataOutputStream.write(b); - } finally { - dataOutputStream.close(); - } - } - - /** - * Load from the filesystem - * @param fs filesystem - * @param path path - * @return a loaded CD - * @throws IOException IO problems - */ - public static ClusterDescription load(FileSystem fs, Path path) - throws IOException, JsonParseException, JsonMappingException { - FileStatus status = fs.getFileStatus(path); - byte[] b = new byte[(int) status.getLen()]; - FSDataInputStream dataInputStream = fs.open(path); - int count = dataInputStream.read(b); - String json = new String(b, 0, count, UTF_8); - return fromJson(json); - } - - /** - * Make a deep copy of the class - * @param source source - * @return the copy - */ - public static ClusterDescription copy(ClusterDescription source) { - //currently the copy is done by a generate/save. 
Inefficient but it goes - //down the tree nicely - try { - return fromJson(source.toJsonString()); - } catch (IOException e) { - throw new RuntimeException("ClusterDescription copy failed " + e, e); - } - } - - /** - * Convert to a JSON string - * @return a JSON string description - * @throws IOException Problems mapping/writing the object - */ - public String toJsonString() throws IOException, - JsonGenerationException, - JsonMappingException { - ObjectMapper mapper = new ObjectMapper(); - mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true); - return mapper.writeValueAsString(this); - } - - /** - * Convert from JSON - * @param json input - * @return the parsed JSON - * @throws IOException IO - * @throws JsonMappingException failure to map from the JSON to this class - */ - public static ClusterDescription fromJson(String json) - throws IOException, JsonParseException, JsonMappingException { - ObjectMapper mapper = new ObjectMapper(); - try { - return mapper.readValue(json, ClusterDescription.class); - } catch (IOException e) { - log.error("Exception while parsing json : " + e + "\n" + json, e); - throw e; - } - } - - /** - * Convert from input stream - * @param is input stream of cluster description - * @return the parsed JSON - * @throws IOException IO - * @throws JsonMappingException failure to map from the JSON to this class - */ - public static ClusterDescription fromStream(InputStream is) - throws IOException, JsonParseException, JsonMappingException { - if (is==null) { - throw new FileNotFoundException("Empty Stream"); - } - ObjectMapper mapper = new ObjectMapper(); - try { - return mapper.readValue(is, ClusterDescription.class); - } catch (IOException e) { - log.error("Exception while parsing input stream : {}", e, e); - throw e; - } - } - - /** - * Convert from a JSON file - * @param jsonFile input file - * @return the parsed JSON - * @throws IOException IO problems - * @throws JsonMappingException failure to map from the JSON to this class - */ - public static ClusterDescription fromFile(File jsonFile) - throws IOException, JsonParseException, JsonMappingException { - ObjectMapper mapper = new ObjectMapper(); - try { - return mapper.readValue(jsonFile, ClusterDescription.class); - } catch (IOException e) { - log.error("Exception while parsing json file {}" , jsonFile, e); - throw e; - } - } - - /** - * Set a cluster option: a key val pair in the options {} section - * @param key key option name - * @param val value option value - */ - public void setOption(String key, String val) { - options.put(key, val); - } - - /** - * Set a cluster option if it is unset. 
If it is already set, - * in the Cluster Description, it is left alone - * @param key key key to query/set - * @param val value value - */ - - public void setOptionifUnset(String key, String val) { - if (options.get(key) == null) { - options.put(key, val); - } - } - - /** - * Set an integer option -it's converted to a string before saving - * @param option option name - * @param val integer value - */ - public void setOption(String option, int val) { - setOption(option, Integer.toString(val)); - } - - /** - * Set a boolean option - * @param option option name - * @param val bool value - */ - public void setOption(String option, boolean val) { - setOption(option, Boolean.toString(val)); - } - - /** - * Get a cluster option or value - * - * @param key option key - * @param defVal option val - * @return resolved value or default - */ - public String getOption(String key, String defVal) { - String val = options.get(key); - return val != null ? val : defVal; - } - - /** - * Get a cluster option or value - * - * @param key mandatory key - * @return the value - * @throws BadConfigException if the option is missing - */ - public String getMandatoryOption(String key) throws BadConfigException { - String val = options.get(key); - if (val == null) { - throw new BadConfigException("Missing option " + key); - } - return val; - } - - /** - * Get an integer option; use {@link Integer#decode(String)} so as to take hex - * oct and bin values too. - * - * @param option option name - * @param defVal default value - * @return parsed value - * @throws NumberFormatException if the role could not be parsed. - */ - public int getOptionInt(String option, int defVal) { - String val = getOption(option, Integer.toString(defVal)); - return Integer.decode(val); - } - - /** - * Verify that an option is set: that is defined AND non-empty - * @param key key to verify - * @throws BadConfigException - */ - public void verifyOptionSet(String key) throws BadConfigException { - if (SliderUtils.isUnset(getOption(key, null))) { - throw new BadConfigException("Unset cluster option %s", key); - } - } - - /** - * Get an option as a boolean. Note that {@link Boolean#valueOf(String)} - * is used for parsing -its policy of what is true vs false applies. - * @param option name - * @param defVal default - * @return the option. - */ - public boolean getOptionBool(String option, boolean defVal) { - return Boolean.valueOf(getOption(option, Boolean.toString(defVal))); - } - - /** - * Get a role option - * @param role role to get from - * @param option option name - * @param defVal default value - * @return resolved value - */ - public String getRoleOpt(String role, String option, String defVal) { - Map roleopts = getRole(role); - if (roleopts == null) { - return defVal; - } - String val = roleopts.get(option); - return val != null ? 
val : defVal; - } - - /** - * Get a mandatory role option - * @param role role to get from - * @param option option name - * @return resolved value - * @throws BadConfigException if the option is not defined - */ - public String getMandatoryRoleOpt(String role, String option) throws - BadConfigException { - Map roleopts = getRole(role); - if (roleopts == null) { - throw new BadConfigException("Missing role %s ", role); - } - String val = roleopts.get(option); - if (val == null) { - throw new BadConfigException("Missing option '%s' in role %s ", option, role); - } - return val; - } - - /** - * Get a mandatory integer role option - * @param role role to get from - * @param option option name - * @return resolved value - * @throws BadConfigException if the option is not defined - */ - public int getMandatoryRoleOptInt(String role, String option) - throws BadConfigException { - getMandatoryRoleOpt(role, option); - return getRoleOptInt(role, option, 0); - } - - /** - * look up a role and return its options - * @param role role - * @return role mapping or null - */ - public Map getRole(String role) { - return roles.get(role); - } - - /** - * Get a role -adding it to the roleopts map if - * none with that name exists - * @param role role - * @return role mapping - */ - public Map getOrAddRole(String role) { - Map map = getRole(role); - if (map == null) { - map = new HashMap<>(); - } - roles.put(role, map); - return map; - } - - /* - * return the Set of role names - */ - @JsonIgnore - public Set getRoleNames() { - return new HashSet<>(roles.keySet()); - } - - /** - * Get a role whose presence is mandatory - * @param role role name - * @return the mapping - * @throws BadConfigException if the role is not there - */ - public Map getMandatoryRole(String role) throws - BadConfigException { - Map roleOptions = getRole(role); - if (roleOptions == null) { - throw new BadConfigException("Missing role " + role); - } - return roleOptions; - } - - /** - * Get an integer role option; use {@link Integer#decode(String)} so as to take hex - * oct and bin values too. - * - * @param role role to get from - * @param option option name - * @param defVal default value - * @return parsed value - * @throws NumberFormatException if the role could not be parsed. - */ - public int getRoleOptInt(String role, String option, int defVal) { - String val = getRoleOpt(role, option, Integer.toString(defVal)); - return Integer.decode(val); - } - - /** - * Get an integer role option; use {@link Integer#decode(String)} so as to take hex - * oct and bin values too. - * - * @param role role to get from - * @param option option name - * @param defVal default value - * @return parsed value - * @throws NumberFormatException if the role could not be parsed. 
- */ - public long getRoleOptLong(String role, String option, long defVal) { - String val = getRoleOpt(role, option, Long.toString(defVal)); - return Long.decode(val); - } - - /** - * Set a role option, creating the role if necessary - * @param role role name - * @param option option name - * @param val value - */ - public void setRoleOpt(String role, String option, String val) { - Map roleopts = getOrAddRole(role); - roleopts.put(option, val); - } - - /** - * Set an integer role option, creating the role if necessary - * @param role role name - * @param option option name - * @param val integer value - */ - public void setRoleOpt(String role, String option, int val) { - setRoleOpt(role, option, Integer.toString(val)); - } - - /** - * Set a role option of any object, using its string value. - * This works for (Boxed) numeric values as well as other objects - * @param role role name - * @param option option name - * @param val non-null value - */ - public void setRoleOpt(String role, String option, Object val) { - setRoleOpt(role, option, val.toString()); - } - - /** - * Get the value of a role requirement (cores, RAM, etc). - * These are returned as integers, but there is special handling of the - * string {@link ResourceKeys#YARN_RESOURCE_MAX}, which triggers - * the return of the maximum value. - * @param role role to get from - * @param option option name - * @param defVal default value - * @param maxVal value to return if the max val is requested - * @return parsed value - * @throws NumberFormatException if the role could not be parsed. - */ - public int getRoleResourceRequirement(String role, String option, int defVal, int maxVal) { - String val = getRoleOpt(role, option, Integer.toString(defVal)); - Integer intVal; - if (ResourceKeys.YARN_RESOURCE_MAX.equals(val)) { - intVal = maxVal; - } else { - intVal = Integer.decode(val); - } - return intVal; - } - - - /** - * Set the time for an information (human, machine) timestamp pair of fields. - * The human time is the time in millis converted via the {@link Date} class. - * @param keyHumanTime name of human time key - * @param keyMachineTime name of machine time - * @param time timestamp - */ - - public void setInfoTime(String keyHumanTime, String keyMachineTime, long time) { - SliderUtils.setInfoTime(info, keyHumanTime, keyMachineTime, time); - } - - /** - * Set an information string. This is content that is only valid in status - * reports. - * @param key key - * @param value string value - */ - @JsonIgnore - public void setInfo(String key, String value) { - info.put(key, value); - } - - /** - * Get an information string. This is content that is only valid in status - * reports. - * @param key key - * @return the value or null - */ - @JsonIgnore - public String getInfo(String key) { - return info.get(key); - } - - /** - * Get an information string. This is content that is only valid in status - * reports. 
- * @param key key - * @return the value or null - */ - @JsonIgnore - public boolean getInfoBool(String key) { - String val = info.get(key); - if (val != null) { - return Boolean.valueOf(val); - } - return false; - } - - @JsonIgnore - public String getZkHosts() throws BadConfigException { - return getMandatoryOption(ZOOKEEPER_QUORUM); - } - - /** - * Set the hosts for the ZK quorum - * @param zkHosts a comma separated list of hosts - */ - @JsonIgnore - public void setZkHosts(String zkHosts) { - setOption(ZOOKEEPER_QUORUM, zkHosts); - } - - @JsonIgnore - public String getZkPath() throws BadConfigException { - return getMandatoryOption(ZOOKEEPER_PATH); - } - - @JsonIgnore - public void setZkPath(String zkPath) { - setOption(ZOOKEEPER_PATH, zkPath); - } - - /** - * HBase home: if non-empty defines where a copy of HBase is preinstalled - */ - @JsonIgnore - public String getApplicationHome() { - return getOption(INTERNAL_APPLICATION_HOME, ""); - } - - @JsonIgnore - public void setApplicationHome(String applicationHome) { - setOption(INTERNAL_APPLICATION_HOME, applicationHome); - } - - /** - * The path in HDFS where the HBase image is - */ - @JsonIgnore - public String getImagePath() { - return getOption(INTERNAL_APPLICATION_IMAGE_PATH, ""); - } - - /** - * Set the path in HDFS where the HBase image is - */ - @JsonIgnore - public void setImagePath(String imagePath) { - setOption(INTERNAL_APPLICATION_IMAGE_PATH, imagePath); - } - - /** - * Query for the image path being set (non null/non empty) - * @return true if there is a path in the image path option - */ - @JsonIgnore - public boolean isImagePathSet() { - return SliderUtils.isSet(getImagePath()); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java deleted file mode 100644 index 5b7a92a69c2..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.slider.api; - -public class ClusterDescriptionKeys { - - public static final String KEY_CLUSTER_LIVE = "live"; - public static final String KEY_CLUSTER_FAILED = "failed"; -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java deleted file mode 100644 index 5b954143c38..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.api; - -import org.apache.slider.common.tools.SliderUtils; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTree; -import org.apache.slider.core.conf.MapOperations; -import org.apache.slider.core.exceptions.BadConfigException; -import org.apache.slider.providers.SliderProviderFactory; - -import java.util.Map; - -import static org.apache.slider.api.OptionKeys.ZOOKEEPER_PATH; -import static org.apache.slider.api.OptionKeys.ZOOKEEPER_QUORUM; - -/** - * Operations on Cluster Descriptions - */ -public class ClusterDescriptionOperations { - - - public static ClusterDescription buildFromInstanceDefinition(AggregateConf aggregateConf) throws - BadConfigException { - - ClusterDescription cd = new ClusterDescription(); - - aggregateConf.resolve(); - - //options are a merge of all globals - Map options = cd.options; - SliderUtils.mergeMapsIgnoreDuplicateKeys(options, - aggregateConf.getInternal().global); - SliderUtils.mergeMapsIgnoreDuplicateKeys(options, - aggregateConf.getAppConf().global); - SliderUtils.mergeMapsIgnoreDuplicateKeys(options, - aggregateConf.getResources().global); - - //roles are the role values merged in the same order - mergeInComponentMap(cd, aggregateConf.getInternal()); - mergeInComponentMap(cd, aggregateConf.getAppConf()); - mergeInComponentMap(cd, aggregateConf.getResources()); - - //now add the extra bits - cd.state = ClusterDescription.STATE_LIVE; - MapOperations internalOptions = - aggregateConf.getInternalOperations().getGlobalOptions(); - MapOperations appOptions = - aggregateConf.getAppConfOperations().getGlobalOptions(); - - cd.type = internalOptions.getOption(InternalKeys.INTERNAL_PROVIDER_NAME, - SliderProviderFactory.DEFAULT_CLUSTER_TYPE); - - cd.dataPath = internalOptions.get(InternalKeys.INTERNAL_DATA_DIR_PATH); - cd.name = internalOptions.get(OptionKeys.APPLICATION_NAME); - 
cd.originConfigurationPath = internalOptions.get(InternalKeys.INTERNAL_SNAPSHOT_CONF_PATH); - cd.generatedConfigurationPath = internalOptions.get(InternalKeys.INTERNAL_GENERATED_CONF_PATH); - cd.setImagePath(internalOptions.get(InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH)); - cd.setApplicationHome(internalOptions.get(InternalKeys.INTERNAL_APPLICATION_HOME)); - cd.setZkPath(appOptions.get(ZOOKEEPER_PATH)); - cd.setZkHosts(appOptions.get(ZOOKEEPER_QUORUM)); - - return cd; - } - - private static void mergeInComponentMap(ClusterDescription cd, - ConfTree confTree) { - - Map> components = confTree.components; - for (Map.Entry> compEntry : components.entrySet()) { - String name = compEntry.getKey(); - Map destRole = cd.getOrAddRole(name); - Map sourceComponent = compEntry.getValue(); - SliderUtils.mergeMapsIgnoreDuplicateKeys(destRole, sourceComponent); - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java deleted file mode 100644 index f6a2cc90205..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.api; - -import org.apache.slider.api.types.ApplicationLivenessInformation; -import org.apache.slider.api.types.ComponentInformation; -import org.apache.slider.api.types.ContainerInformation; -import org.apache.slider.api.types.NodeInformation; -import org.apache.slider.api.types.NodeInformationList; -import org.apache.slider.api.types.PingInformation; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTree; -import org.apache.slider.core.conf.ConfTreeOperations; - -import java.io.IOException; -import java.util.Map; - -/** - * API exported by the slider remote REST/IPC endpoints. 
- */ -public interface SliderApplicationApi { - /** - * Get the aggregate desired model - * @return the aggregate configuration of what was asked for - * -before resolution has taken place - * @throws IOException on any failure - */ - AggregateConf getDesiredModel() throws IOException; - - /** - * Get the desired application configuration - * @return the application configuration asked for - * -before resolution has taken place - * @throws IOException on any failure - */ - ConfTreeOperations getDesiredAppconf() throws IOException; - - /** - * Get the desired YARN resources - * @return the resources asked for - * -before resolution has taken place - * @throws IOException on any failure - */ - ConfTreeOperations getDesiredResources() throws IOException; - - /** - * Get the aggregate resolved model - * @return the aggregate configuration of what was asked for - * -after resolution has taken place - * @throws IOException on any failure - */ - AggregateConf getResolvedModel() throws IOException; - - /** - * Get the resolved application configuration - * @return the application configuration asked for - * -after resolution has taken place - * @throws IOException on any failure - */ - ConfTreeOperations getResolvedAppconf() throws IOException; - - /** - * Get the resolved YARN resources - * @return the resources asked for - * -after resolution has taken place - * @throws IOException on any failure - */ - ConfTreeOperations getResolvedResources() throws IOException; - - /** - * Get the live YARN resources - * @return the live set of resources in the cluster - * @throws IOException on any failure - */ - ConfTreeOperations getLiveResources() throws IOException; - - /** - * Get a map of live containers [containerId:info] - * @return a possibly empty list of serialized containers - * @throws IOException on any failure - */ - Map enumContainers() throws IOException; - - /** - * Get a container from the container Id - * @param containerId YARN container ID - * @return the container information - * @throws IOException on any failure - */ - ContainerInformation getContainer(String containerId) throws IOException; - - /** - * List all components into a map of [name:info] - * @return a possibly empty map of components - * @throws IOException on any failure - */ - Map enumComponents() throws IOException; - - /** - * Get information about a component - * @param componentName name of the component - * @return the component details - * @throws IOException on any failure - */ - ComponentInformation getComponent(String componentName) throws IOException; - - /** - * List all nodes into a map of [name:info] - * @return a possibly empty list of nodes - * @throws IOException on any failure - */ - NodeInformationList getLiveNodes() throws IOException; - - /** - * Get information about a node - * @param hostname name of the node - * @return the node details - * @throws IOException on any failure - */ - NodeInformation getLiveNode(String hostname) throws IOException; - - /** - * Ping as a GET - * @param text text to include - * @return the response - * @throws IOException on any failure - */ - PingInformation ping(String text) throws IOException; - - /** - * Stop the AM (async operation) - * @param text text to include - * @throws IOException on any failure - */ - void stop(String text) throws IOException; - - /** - * Get the application liveness - * @return current liveness information - * @throws IOException - */ - ApplicationLivenessInformation getApplicationLiveness() throws IOException; -} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java index f384927eb9f..7f768b98105 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java @@ -149,18 +149,4 @@ public interface SliderClusterProtocol extends VersionedProtocol { Messages.NodeInformationProto getLiveNode( Messages.GetLiveNodeRequestProto request ) throws IOException; - - Messages.WrappedJsonProto getModelDesired(Messages.EmptyPayloadProto request) throws IOException; - - Messages.WrappedJsonProto getModelDesiredAppconf(Messages.EmptyPayloadProto request) throws IOException; - - Messages.WrappedJsonProto getModelDesiredResources(Messages.EmptyPayloadProto request) throws IOException; - - Messages.WrappedJsonProto getModelResolved(Messages.EmptyPayloadProto request) throws IOException; - - Messages.WrappedJsonProto getModelResolvedAppconf(Messages.EmptyPayloadProto request) throws IOException; - - Messages.WrappedJsonProto getModelResolvedResources(Messages.EmptyPayloadProto request) throws IOException; - - Messages.WrappedJsonProto getLiveResources(Messages.EmptyPayloadProto request) throws IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java index 9879d053df8..687edd28358 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java @@ -36,9 +36,6 @@ public class ApplicationLivenessInformation { /** number of outstanding requests: those needed to satisfy */ public int requestsOutstanding; - /** number of requests submitted to YARN */ - public int activeRequests; - @Override public String toString() { final StringBuilder sb = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RestTypeMarshalling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RestTypeMarshalling.java index 713cffd5434..bc3d52674a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RestTypeMarshalling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RestTypeMarshalling.java @@ -19,18 +19,7 @@ package org.apache.slider.api.types; import 
org.apache.slider.api.proto.Messages; -import org.apache.slider.api.types.ApplicationLivenessInformation; -import org.apache.slider.api.types.ComponentInformation; -import org.apache.slider.api.types.ContainerInformation; -import org.apache.slider.api.types.NodeEntryInformation; -import org.apache.slider.api.types.NodeInformation; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTree; -import org.apache.slider.core.conf.ConfTreeOperations; -import org.apache.slider.core.persist.AggregateConfSerDeser; -import org.apache.slider.core.persist.ConfTreeSerDeser; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -265,20 +254,4 @@ public class RestTypeMarshalling { public static String unmarshall(Messages.WrappedJsonProto wire) { return wire.getJson(); } - - public static ConfTree unmarshallToConfTree(Messages.WrappedJsonProto wire) throws - IOException { - return new ConfTreeSerDeser().fromJson(wire.getJson()); - } - - public static ConfTreeOperations unmarshallToCTO(Messages.WrappedJsonProto wire) throws - IOException { - return new ConfTreeOperations(new ConfTreeSerDeser().fromJson(wire.getJson())); - } - - public static AggregateConf unmarshallToAggregateConf(Messages.WrappedJsonProto wire) throws - IOException { - return new AggregateConfSerDeser().fromJson(wire.getJson()); - } - } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java index f4ea70b3703..8bceddfad59 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java @@ -46,8 +46,6 @@ import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.KerberosDiags; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.alias.CredentialProvider; -import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; @@ -59,8 +57,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; -import org.apache.hadoop.yarn.api.records.NodeReport; -import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.YarnClientApplication; @@ -72,14 +68,12 @@ import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Times; import org.apache.slider.api.ClusterNode; -import org.apache.slider.api.SliderApplicationApi; import org.apache.slider.api.SliderClusterProtocol; import org.apache.slider.api.proto.Messages; 
import org.apache.slider.api.resource.Application; import org.apache.slider.api.resource.Component; import org.apache.slider.api.types.ContainerInformation; import org.apache.slider.api.types.NodeInformationList; -import org.apache.slider.client.ipc.SliderApplicationIpcClient; import org.apache.slider.client.ipc.SliderClusterOperations; import org.apache.slider.common.Constants; import org.apache.slider.common.SliderExitCodes; @@ -119,9 +113,6 @@ import org.apache.slider.common.tools.ConfigHelper; import org.apache.slider.common.tools.SliderFileSystem; import org.apache.slider.common.tools.SliderUtils; import org.apache.slider.common.tools.SliderVersionInfo; -import org.apache.slider.core.buildutils.InstanceIO; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTree; import org.apache.slider.core.exceptions.BadClusterStateException; import org.apache.slider.core.exceptions.BadCommandArgumentsException; import org.apache.slider.core.exceptions.BadConfigException; @@ -178,8 +169,6 @@ import java.io.InterruptedIOException; import java.io.OutputStreamWriter; import java.io.PrintStream; import java.io.PrintWriter; -import java.io.StringWriter; -import java.io.Writer; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Arrays; @@ -199,7 +188,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import static org.apache.hadoop.registry.client.binding.RegistryUtils.*; -import static org.apache.slider.api.InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH; import static org.apache.slider.common.Constants.HADOOP_JAAS_DEBUG; import static org.apache.slider.common.params.SliderActions.*; import static org.apache.slider.common.tools.SliderUtils.*; @@ -253,7 +241,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe */ private SliderYarnClientImpl yarnClient; private YarnAppListClient yarnAppListClient; - private AggregateConf launchedInstanceDefinition; /** * The YARN registry service @@ -942,43 +929,43 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe return 0; } - protected static void checkForCredentials(Configuration conf, - ConfTree tree, String clusterName) throws IOException { - if (tree.credentials == null || tree.credentials.isEmpty()) { - log.info("No credentials requested"); - return; - } - - Console console = System.console(); - for (Entry> cred : tree.credentials.entrySet()) { - String provider = cred.getKey() - .replaceAll(Pattern.quote("${CLUSTER_NAME}"), clusterName) - .replaceAll(Pattern.quote("${CLUSTER}"), clusterName); - List aliases = cred.getValue(); - if (aliases == null || aliases.isEmpty()) { - continue; - } - Configuration c = new Configuration(conf); - c.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, provider); - CredentialProvider credentialProvider = CredentialProviderFactory.getProviders(c).get(0); - Set existingAliases = new HashSet<>(credentialProvider.getAliases()); - for (String alias : aliases) { - if (existingAliases.contains(alias.toLowerCase(Locale.ENGLISH))) { - log.info("Credentials for " + alias + " found in " + provider); - } else { - if (console == null) { - throw new IOException("Unable to input password for " + alias + - " because System.console() is null; provider " + provider + - " must be populated manually"); - } - char[] pass = readPassword(alias, console); - credentialProvider.createCredentialEntry(alias, pass); - credentialProvider.flush(); - Arrays.fill(pass, ' '); - } - } - } - } +// protected static 
void checkForCredentials(Configuration conf, +// ConfTree tree, String clusterName) throws IOException { +// if (tree.credentials == null || tree.credentials.isEmpty()) { +// log.info("No credentials requested"); +// return; +// } +// +// Console console = System.console(); +// for (Entry> cred : tree.credentials.entrySet()) { +// String provider = cred.getKey() +// .replaceAll(Pattern.quote("${CLUSTER_NAME}"), clusterName) +// .replaceAll(Pattern.quote("${CLUSTER}"), clusterName); +// List aliases = cred.getValue(); +// if (aliases == null || aliases.isEmpty()) { +// continue; +// } +// Configuration c = new Configuration(conf); +// c.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, provider); +// CredentialProvider credentialProvider = CredentialProviderFactory.getProviders(c).get(0); +// Set existingAliases = new HashSet<>(credentialProvider.getAliases()); +// for (String alias : aliases) { +// if (existingAliases.contains(alias.toLowerCase(Locale.ENGLISH))) { +// log.info("Credentials for " + alias + " found in " + provider); +// } else { +// if (console == null) { +// throw new IOException("Unable to input password for " + alias + +// " because System.console() is null; provider " + provider + +// " must be populated manually"); +// } +// char[] pass = readPassword(alias, console); +// credentialProvider.createCredentialEntry(alias, pass); +// credentialProvider.flush(); +// Arrays.fill(pass, ' '); +// } +// } +// } +// } private static char[] readPassword(String alias, Console console) throws IOException { @@ -1347,56 +1334,57 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe } private int actionPackageInstances() throws YarnException, IOException { - Map persistentInstances = sliderFileSystem - .listPersistentInstances(); - if (persistentInstances.isEmpty()) { - log.info("No slider cluster specification available"); - return EXIT_SUCCESS; - } - String pkgPathValue = sliderFileSystem - .buildPackageDirPath(StringUtils.EMPTY, StringUtils.EMPTY).toUri() - .getPath(); - FileSystem fs = sliderFileSystem.getFileSystem(); - Iterator> instanceItr = persistentInstances - .entrySet().iterator(); - log.info("List of applications with its package name and path"); - println("%-25s %15s %30s %s", "Cluster Name", "Package Name", - "Package Version", "Application Location"); - while(instanceItr.hasNext()) { - Map.Entry entry = instanceItr.next(); - String clusterName = entry.getKey(); - Path clusterPath = entry.getValue(); - AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved( - clusterName, clusterPath); - Path appDefPath = null; - try { - appDefPath = new Path( - getApplicationDefinitionPath(instanceDefinition - .getAppConfOperations())); - } catch (BadConfigException e) { - // Invalid cluster state, so move on to next. No need to log anything - // as this is just listing of instances. 
- continue; - } - if (!appDefPath.isUriPathAbsolute()) { - appDefPath = new Path(fs.getHomeDirectory(), appDefPath); - } - String appDefPathStr = appDefPath.toUri().toString(); - try { - if (appDefPathStr.contains(pkgPathValue) && fs.isFile(appDefPath)) { - String packageName = appDefPath.getParent().getName(); - String packageVersion = StringUtils.EMPTY; - if (instanceDefinition.isVersioned()) { - packageVersion = packageName; - packageName = appDefPath.getParent().getParent().getName(); - } - println("%-25s %15s %30s %s", clusterName, packageName, - packageVersion, appDefPathStr); - } - } catch (IOException e) { - log.debug("{} application definition path {} is not found.", clusterName, appDefPathStr); - } - } +// Map persistentInstances = sliderFileSystem +// .listPersistentInstances(); +// if (persistentInstances.isEmpty()) { +// log.info("No slider cluster specification available"); +// return EXIT_SUCCESS; +// } +// String pkgPathValue = sliderFileSystem +// .buildPackageDirPath(StringUtils.EMPTY, StringUtils.EMPTY).toUri() +// .getPath(); +// FileSystem fs = sliderFileSystem.getFileSystem(); +// Iterator> instanceItr = persistentInstances +// .entrySet().iterator(); +// log.info("List of applications with its package name and path"); +// println("%-25s %15s %30s %s", "Cluster Name", "Package Name", +// "Package Version", "Application Location"); + //TODO deal with packages +// while(instanceItr.hasNext()) { +// Map.Entry entry = instanceItr.next(); +// String clusterName = entry.getKey(); +// Path clusterPath = entry.getValue(); +// AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved( +// clusterName, clusterPath); +// Path appDefPath = null; +// try { +// appDefPath = new Path( +// getApplicationDefinitionPath(instanceDefinition +// .getAppConfOperations())); +// } catch (BadConfigException e) { +// // Invalid cluster state, so move on to next. No need to log anything +// // as this is just listing of instances. +// continue; +// } +// if (!appDefPath.isUriPathAbsolute()) { +// appDefPath = new Path(fs.getHomeDirectory(), appDefPath); +// } +// String appDefPathStr = appDefPath.toUri().toString(); +// try { +// if (appDefPathStr.contains(pkgPathValue) && fs.isFile(appDefPath)) { +// String packageName = appDefPath.getParent().getName(); +// String packageVersion = StringUtils.EMPTY; +// if (instanceDefinition.isVersioned()) { +// packageVersion = packageName; +// packageName = appDefPath.getParent().getParent().getName(); +// } +// println("%-25s %15s %30s %s", clusterName, packageName, +// packageVersion, appDefPathStr); +// } +// } catch (IOException e) { +// log.debug("{} application definition path {} is not found.", clusterName, appDefPathStr); +// } +// } return EXIT_SUCCESS; } @@ -1565,29 +1553,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe return newTimeout; } - /** - * Load the instance definition. 
It is not resolved at this point - * @param name cluster name - * @param clusterDirectory cluster dir - * @return the loaded configuration - * @throws IOException - * @throws SliderException - * @throws UnknownApplicationInstanceException if the file is not found - */ - public AggregateConf loadInstanceDefinitionUnresolved(String name, - Path clusterDirectory) throws IOException, SliderException { - - try { - AggregateConf definition = - InstanceIO.loadInstanceDefinitionUnresolved(sliderFileSystem, - clusterDirectory); - definition.setName(name); - return definition; - } catch (FileNotFoundException e) { - throw UnknownApplicationInstanceException.unknownInstance(name, e); - } - } - protected Map getAmLaunchEnv(Configuration config) { String sliderAmLaunchEnv = config.get(KEY_AM_LAUNCH_ENV); log.debug("{} = {}", KEY_AM_LAUNCH_ENV, sliderAmLaunchEnv); @@ -2236,55 +2201,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe return clusterOperations.getApplication(); } - /** - * List all node UUIDs in a role - * @param role role name or "" for all - * @return an array of UUID strings - * @throws IOException - * @throws YarnException - */ - @VisibleForTesting - public String[] listNodeUUIDsByRole(String role) throws - IOException, - YarnException { - return createClusterOperations() - .listNodeUUIDsByRole(role); - } - - /** - * List all nodes in a role. This is a double round trip: once to list - * the nodes in a role, another to get their details - * @param role component/role to look for - * @return an array of ContainerNode instances - * @throws IOException - * @throws YarnException - */ - @VisibleForTesting - public List listClusterNodesInRole(String role) throws - IOException, - YarnException { - return createClusterOperations().listClusterNodesInRole(role); - } - - /** - * Get the details on a list of uuids - * @param uuids uuids to ask for - * @return a possibly empty list of node details - * @throws IOException - * @throws YarnException - */ - @VisibleForTesting - public List listClusterNodes(String[] uuids) throws - IOException, - YarnException { - - if (uuids.length == 0) { - // short cut on an empty list - return new LinkedList<>(); - } - return createClusterOperations().listClusterNodes(uuids); - } - /** * Bond to a running cluster * @param clustername cluster name @@ -2319,39 +2235,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe return new SliderClusterOperations(sliderAM); } - /** - * Create a cluster operations instance against the active cluster - * -returning any previous created one if held. - * @return a bonded cluster operations instance - * @throws YarnException YARN issues - * @throws IOException IO problems - */ - private SliderClusterOperations createClusterOperations() throws - YarnException, - IOException { - if (sliderClusterOperations == null) { - sliderClusterOperations = - createClusterOperations(getDeployedClusterName()); - } - return sliderClusterOperations; - } - - /** - * Wait for an instance of a named role to be live (or past it in the lifecycle) - * @param role role to look for - * @param timeout time to wait - * @return the state. If still in CREATED, the cluster didn't come up - * in the time period. If LIVE, all is well. 
If >LIVE, it has shut for a reason
- * @throws IOException IO
- * @throws SliderException Slider
- * @throws WaitTimeoutException if the wait timed out
- */
- @VisibleForTesting
- public int waitForRoleInstanceLive(String role, long timeout)
- throws WaitTimeoutException, IOException, YarnException {
- return createClusterOperations().waitForRoleInstanceLive(role, timeout);
- }
-
/**
* Generate an exception for an unknown cluster
* @param clustername cluster name
@@ -2546,11 +2429,18 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
if (diagnosticArgs.client) {
actionDiagnosticClient(diagnosticArgs);
} else if (diagnosticArgs.application) {
- actionDiagnosticApplication(diagnosticArgs);
+ // TODO print configs of application - get from AM
} else if (diagnosticArgs.yarn) {
- actionDiagnosticYarn(diagnosticArgs);
+ // This method prints YARN node info and YARN configs.
+ // The yarn node CLI is a much richer replacement for the node info;
+ // for the configs, this method reads only the local client config,
+ // not the cluster configs.
+// actionDiagnosticYarn(diagnosticArgs);
} else if (diagnosticArgs.credentials) {
- actionDiagnosticCredentials();
+ // actionDiagnosticCredentials internally only runs a bare 'klist' command;
+ // the user can just run klist directly with whatever extra options they need,
+ // so this method adds little value.
+// actionDiagnosticCredentials();
} else if (diagnosticArgs.all) {
actionDiagnosticAll(diagnosticArgs);
} else if (diagnosticArgs.level) {
@@ -2571,122 +2461,11 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
throws IOException, YarnException {
// assign application name from param to each sub diagnostic function
actionDiagnosticClient(diagnosticArgs);
- actionDiagnosticApplication(diagnosticArgs);
- actionDiagnosticSlider(diagnosticArgs);
- actionDiagnosticYarn(diagnosticArgs);
- actionDiagnosticCredentials();
- }
-
- private void actionDiagnosticCredentials() throws BadConfigException,
- IOException {
- if (isHadoopClusterSecure(loadSliderClientXML())) {
- String credentialCacheFileDescription = null;
- try {
- credentialCacheFileDescription = checkCredentialCacheFile();
- } catch (BadConfigException e) {
- log.error("The credential config is not valid: " + e.toString());
- throw e;
- } catch (IOException e) {
- log.error("Unable to read the credential file: " + e.toString());
- throw e;
- }
- log.info("Credential cache file for the current user: "
- + credentialCacheFileDescription);
- } else {
- log.info("the cluster is not in secure mode");
- }
- }
-
- private void actionDiagnosticYarn(ActionDiagnosticArgs diagnosticArgs)
- throws IOException, YarnException {
- JSONObject converter = null;
- log.info("the node in the YARN cluster has below state: ");
- List yarnClusterInfo;
- try {
- yarnClusterInfo = yarnClient.getNodeReports(NodeState.RUNNING);
- } catch (YarnException e1) {
- log.error("Exception happened when fetching node report from the YARN cluster: "
- + e1.toString());
- throw e1;
- } catch (IOException e1) {
- log.error("Network problem happened when fetching node report YARN cluster: "
- + e1.toString());
- throw e1;
- }
- for (NodeReport nodeReport : yarnClusterInfo) {
- log.info(nodeReport.toString());
- }
-
- if (diagnosticArgs.verbose) {
- Writer configWriter = new StringWriter();
- try {
- Configuration.dumpConfiguration(yarnClient.getConfig(), configWriter);
- } catch (IOException e1) {
- log.error("Network problem happened when retrieving YARN config from YARN: "
- + e1.toString());
- throw e1;
- }
- try {
- converter = new JSONObject(configWriter.toString());
- log.info("the configuration of the YARN cluster is: "
- + converter.toString(2));
-
- } catch (JSONException e) {
- log.error("JSONException happened during parsing response from YARN: "
- + e.toString());
- }
- }
- }
-
- private void actionDiagnosticSlider(ActionDiagnosticArgs diagnosticArgs)
- throws YarnException, IOException {
- // not using member variable clustername because we want to place
- // application name after --application option and member variable
- // cluster name has to be put behind action
- String clusterName = diagnosticArgs.name;
- if(isUnset(clusterName)){
- throw new BadCommandArgumentsException("application name must be provided with --name option");
- }
- AggregateConf instanceDefinition = new AggregateConf();
- String imagePath = instanceDefinition.getInternalOperations().get(
- INTERNAL_APPLICATION_IMAGE_PATH);
- // if null, it will be uploaded by Slider and thus at slider's path
- if (imagePath == null) {
- ApplicationReport appReport = findInstance(clusterName);
- if (appReport != null) {
- Path path1 = sliderFileSystem.getTempPathForCluster(clusterName);
- Path subPath = new Path(path1, appReport.getApplicationId().toString()
- + "/agent");
- imagePath = subPath.toString();
- }
- }
- log.info("The path of slider agent tarball on HDFS is: " + imagePath);
- }
-
- private void actionDiagnosticApplication(ActionDiagnosticArgs diagnosticArgs)
- throws YarnException, IOException {
- // not using member variable clustername because we want to place
- // application name after --application option and member variable
- // cluster name has to be put behind action
- String clusterName = diagnosticArgs.name;
- requireArgumentSet(Arguments.ARG_NAME, clusterName);
- AggregateConf instanceDefinition = new AggregateConf();
- String clusterDir = instanceDefinition.getAppConfOperations()
- .getGlobalOptions().get(AgentKeys.APP_ROOT);
- String pkgTarball = getApplicationDefinitionPath(instanceDefinition.getAppConfOperations());
- String runAsUser = instanceDefinition.getAppConfOperations()
- .getGlobalOptions().get(AgentKeys.RUNAS_USER);
-
- log.info("The location of the cluster instance directory in HDFS is: {}", clusterDir);
- log.info("The name of the application package tarball on HDFS is: {}",pkgTarball);
- log.info("The runas user of the application in the cluster is: {}",runAsUser);
-
- if (diagnosticArgs.verbose) {
- log.info("App config of the application:\n{}",
- instanceDefinition.getAppConf().toJson());
- log.info("Resource config of the application:\n{}",
- instanceDefinition.getResources().toJson());
- }
+ // actionDiagnosticSlider only prints the agent location on HDFS,
+ // which is no longer valid.
+ // actionDiagnosticCredentials only runs the 'klist' command; the user
+ // can just run klist directly with whatever extra options they need,
+ // so this method adds little value.
}
private void actionDiagnosticClient(ActionDiagnosticArgs diagnosticArgs)
@@ -3241,16 +3020,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
return 0;
}
- /**
- * Create a new IPC client for talking to slider via what follows the REST API.
- * Client must already be bonded to the cluster
- * @return a new IPC client
- */
- public SliderApplicationApi createIpcClient()
- throws IOException, YarnException {
- return new SliderApplicationIpcClient(createClusterOperations());
- }
-
/**
* Save/list tokens.
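The comments above defer to the stock 'yarn node -list' CLI and a plain 'klist' run by the user. As a rough illustration only (not part of this patch; the class name is invented), the node reports that the removed actionDiagnosticYarn logged can also be pulled straight from the YarnClient API:

import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NodeReportSketch {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
      // One report per RUNNING node, the same data the removed diagnostic printed.
      List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
      for (NodeReport node : nodes) {
        System.out.println(node);
      }
    } finally {
      yarnClient.stop();
    }
  }
}

This is essentially the call chain the deleted method used (yarnClient.getNodeReports(NodeState.RUNNING)), minus the JSON dump of the local client configuration.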
This is for testing oozie integration * @param args commands diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java deleted file mode 100644 index 3b5147ff368..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.client.ipc; - -import com.google.common.base.Preconditions; -import org.apache.slider.api.SliderClusterProtocol; -import org.apache.slider.api.types.ApplicationLivenessInformation; -import org.apache.slider.api.types.ComponentInformation; -import org.apache.slider.api.types.ContainerInformation; -import org.apache.slider.api.types.NodeInformation; -import org.apache.slider.api.types.NodeInformationList; -import org.apache.slider.api.types.PingInformation; -import org.apache.slider.api.SliderApplicationApi; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTree; -import org.apache.slider.core.conf.ConfTreeOperations; -import org.apache.slider.core.exceptions.NoSuchNodeException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.Map; - -/** - * Implementation of the Slider RESTy Application API over IPC. - *

- * Operations are executed via the {@link SliderClusterOperations} - * instance passed in; raised exceptions may be converted into ones - * consistent with the REST API. - */ -public class SliderApplicationIpcClient implements SliderApplicationApi { - - private static final Logger log = - LoggerFactory.getLogger(SliderApplicationIpcClient.class); - - private final SliderClusterOperations operations; - - public SliderApplicationIpcClient(SliderClusterOperations operations) { - Preconditions.checkArgument(operations != null, "null operations"); - this.operations = operations; - } - - /** - * Convert received (And potentially unmarshalled) local/remote - * exceptions into the equivalents in the REST API. - * Best effort. - *

- * If there is no translation, the original exception is returned. - *

- * If a new exception was created, it will have the message of the - * string value of the original exception, and that original - * exception will be the nested cause of this one - * @param exception IOException to convert - * @return an exception to throw - */ - private IOException convert(IOException exception) { - IOException result = exception; - if (exception instanceof NoSuchNodeException) { - result = new FileNotFoundException(exception.toString()); - result.initCause(exception); - } else { - // TODO: remap any other exceptions - } - return result; - } - - public SliderApplicationIpcClient(SliderClusterProtocol proxy) { - this(new SliderClusterOperations(proxy)); - } - - @Override - public AggregateConf getDesiredModel() throws IOException { - try { - return operations.getModelDesired(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public ConfTreeOperations getDesiredAppconf() throws IOException { - try { - return operations.getModelDesiredAppconf(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public ConfTreeOperations getDesiredResources() throws IOException { - try { - return operations.getModelDesiredResources(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public AggregateConf getResolvedModel() throws IOException { - try { - return operations.getModelResolved(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public ConfTreeOperations getResolvedAppconf() throws IOException { - try { - return operations.getModelResolvedAppconf(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public ConfTreeOperations getResolvedResources() throws IOException { - try { - return operations.getModelResolvedResources(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public ConfTreeOperations getLiveResources() throws IOException { - try { - return operations.getLiveResources(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public Map enumContainers() throws IOException { - try { - return operations.enumContainers(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public ContainerInformation getContainer(String containerId) throws - IOException { - try { - return operations.getContainer(containerId); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public Map enumComponents() throws IOException { - try { - return operations.enumComponents(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public ComponentInformation getComponent(String componentName) throws IOException { - try { - return operations.getComponent(componentName); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public NodeInformationList getLiveNodes() throws IOException { - try { - return operations.getLiveNodes(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public NodeInformation getLiveNode(String hostname) throws IOException { - try { - return operations.getLiveNode(hostname); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public PingInformation ping(String text) throws IOException { - return null; - } - - @Override - public void stop(String text) throws IOException { - try { - operations.stop(text); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public ApplicationLivenessInformation getApplicationLiveness() throws - IOException { - try { - return 
operations.getApplicationLiveness(); - } catch (IOException e) { - throw convert(e); - } - } - - @Override - public String toString() { - return "IPC implementation of SliderApplicationApi bonded to " + operations; - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java index 623b8b08a35..3bb2af61197 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java @@ -20,27 +20,20 @@ package org.apache.slider.client.ipc; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.slider.api.ClusterDescription; import org.apache.slider.api.ClusterNode; import org.apache.slider.api.SliderClusterProtocol; import org.apache.slider.api.StateValues; import org.apache.slider.api.proto.Messages; import org.apache.slider.api.resource.Application; import org.apache.slider.api.resource.Component; -import org.apache.slider.api.types.ApplicationLivenessInformation; -import org.apache.slider.api.types.ComponentInformation; import org.apache.slider.api.types.ContainerInformation; import org.apache.slider.api.types.NodeInformation; import org.apache.slider.api.types.NodeInformationList; import org.apache.slider.api.types.PingInformation; import org.apache.slider.common.tools.Duration; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTree; -import org.apache.slider.core.conf.ConfTreeOperations; import org.apache.slider.core.exceptions.NoSuchNodeException; import org.apache.slider.core.exceptions.SliderException; import org.apache.slider.core.exceptions.WaitTimeoutException; -import org.apache.slider.core.persist.ConfTreeSerDeser; import org.apache.slider.core.persist.JsonSerDeser; import org.codehaus.jackson.JsonParseException; import org.slf4j.Logger; @@ -50,11 +43,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import static org.apache.slider.api.types.RestTypeMarshalling.*; +import static org.apache.slider.api.types.RestTypeMarshalling.unmarshall; /** * Cluster operations at a slightly higher level than the RPC code @@ -322,85 +313,6 @@ public class SliderClusterOperations { appMaster.amSuicide(req); } - /** - * Get the application liveness - * @return current liveness information - * @throws IOException - */ - public ApplicationLivenessInformation getLivenessInformation() throws IOException { - Messages.GetApplicationLivenessRequestProto.Builder builder = - Messages.GetApplicationLivenessRequestProto.newBuilder(); - Messages.ApplicationLivenessInformationProto wire = - appMaster.getLivenessInformation(builder.build()); - return unmarshall(wire); - - } - - public AggregateConf getModelDesired() throws IOException { - return unmarshallToAggregateConf(appMaster.getModelDesired(EMPTY)); - } - - - public ConfTreeOperations getModelDesiredAppconf() throws 
IOException { - return unmarshallToCTO(appMaster.getModelDesiredAppconf(EMPTY)); - } - - - public ConfTreeOperations getModelDesiredResources() throws IOException { - return unmarshallToCTO(appMaster.getModelDesiredResources(EMPTY)); - } - - - public AggregateConf getModelResolved() throws IOException { - return unmarshallToAggregateConf(appMaster.getModelResolved(EMPTY)); - } - - - public ConfTreeOperations getModelResolvedAppconf() throws IOException { - return unmarshallToCTO(appMaster.getModelResolvedAppconf(EMPTY)); - } - - - public ConfTreeOperations getModelResolvedResources() throws IOException { - return unmarshallToCTO(appMaster.getModelDesiredResources(EMPTY)); - } - - - public ConfTreeOperations getLiveResources() throws IOException { - return unmarshallToCTO(appMaster.getLiveResources(EMPTY)); - } - - - public Map enumContainers() throws IOException { - Messages.GetLiveContainersResponseProto response = - appMaster.getLiveContainers( - Messages.GetLiveContainersRequestProto.newBuilder().build()); - - int namesCount = response.getNamesCount(); - int records = response.getContainersCount(); - if (namesCount != records) { - throw new IOException("Number of names returned (" + namesCount - + ") does not match the number of records returned: " - + records); - } - Map map = new HashMap<>(namesCount); - for (int i = 0; i < namesCount; i++) { - map.put(response.getNames(i), unmarshall(response.getContainers(i))); - } - return map; - } - - - public ContainerInformation getContainer(String containerId) throws - IOException { - Messages.ContainerInformationProto response = - appMaster.getLiveContainer( - Messages.GetLiveContainerRequestProto.newBuilder() - .setContainerId(containerId) - .build()); - return unmarshall(response); - } - public List getContainers() throws IOException { Messages.GetLiveContainersResponseProto response = appMaster .getLiveContainers(Messages.GetLiveContainersRequestProto.newBuilder() @@ -408,34 +320,6 @@ public class SliderClusterOperations { return unmarshall(response); } - public Map enumComponents() throws IOException { - Messages.GetLiveComponentsResponseProto response = - appMaster.getLiveComponents( - Messages.GetLiveComponentsRequestProto.newBuilder().build()); - - int namesCount = response.getNamesCount(); - int records = response.getComponentsCount(); - if (namesCount != records) { - throw new IOException( - "Number of names returned (" + namesCount + ")" + - " does not match the number of records returned: " + records); - } - Map map = new HashMap<>(namesCount); - for (int i = 0; i < namesCount; i++) { - map.put(response.getNames(i), unmarshall(response.getComponents(i))); - } - return map; - } - - public ComponentInformation getComponent(String componentName) - throws IOException { - Messages.GetLiveComponentRequestProto.Builder builder = - Messages.GetLiveComponentRequestProto.newBuilder(); - builder.setName(componentName); - Messages.ComponentInformationProto proto = appMaster.getLiveComponent(builder.build()); - return unmarshall(proto); - } - public NodeInformationList getLiveNodes() throws IOException { Messages.GetLiveNodesResponseProto response = appMaster.getLiveNodes(Messages.GetLiveNodesRequestProto.newBuilder().build()); @@ -462,13 +346,4 @@ public class SliderClusterOperations { public void stop(String text) throws IOException { amSuicide(text, 3, 0); } - - public ApplicationLivenessInformation getApplicationLiveness() throws - IOException { - Messages.ApplicationLivenessInformationProto proto = - appMaster.getLivenessInformation( - 
Messages.GetApplicationLivenessRequestProto.newBuilder().build() - ); - return unmarshall(proto); - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/RestClientFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/RestClientFactory.java deleted file mode 100644 index 4286596b6b5..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/RestClientFactory.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.client.rest; - -import com.sun.jersey.api.client.Client; -import com.sun.jersey.api.client.WebResource; -import org.apache.hadoop.registry.client.api.RegistryOperations; -import org.apache.slider.client.ClientRegistryBinder; -import org.apache.slider.api.SliderApplicationApi; -import org.apache.slider.core.registry.info.CustomRegistryConstants; - -import java.io.IOException; - -import static org.apache.slider.server.appmaster.web.rest.RestPaths.SLIDER_PATH_APPLICATION; - -/** - * Factory for the Rest client; hides the lookup and instantiation. - *

- * - */ -public class RestClientFactory { - - private final ClientRegistryBinder binder; - private final Client jerseyClient; - private final String user, serviceclass, instance; - - public RestClientFactory(RegistryOperations operations, - Client jerseyClient, - String user, - String serviceclass, - String instance) { - this.jerseyClient = jerseyClient; - this.user = user; - this.serviceclass = serviceclass; - this.instance = instance; - binder = new ClientRegistryBinder(operations); - } - - /** - * Locate the AM - * @return a resource to the AM - * @throws IOException any failure to resolve to the AM - */ - private WebResource locateAppmaster() throws IOException { - String restAPI = binder.lookupExternalRestAPI(user, serviceclass, instance, - CustomRegistryConstants.AM_REST_BASE); - return jerseyClient.resource(restAPI); - } - - /** - * Locate the slider AM then instantiate a client instance against - * its Application API. - * @return the instance - * @throws IOException on any failure - */ - public SliderApplicationApi createSliderAppApiClient() throws IOException { - WebResource appmaster = locateAppmaster(); - return createSliderAppApiClient(appmaster); - } - - /** - * Create a Slider application API client instance against - * its Application API. - * @param appmaster The AM to work against. - * @return the instance - * @throws IOException on any failure - */ - public SliderApplicationApi createSliderAppApiClient(WebResource appmaster) { - WebResource appResource = appmaster.path(SLIDER_PATH_APPLICATION); - return new SliderApplicationApiRestClient(jerseyClient, appResource); - } - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/SliderApplicationApiRestClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/SliderApplicationApiRestClient.java deleted file mode 100644 index 4c376e0da69..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/SliderApplicationApiRestClient.java +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.slider.client.rest; - -import com.google.common.base.Preconditions; -import com.sun.jersey.api.client.Client; -import com.sun.jersey.api.client.ClientHandlerException; -import com.sun.jersey.api.client.GenericType; -import com.sun.jersey.api.client.UniformInterfaceException; -import com.sun.jersey.api.client.WebResource; -import com.sun.jersey.api.representation.Form; -import org.apache.commons.lang.StringUtils; -import org.apache.slider.api.types.ApplicationLivenessInformation; -import org.apache.slider.api.types.ComponentInformation; -import org.apache.slider.api.types.ContainerInformation; -import org.apache.slider.api.SliderApplicationApi; -import org.apache.slider.api.types.NodeInformation; -import org.apache.slider.api.types.NodeInformationList; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTree; -import org.apache.slider.core.conf.ConfTreeOperations; -import org.apache.slider.core.exceptions.ExceptionConverter; -import org.apache.slider.core.restclient.HttpVerb; -import org.apache.slider.api.types.PingInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.ws.rs.core.MediaType; -import java.io.IOException; -import java.util.Map; - -import static org.apache.slider.server.appmaster.web.rest.RestPaths.*; - -/** - * Implementation of the {@link SliderApplicationApi} - */ -public class SliderApplicationApiRestClient extends BaseRestClient - implements SliderApplicationApi { - private static final Logger log = - LoggerFactory.getLogger(SliderApplicationApiRestClient.class); - private WebResource appResource; - - /** - * Create an instance - * @param jerseyClient jersey client for operations - * @param appResource resource of application API - */ - public SliderApplicationApiRestClient(Client jerseyClient, - WebResource appResource) { - super(jerseyClient); - this.appResource = appResource; - } - - /** - * Create an instance - * @param jerseyClient jersey client for operations - * @param appmaster URL of appmaster/proxy to AM - */ - public SliderApplicationApiRestClient(Client jerseyClient, String appmaster) { - super(jerseyClient); - WebResource amResource = jerseyClient.resource(appmaster); - amResource.type(MediaType.APPLICATION_JSON); - this.appResource = amResource.path(SLIDER_PATH_APPLICATION); - } - - - @Override - public String toString() { - final StringBuilder sb = - new StringBuilder("SliderApplicationApiRestClient{"); - sb.append("appResource=").append(appResource); - sb.append('}'); - return sb.toString(); - } - - /** - * Create a resource under the application path - * @param subpath path under application - * @return a resource under the application path - */ - public WebResource applicationResource(String subpath) { - Preconditions.checkArgument(!StringUtils.isEmpty(subpath), - "empty path"); - Preconditions.checkNotNull(appResource, "Null app resource"); - return appResource.path(subpath); - } - - /** - * Get operation against a path under the Application - * @param type expected - * @param subpath path - * @param c class to instantiate - * @return instance - * @throws IOException on any problem - */ - public T getApplicationResource(String subpath, Class c) - throws IOException { - return appResourceOperation(HttpVerb.GET, subpath, c); - } - - /** - * Get operation against a path under the Application - * @param type expected - * @param subpath path - * @param t type info - * @return instance - * @throws IOException on any problem - */ - public T 
getApplicationResource(String subpath, GenericType t) - throws IOException { - return appResourceOperation(HttpVerb.GET, subpath, t); - } - - /** - * - * @param method method to exec - * @param type expected - * @param subpath path - * @param c class to instantiate - * @return instance - * @throws IOException on any problem - */ - public T appResourceOperation(HttpVerb method, String subpath, Class c) - throws IOException { - return exec(method, applicationResource(subpath), c); - } - - - /** - * Get operation against a path under the Application - * @param type expected - * @param subpath path - * @param t type info - * @return instance - * @throws IOException on any problem - */ - public T appResourceOperation(HttpVerb method, String subpath, - GenericType t) - throws IOException { - return exec(method, applicationResource(subpath), t); - } - - - @Override - public AggregateConf getDesiredModel() throws IOException { - return getApplicationResource(MODEL_DESIRED, AggregateConf.class); - } - - @Override - public ConfTreeOperations getDesiredAppconf() throws IOException { - ConfTree resource = - getApplicationResource(MODEL_DESIRED_APPCONF, ConfTree.class); - return new ConfTreeOperations(resource); - } - - @Override - public ConfTreeOperations getDesiredResources() throws IOException { - ConfTree resource = - getApplicationResource(MODEL_DESIRED_RESOURCES, ConfTree.class); - return new ConfTreeOperations(resource); - } - - @Override - public AggregateConf getResolvedModel() throws IOException { - return getApplicationResource(MODEL_RESOLVED, AggregateConf.class); - } - - - @Override - public ConfTreeOperations getResolvedAppconf() throws IOException { - ConfTree resource = - getApplicationResource(MODEL_RESOLVED_APPCONF, ConfTree.class); - return new ConfTreeOperations(resource); - } - - @Override - public ConfTreeOperations getResolvedResources() throws IOException { - ConfTree resource = - getApplicationResource(MODEL_RESOLVED_RESOURCES, ConfTree.class); - return new ConfTreeOperations(resource); - } - - @Override - public ConfTreeOperations getLiveResources() throws IOException { - ConfTree resource = - getApplicationResource(LIVE_RESOURCES, ConfTree.class); - return new ConfTreeOperations(resource); - } - - @Override - public Map enumContainers() throws - IOException { - return getApplicationResource(LIVE_CONTAINERS, - new GenericType>() { - }); - } - - @Override - public ContainerInformation getContainer(String containerId) throws - IOException { - return getApplicationResource(LIVE_CONTAINERS + "/" + containerId, - ContainerInformation.class); - } - - @Override - public Map enumComponents() throws - IOException { - return getApplicationResource(LIVE_COMPONENTS, - new GenericType>() { }); - } - - @Override - public ComponentInformation getComponent(String componentName) throws - IOException { - return getApplicationResource(LIVE_COMPONENTS + "/" + componentName, - ComponentInformation.class); - } - - @Override - public NodeInformationList getLiveNodes() throws IOException { - return getApplicationResource(LIVE_NODES, NodeInformationList.class); - } - - @Override - public NodeInformation getLiveNode(String hostname) throws IOException { - return getApplicationResource(LIVE_NODES + "/" + hostname, - NodeInformation.class); - } - - @Override - public PingInformation ping(String text) throws IOException { - return pingPost(text); - } - - /** - * Ping as a GET - * @param text text to include - * @return the response - * @throws IOException on any failure - */ - public PingInformation 
pingGet(String text) throws IOException { - WebResource pingResource = applicationResource(ACTION_PING); - pingResource.getUriBuilder().queryParam("body", text); - return pingResource.get(PingInformation.class); - } - - /** - * Ping as a POST - * @param text text to include - * @return the response - * @throws IOException on any failure - */ - public PingInformation pingPost(String text) throws IOException { - WebResource pingResource = applicationResource(ACTION_PING); - Form f = new Form(); - f.add("text", text); - return pingResource - .type(MediaType.APPLICATION_JSON_TYPE) - .post(PingInformation.class, f); - } - - @Override - public void stop(String text) throws IOException { - WebResource resource = applicationResource(ACTION_STOP); - resource.post(text); - } - - @Override - public ApplicationLivenessInformation getApplicationLiveness() throws IOException { - return getApplicationResource(LIVE_LIVENESS, - ApplicationLivenessInformation.class); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java index 2d471ed88f2..a2d4e38fb73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java @@ -19,17 +19,6 @@ package org.apache.slider.common.params; import com.beust.jcommander.Parameter; -import com.beust.jcommander.ParametersDelegate; -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.fs.Path; -import org.apache.slider.core.conf.ConfTree; -import org.apache.slider.core.conf.ConfTreeOperations; -import org.apache.slider.core.exceptions.BadCommandArgumentsException; -import org.apache.slider.providers.SliderProviderFactory; - -import java.io.File; -import java.util.List; -import java.util.Map; /** * Abstract Action to build things; shares args across build and diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java index 591931237c7..5f5e611511e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java @@ -236,80 +236,6 @@ public class CoreFileSystem { return new Path(path, SliderKeys.RESOURCE_DIR + "/" + dirName + "/" + fileName); } - /** - * Build up the path string for cluster resource install location -no - * attempt to create the directory is made - * - * @return the path for resource - */ - public Path buildClusterResourcePath(String clusterName, String component) { - Preconditions.checkNotNull(clusterName); - Path path = 
buildClusterDirPath(clusterName); - return new Path(path, SliderKeys.RESOURCE_DIR + "/" + component); - } - - /** - * Build up the path string for cluster resource install location -no - * attempt to create the directory is made - * - * @return the path for resource - */ - public Path buildClusterResourcePath(String clusterName) { - Preconditions.checkNotNull(clusterName); - Path path = buildClusterDirPath(clusterName); - return new Path(path, SliderKeys.RESOURCE_DIR); - } - - /** - * Create the Slider cluster path for a named cluster and all its subdirs - * This is a directory; a mkdirs() operation is executed - * to ensure that it is there. - * - * @param clustername name of the cluster - * @return the path to the cluster directory - * @throws java.io.IOException trouble - * @throws SliderException slider-specific exceptions - */ - public Path createClusterDirectories(String clustername, Configuration conf) - throws IOException, SliderException { - - - Path clusterDirectory = buildClusterDirPath(clustername); - InstancePaths instancePaths = new InstancePaths(clusterDirectory); - createClusterDirectories(instancePaths); - return clusterDirectory; - } - - /** - * Create the Slider cluster path for a named cluster and all its subdirs - * This is a directory; a mkdirs() operation is executed - * to ensure that it is there. - * - * @param instancePaths instance paths - * @throws IOException trouble - * @throws SliderException slider-specific exceptions - */ - public void createClusterDirectories(InstancePaths instancePaths) throws - IOException, SliderException { - Path instanceDir = instancePaths.instanceDir; - - verifyDirectoryNonexistent(instanceDir); - FsPermission clusterPerms = getInstanceDirectoryPermissions(); - createWithPermissions(instanceDir, clusterPerms); - createWithPermissions(instancePaths.snapshotConfPath, clusterPerms); - createWithPermissions(instancePaths.generatedConfPath, clusterPerms); - createWithPermissions(instancePaths.historyPath, clusterPerms); - createWithPermissions(instancePaths.tmpPathAM, clusterPerms); - - // Data Directory - String dataOpts = - configuration.get(SliderXmlConfKeys.DATA_DIRECTORY_PERMISSIONS, - SliderXmlConfKeys.DEFAULT_DATA_DIRECTORY_PERMISSIONS); - log.debug("Setting data directory permissions to {}", dataOpts); - createWithPermissions(instancePaths.dataPath, new FsPermission(dataOpts)); - - } - /** * Create a directory with the given permissions. 
* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java index c0ef2d45a63..9d7c40adce4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java @@ -19,7 +19,6 @@ package org.apache.slider.common.tools; import com.google.common.base.Preconditions; - import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.compress.archivers.zip.ZipArchiveEntry; @@ -52,23 +51,18 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.AMRMClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.slider.Slider; -import org.apache.slider.api.InternalKeys; import org.apache.slider.api.RoleKeys; import org.apache.slider.api.types.ContainerInformation; import org.apache.slider.common.SliderKeys; import org.apache.slider.common.SliderXmlConfKeys; import org.apache.slider.common.params.Arguments; import org.apache.slider.common.params.SliderActions; -import org.apache.slider.core.conf.ConfTreeOperations; -import org.apache.slider.core.conf.MapOperations; import org.apache.slider.core.exceptions.BadClusterStateException; import org.apache.slider.core.exceptions.BadCommandArgumentsException; import org.apache.slider.core.exceptions.BadConfigException; -import org.apache.slider.core.exceptions.ErrorStrings; import org.apache.slider.core.exceptions.SliderException; import org.apache.slider.core.launch.ClasspathConstructor; import org.apache.slider.core.main.LauncherExitCodes; -import org.apache.slider.providers.agent.AgentKeys; import org.apache.slider.server.services.utility.PatternValidator; import org.apache.slider.server.services.workflow.ForkedProcessService; import org.apache.zookeeper.server.util.KerberosUtil; @@ -81,7 +75,6 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; -import java.io.FileReader; import java.io.FilenameFilter; import java.io.IOException; import java.io.InputStream; @@ -112,7 +105,6 @@ import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.TimeZone; -import java.util.Timer; import java.util.TimerTask; import java.util.TreeMap; import java.util.TreeSet; @@ -1778,61 +1770,6 @@ public final class SliderUtils { VersionInfo.getBranch() + " @" + VersionInfo.getSrcChecksum()); } - /** - * Set the time for an information (human, machine) timestamp pair of fields. - * The human time is the time in millis converted via the {@link Date} class. 
- * @param info info fields - * @param keyHumanTime name of human time key - * @param keyMachineTime name of machine time - * @param time timestamp - */ - public static void setInfoTime(Map info, - String keyHumanTime, - String keyMachineTime, - long time) { - info.put(keyHumanTime, SliderUtils.toGMTString(time)); - info.put(keyMachineTime, Long.toString(time)); - } - - public static Path extractImagePath(CoreFileSystem fs, - MapOperations internalOptions) throws - SliderException, IOException { - Path imagePath; - String imagePathOption = - internalOptions.get(InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH); - String appHomeOption = - internalOptions.get(InternalKeys.INTERNAL_APPLICATION_HOME); - if (!isUnset(imagePathOption)) { - if (!isUnset(appHomeOption)) { - throw new BadClusterStateException( - ErrorStrings.E_BOTH_IMAGE_AND_HOME_DIR_SPECIFIED); - } - imagePath = fs.createPathThatMustExist(imagePathOption); - } else { - imagePath = null; - if (isUnset(appHomeOption)) { - throw new BadClusterStateException( - ErrorStrings.E_NO_IMAGE_OR_HOME_DIR_SPECIFIED); - } - } - return imagePath; - } - - /** - * trigger a JVM halt with no clean shutdown at all - * @param status status code for exit - * @param text text message - * @param delay delay in millis - * @return the timer (assuming the JVM hasn't halted yet) - * - */ - public static Timer haltAM(int status, String text, int delay) { - - Timer timer = new Timer("halt timer", false); - timer.schedule(new DelayedHalt(status, text), delay); - return timer; - } - public static String propertiesToString(Properties props) { TreeSet keys = new TreeSet<>(props.stringPropertyNames()); StringBuilder builder = new StringBuilder(); @@ -2378,40 +2315,6 @@ public final class SliderUtils { return f.getAbsolutePath(); } - /** - * return the HDFS path where the application package has been uploaded - * manually or by using slider client (install package command) - * - * @param conf configuration - * @return - */ - public static String getApplicationDefinitionPath(ConfTreeOperations conf) - throws BadConfigException { - return getApplicationDefinitionPath(conf, null); - } - - /** - * return the HDFS path where the application package has been uploaded - * manually or by using slider client (install package command) - * - * @param conf configuration - * @param roleGroup name of component - * @return - */ - public static String getApplicationDefinitionPath(ConfTreeOperations conf, - String roleGroup) - throws BadConfigException { - String appDefPath = conf.getGlobalOptions().getMandatoryOption( - AgentKeys.APP_DEF); - if (roleGroup != null) { - MapOperations component = conf.getComponent(roleGroup); - if (component != null) { - appDefPath = component.getOption(AgentKeys.APP_DEF, appDefPath); - } - } - return appDefPath; - } - /** * return the path to the slider-client.xml used by the current running * slider command @@ -2486,25 +2389,6 @@ public final class SliderUtils { + version + "; The path to it is: " + javaHome; } - /** - * return a description of whether the current user has created credential - * cache files from kerberos servers - * - * @throws IOException - * @throws BadConfigException - * @throws SecurityException - * - if a security manager exists and its checkPropertyAccess - * method doesn't allow access to the specified system property. 
- */ - public static String checkCredentialCacheFile() throws IOException, - BadConfigException { - String result = null; - if (!Shell.WINDOWS) { - result = Shell.execCommand("klist"); - } - return result; - } - /** * Compare the times of two applications: most recent app comes first * Specifically: the one whose start time value is greater. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/InstanceBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/InstanceBuilder.java deleted file mode 100644 index f14a07a5e50..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/InstanceBuilder.java +++ /dev/null @@ -1,520 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.core.buildutils; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.slider.api.InternalKeys; -import org.apache.slider.api.OptionKeys; -import org.apache.slider.api.ResourceKeys; -import org.apache.slider.api.StatusKeys; -import org.apache.slider.common.SliderKeys; -import org.apache.slider.common.SliderXmlConfKeys; -import org.apache.slider.common.tools.CoreFileSystem; -import org.apache.slider.common.tools.SliderUtils; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTreeOperations; -import org.apache.slider.core.conf.MapOperations; -import org.apache.slider.core.exceptions.BadClusterStateException; -import org.apache.slider.core.exceptions.BadConfigException; -import org.apache.slider.core.exceptions.ErrorStrings; -import org.apache.slider.core.exceptions.SliderException; -import org.apache.slider.core.persist.ConfPersister; -import org.apache.slider.core.persist.InstancePaths; -import org.apache.slider.core.persist.LockAcquireFailedException; -import org.apache.slider.core.persist.LockHeldAction; -import org.apache.slider.core.zk.ZKPathBuilder; -import org.apache.slider.core.zk.ZookeeperUtils; -import org.apache.slider.providers.agent.AgentKeys; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.TreeSet; - -import static org.apache.slider.api.InternalKeys.INTERNAL_ADDONS_DIR_PATH; -import 
static org.apache.slider.api.InternalKeys.INTERNAL_APPDEF_DIR_PATH; -import static org.apache.slider.api.InternalKeys.INTERNAL_QUEUE; -import static org.apache.slider.api.OptionKeys.INTERNAL_AM_TMP_DIR; -import static org.apache.slider.api.OptionKeys.INTERNAL_TMP_DIR; -import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_HOME; -import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_IMAGE_PATH; -import static org.apache.slider.api.OptionKeys.INTERNAL_DATA_DIR_PATH; -import static org.apache.slider.api.OptionKeys.INTERNAL_GENERATED_CONF_PATH; -import static org.apache.slider.api.OptionKeys.INTERNAL_SNAPSHOT_CONF_PATH; -import static org.apache.slider.api.OptionKeys.ZOOKEEPER_HOSTS; -import static org.apache.slider.api.OptionKeys.ZOOKEEPER_PATH; -import static org.apache.slider.api.OptionKeys.ZOOKEEPER_QUORUM; -import static org.apache.slider.api.RoleKeys.ROLE_PREFIX; -import static org.apache.slider.common.SliderKeys.COMPONENT_AM; -import static org.apache.slider.common.SliderKeys.COMPONENT_SEPARATOR; -import static org.apache.slider.common.SliderKeys.COMPONENT_TYPE_EXTERNAL_APP; -import static org.apache.slider.common.SliderKeys.COMPONENT_TYPE_KEY; -import static org.apache.slider.common.tools.SliderUtils.isClusternameValid; - -/** - * Build up the instance of a cluster. - */ -public class InstanceBuilder { - - private final String clustername; - private final Configuration conf; - private final CoreFileSystem coreFS; - private final InstancePaths instancePaths; - private AggregateConf instanceDescription; - private Map externalAppDefs = new HashMap<>(); - private TreeSet priorities = new TreeSet<>(); - - private static final Logger log = - LoggerFactory.getLogger(InstanceBuilder.class); - - public InstanceBuilder(CoreFileSystem coreFileSystem, - Configuration conf, - String clustername) { - this.clustername = clustername; - this.conf = conf; - this.coreFS = coreFileSystem; - Path instanceDir = coreFileSystem.buildClusterDirPath(clustername); - instancePaths = new InstancePaths(instanceDir); - - } - - public AggregateConf getInstanceDescription() { - return instanceDescription; - } - - public InstancePaths getInstancePaths() { - return instancePaths; - } - - - @Override - public String toString() { - return "Builder working with " + clustername + " at " + - getInstanceDir(); - } - - private Path getInstanceDir() { - return instancePaths.instanceDir; - } - - /** - * Initial part of the build process - * @param instanceConf - * @param provider - */ - public void init( - String provider, - AggregateConf instanceConf) { - - - this.instanceDescription = instanceConf; - - //internal is extended - ConfTreeOperations internalOps = instanceConf.getInternalOperations(); - - Map md = internalOps.getConfTree().metadata; - long time = System.currentTimeMillis(); - md.put(StatusKeys.INFO_CREATE_TIME_HUMAN, SliderUtils.toGMTString(time)); - md.put(StatusKeys.INFO_CREATE_TIME_MILLIS, Long.toString(time)); - - BuildHelper.addBuildMetadata(md, "create"); - SliderUtils.setInfoTime(md, - StatusKeys.INFO_CREATE_TIME_HUMAN, - StatusKeys.INFO_CREATE_TIME_MILLIS, - System.currentTimeMillis()); - - internalOps.set(INTERNAL_AM_TMP_DIR, - instancePaths.tmpPathAM.toUri()); - internalOps.set(INTERNAL_TMP_DIR, - instancePaths.tmpPath.toUri()); - internalOps.set(INTERNAL_SNAPSHOT_CONF_PATH, - instancePaths.snapshotConfPath.toUri()); - internalOps.set(INTERNAL_GENERATED_CONF_PATH, - instancePaths.generatedConfPath.toUri()); - internalOps.set(INTERNAL_DATA_DIR_PATH, - 
instancePaths.dataPath.toUri()); - internalOps.set(INTERNAL_APPDEF_DIR_PATH, - instancePaths.appDefPath.toUri()); - internalOps.set(INTERNAL_ADDONS_DIR_PATH, - instancePaths.addonsPath.toUri()); - - - internalOps.set(InternalKeys.INTERNAL_PROVIDER_NAME, provider); - internalOps.set(OptionKeys.APPLICATION_NAME, clustername); - - } - - /** - * Set the queue used to start the application - * @param queue - * @throws BadConfigException - */ - public void setQueue(String queue) throws BadConfigException { - if(queue != null) { - if(SliderUtils.isUnset(queue)) { - throw new BadConfigException("Queue value cannot be empty."); - } - - instanceDescription.getInternalOperations().set(INTERNAL_QUEUE, queue); - } - } - - /** - * Set up the image/app home path - * @param appImage path in the DFS to the tar file - * @param appHomeDir other strategy: home dir - * @throws BadConfigException if both are found - */ - public void setImageDetailsIfAvailable( - Path appImage, - String appHomeDir) throws BadConfigException { - boolean appHomeUnset = SliderUtils.isUnset(appHomeDir); - // App home or image - if (appImage != null) { - if (!appHomeUnset) { - // both args have been set - throw new BadConfigException( - ErrorStrings.E_BOTH_IMAGE_AND_HOME_DIR_SPECIFIED); - } - instanceDescription.getInternalOperations().set(INTERNAL_APPLICATION_IMAGE_PATH, - appImage.toUri()); - } else { - // the alternative is app home, which now MUST be set - if (!appHomeUnset) { - instanceDescription.getInternalOperations().set(INTERNAL_APPLICATION_HOME, - appHomeDir); - } - } - } - - - /** - * Propagate any critical principals from the current site config down to the HBase one. - */ - public void propagatePrincipals() { - String dfsPrincipal = conf.get(SliderXmlConfKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY); - if (dfsPrincipal != null) { - String siteDfsPrincipal = OptionKeys.SITE_XML_PREFIX + - SliderXmlConfKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY; - instanceDescription.getAppConfOperations().set(siteDfsPrincipal, dfsPrincipal); - } - } - - public void propagateFilename() { - String fsDefaultName = conf.get( - CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY); - instanceDescription.getAppConfOperations().set(OptionKeys.SITE_XML_PREFIX + - CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, - fsDefaultName - ); - - instanceDescription.getAppConfOperations().set(OptionKeys.SITE_XML_PREFIX + - SliderXmlConfKeys.FS_DEFAULT_NAME_CLASSIC, - fsDefaultName - ); - - } - - - public void takeSnapshotOfConfDir(Path appconfdir) throws - IOException, - BadConfigException, - BadClusterStateException { - FileSystem srcFS = FileSystem.get(appconfdir.toUri(), conf); - if (!srcFS.isDirectory(appconfdir)) { - throw new BadConfigException( - "Source Configuration directory is not valid: %s", - appconfdir.toString()); - } - // bulk copy - FsPermission clusterPerms = coreFS.getInstanceDirectoryPermissions(); - // first the original from wherever to the DFS - SliderUtils.copyDirectory(conf, appconfdir, instancePaths.snapshotConfPath, - clusterPerms); - } - - - private Set getExternalComponents(ConfTreeOperations ops) - throws BadConfigException { - Set externalComponents = new HashSet<>(); - if (ops.getGlobalOptions().containsKey(COMPONENT_TYPE_KEY)) { - throw new BadConfigException(COMPONENT_TYPE_KEY + " must be " + - "specified per-component, not in global"); - } - - for (Entry> entry : ops.getComponents() - .entrySet()) { - if (COMPONENT_AM.equals(entry.getKey())) { - continue; - } - Map options = entry.getValue(); - if 
(COMPONENT_TYPE_EXTERNAL_APP.equals(options.get(COMPONENT_TYPE_KEY))) { - externalComponents.add(entry.getKey()); - } - } - return externalComponents; - } - - private void mergeExternalComponent(ConfTreeOperations ops, - ConfTreeOperations externalOps, String externalComponent, - Integer priority) throws BadConfigException { - for (String subComponent : externalOps.getComponentNames()) { - if (COMPONENT_AM.equals(subComponent)) { - continue; - } - String prefix = externalComponent + COMPONENT_SEPARATOR; - log.debug("Merging options for {} into {}", subComponent, - prefix + subComponent); - MapOperations subComponentOps = ops.getOrAddComponent( - prefix + subComponent); - if (priority == null) { - SliderUtils.mergeMaps(subComponentOps, - ops.getComponent(externalComponent).options); - subComponentOps.remove(COMPONENT_TYPE_KEY); - } - - SliderUtils.mergeMapsIgnoreDuplicateKeysAndPrefixes(subComponentOps, - externalOps.getComponent(subComponent), - SliderKeys.COMPONENT_KEYS_TO_SKIP); - - // add prefix to existing prefix - String existingPrefix = subComponentOps.get(ROLE_PREFIX); - if (existingPrefix != null) { - if (!subComponent.startsWith(existingPrefix)) { - throw new BadConfigException("Bad prefix " + existingPrefix + - " for subcomponent " + subComponent + " of " + externalComponent); - } - prefix = prefix + existingPrefix; - } - subComponentOps.set(ROLE_PREFIX, prefix); - - // adjust priority - if (priority != null) { - subComponentOps.put(ResourceKeys.COMPONENT_PRIORITY, - Integer.toString(priority)); - priorities.add(priority); - priority++; - } - } - } - - private int getNextPriority() { - if (priorities.isEmpty()) { - return 1; - } else { - return priorities.last() + 1; - } - } - - public void resolve() - throws BadConfigException, IOException, BadClusterStateException { - ConfTreeOperations appConf = instanceDescription.getAppConfOperations(); - ConfTreeOperations resources = instanceDescription.getResourceOperations(); - - for (Entry> entry : resources.getComponents() - .entrySet()) { - if (COMPONENT_AM.equals(entry.getKey())) { - continue; - } - if (entry.getValue().containsKey(ResourceKeys.COMPONENT_PRIORITY)) { - priorities.add(Integer.parseInt(entry.getValue().get( - ResourceKeys.COMPONENT_PRIORITY))); - } - } - - Set externalComponents = getExternalComponents(appConf); - if (!externalComponents.isEmpty()) { - log.info("Found external components {}", externalComponents); - } - - for (String component : externalComponents) { - if (!isClusternameValid(component)) { - throw new BadConfigException(component + " is not a valid external " + - "component"); - } - Path componentClusterDir = coreFS.buildClusterDirPath(component); - try { - coreFS.verifyPathExists(componentClusterDir); - } catch (IOException e) { - throw new BadConfigException("external component " + component + - " doesn't exist"); - } - AggregateConf componentConf = new AggregateConf(); - ConfPersister persister = new ConfPersister(coreFS, componentClusterDir); - try { - persister.load(componentConf); - } catch (Exception e) { - throw new BadConfigException("Couldn't read configuration for " + - "external component " + component); - } - - ConfTreeOperations componentAppConf = componentConf.getAppConfOperations(); - String externalAppDef = componentAppConf.get(AgentKeys.APP_DEF); - if (SliderUtils.isSet(externalAppDef)) { - Path newAppDef = new Path(coreFS.buildAppDefDirPath(clustername), - component + "_" + SliderKeys.DEFAULT_APP_PKG); - componentAppConf.set(AgentKeys.APP_DEF, newAppDef); - 
componentAppConf.append(AgentKeys.APP_DEF_ORIGINAL, externalAppDef); - log.info("Copying external appdef {} to {} for {}", externalAppDef, - newAppDef, component); - externalAppDefs.put(externalAppDef, newAppDef); - externalAppDef = newAppDef.toString(); - } - - for (String rcomp : componentConf.getResourceOperations() - .getComponentNames()) { - if (COMPONENT_AM.equals(rcomp)) { - continue; - } - log.debug("Adding component {} to appConf for {}", rcomp, component); - componentAppConf.getOrAddComponent(rcomp); - } - componentConf.resolve(); - - for (String rcomp : componentConf.getResourceOperations() - .getComponentNames()) { - if (COMPONENT_AM.equals(rcomp)) { - continue; - } - String componentAppDef = componentAppConf.getComponentOpt( - rcomp, AgentKeys.APP_DEF, null); - if (SliderUtils.isUnset(componentAppDef) || - componentAppDef.equals(externalAppDef)) { - continue; - } - if (externalAppDefs.containsKey(componentAppDef)) { - log.info("Using external appdef {} for {}", - externalAppDefs.get(componentAppDef), rcomp); - } else { - String existingPrefix = componentAppConf.getComponentOpt(rcomp, - ROLE_PREFIX, null); - if (SliderUtils.isUnset(existingPrefix)) { - existingPrefix = ""; - } else { - existingPrefix = COMPONENT_SEPARATOR + SliderUtils.trimPrefix( - existingPrefix); - } - Path newAppDef = new Path(coreFS.buildAppDefDirPath(clustername), - component + existingPrefix + "_" + SliderKeys.DEFAULT_APP_PKG); - externalAppDefs.put(componentAppDef, newAppDef); - log.info("Copying external appdef {} to {} for {}", componentAppDef, - newAppDef, component + COMPONENT_SEPARATOR + rcomp); - } - componentAppConf.setComponentOpt(rcomp, AgentKeys.APP_DEF, - externalAppDefs.get(componentAppDef).toString()); - componentAppConf.appendComponentOpt(rcomp, - AgentKeys.APP_DEF_ORIGINAL, componentAppDef); - } - Set newAppDefs = new HashSet<>(); - newAppDefs.addAll(externalAppDefs.values()); - if (newAppDefs.size() != externalAppDefs.size()) { - throw new IllegalStateException("Values repeat in external appdefs " - + externalAppDefs); - } - log.info("External appdefs after {}: {}", component, externalAppDefs); - - SliderUtils.mergeMapsIgnoreDuplicateKeys( - appConf.getConfTree().credentials, - componentAppConf.getConfTree().credentials); - - mergeExternalComponent(appConf, componentAppConf, component, null); - mergeExternalComponent(resources, componentConf.getResourceOperations(), - component, getNextPriority()); - } - } - - - /** - * Persist this - * @param appconfdir conf dir - * @param overwrite if true, we don't need to create cluster dir - * @throws IOException - * @throws SliderException - * @throws LockAcquireFailedException - */ - public void persist(Path appconfdir, boolean overwrite) throws - IOException, - SliderException, - LockAcquireFailedException { - if (!overwrite) { - coreFS.createClusterDirectories(instancePaths); - } - ConfPersister persister = - new ConfPersister(coreFS, getInstanceDir()); - ConfDirSnapshotAction action = null; - if (appconfdir != null) { - action = new ConfDirSnapshotAction(appconfdir); - } - persister.save(instanceDescription, action); - for (Entry appDef : externalAppDefs.entrySet()) { - SliderUtils.copy(conf, new Path(appDef.getKey()), appDef.getValue()); - } - } - - /** - * Add the ZK paths to the application options. 
- * - * @param zkBinding ZK binding - */ - public void addZKBinding(ZKPathBuilder zkBinding) throws BadConfigException { - - String quorum = zkBinding.getAppQuorum(); - if (SliderUtils.isSet(quorum)) { - MapOperations globalAppOptions = - instanceDescription.getAppConfOperations().getGlobalOptions(); - globalAppOptions.put(ZOOKEEPER_PATH, zkBinding.getAppPath()); - globalAppOptions.put(ZOOKEEPER_QUORUM, quorum); - globalAppOptions.put(ZOOKEEPER_HOSTS, - ZookeeperUtils.convertToHostsOnlyList(quorum)); - } - } - - /** - * Class to execute the snapshotting of the configuration directory - * while the persistence lock is held. - * - * This guarantees that there won't be an attempt to launch a cluster - * until the snapshot is complete -as the write lock won't be released - * until afterwards. - */ - private class ConfDirSnapshotAction implements LockHeldAction { - - private final Path appconfdir; - - private ConfDirSnapshotAction(Path appconfdir) { - this.appconfdir = appconfdir; - } - - @Override - public void execute() throws IOException, SliderException { - - takeSnapshotOfConfDir(appconfdir); - } - } - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/InstanceIO.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/InstanceIO.java deleted file mode 100644 index 262b6ee1e7f..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/InstanceIO.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.slider.core.buildutils;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.slider.common.tools.CoreFileSystem;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.exceptions.BadClusterStateException;
-import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.core.persist.ConfPersister;
-import org.apache.slider.core.persist.LockAcquireFailedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-public class InstanceIO {
-  protected static final Logger log =
-      LoggerFactory.getLogger(InstanceIO.class);
-
-  /**
-   * Load in an instance definition -but do not resolve it
-   * @param sliderFileSystem filesystem
-   * @param clusterDirectory cluster directory
-   * @return the unresolved aggregate configuration
-   * @throws IOException
-   * @throws SliderException
-   * @throws BadClusterStateException if a lock could not be acquired
-   */
-  public static AggregateConf loadInstanceDefinitionUnresolved(
-      CoreFileSystem sliderFileSystem,
-      Path clusterDirectory)
-      throws IOException, SliderException {
-    AggregateConf instanceDefinition = new AggregateConf();
-    ConfPersister persister =
-        new ConfPersister(sliderFileSystem, clusterDirectory);
-    try {
-      persister.load(instanceDefinition);
-    } catch (LockAcquireFailedException e) {
-      log.debug("Lock acquisition failure of {}", clusterDirectory, e);
-
-      throw new BadClusterStateException(
-          "Application at %s is locked for reading",
-          clusterDirectory.toString());
-    }
-    return instanceDefinition;
-  }
-
-
-  /**
-   * Update a persisted instance definition
-   * @param coreFS filesystem
-   * @param dir directory to save to
-   * @param instanceDefinition instance definition to save
-   * @throws SliderException
-   * @throws IOException
-   * @throws LockAcquireFailedException
-   */
-  public static void saveInstanceDefinition(CoreFileSystem coreFS,
-      Path dir,
-      AggregateConf instanceDefinition)
-      throws SliderException, IOException, LockAcquireFailedException {
-    ConfPersister persister =
-        new ConfPersister(coreFS, dir);
-    persister.save(instanceDefinition, null);
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/AbstractInputPropertiesValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/AbstractInputPropertiesValidator.java
deleted file mode 100644
index 336b4dc7c2e..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/AbstractInputPropertiesValidator.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.slider.core.conf; - -import org.apache.slider.core.exceptions.BadConfigException; - -/** - * - */ -public abstract class AbstractInputPropertiesValidator implements InputPropertiesValidator{ - - abstract void validatePropertyNamePrefix(String key) throws BadConfigException; - - public void validate(ConfTreeOperations props) - throws BadConfigException { - validateGlobalProperties(props); - validateComponentProperties(props); - - } - - protected void validateComponentProperties(ConfTreeOperations props) - throws BadConfigException { - for (String compName : props.getComponentNames()) { - MapOperations mo = props.getComponent(compName); - if (mo == null) continue; - for (String key : mo.keySet()) { - validatePropertyNamePrefix(key); - } - } - } - - abstract void validateGlobalProperties(ConfTreeOperations props) - throws BadConfigException; - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/AggregateConf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/AggregateConf.java deleted file mode 100644 index a2724205a0c..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/AggregateConf.java +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.core.conf; - -import org.apache.commons.lang.RandomStringUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.slider.common.SliderKeys; -import org.apache.slider.core.exceptions.BadConfigException; -import org.codehaus.jackson.annotate.JsonIgnore; -import org.codehaus.jackson.annotate.JsonIgnoreProperties; -import org.codehaus.jackson.map.annotate.JsonSerialize; - -import java.io.IOException; - -/** - * Aggregate Configuration. 
- * - * It is serializable to JSON - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL) -public final class AggregateConf { - - private String name; - private ConfTree resources; - private ConfTree internal; - private ConfTree appConf; - - private ConfTreeOperations resourceOperations; - private ConfTreeOperations appConfOperations; - private ConfTreeOperations internalOperations; - - private String passphrase; - - public AggregateConf() { - this(new ConfTree(), new ConfTree(), new ConfTree()); - } - - public AggregateConf(String name) { - this(new ConfTree(), new ConfTree(), new ConfTree()); - this.name = name; - } - - public AggregateConf(ConfTree resources, - ConfTree appConf, - ConfTree internal) { - setResources(resources); - setAppConf(appConf); - setInternal(internal); - } - - /** - * Take a snapshot of the configuration - * @param instanceDefinition source - * @throws IOException marshalling/copying problems - */ - public AggregateConf(AggregateConf instanceDefinition) throws IOException { - ConfTreeOperations resourcesSnapshot = - ConfTreeOperations.fromInstance(instanceDefinition.getResources()); - ConfTreeOperations appConfSnapshot = - ConfTreeOperations.fromInstance(instanceDefinition.getAppConf()); - ConfTreeOperations internalsSnapshot = - ConfTreeOperations.fromInstance(instanceDefinition.getInternal()); - //build a new aggregate from the snapshots - setResources(resourcesSnapshot.confTree); - setAppConf(appConfSnapshot.confTree); - setInternal(internalsSnapshot.confTree); - } - - public void setResources(ConfTree resources) { - this.resources = resources; - resourceOperations = new ConfTreeOperations(resources); - } - - public void setAppConf(ConfTree appConf) { - this.appConf = appConf; - appConfOperations = new ConfTreeOperations(appConf); - } - - public ConfTree getInternal() { - return internal; - } - - public void setInternal(ConfTree internal) { - this.internal = internal; - internalOperations = new ConfTreeOperations(internal); - } - - public ConfTree getResources() { - return resources; - } - - public ConfTree getAppConf() { - return appConf; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - @JsonIgnore - public ConfTreeOperations getResourceOperations() { - return resourceOperations; - } - - - @JsonIgnore - public ConfTreeOperations getAppConfOperations() { - return appConfOperations; - } - - @JsonIgnore - public ConfTreeOperations getInternalOperations() { - return internalOperations; - } - - /** - * predicate to query if all sections have data structures - * @return true if every section is non-null - */ - @JsonIgnore - public boolean isComplete() { - return resources != null && appConf != null && internal != null; - } - - public void validate() throws BadConfigException { - if (!isComplete()) { - throw new BadConfigException("Incomplete instance %s", this); - } - resourceOperations.validate(); - internalOperations.validate(); - appConfOperations.validate(); - } - - public void resolve() throws BadConfigException { - validate(); - resourceOperations.resolve(); - internalOperations.resolve(); - appConfOperations.resolve(); - } - - @JsonIgnore - public String getPassphrase() { - if (passphrase == null) { - passphrase = RandomStringUtils.randomAlphanumeric( - Integer.parseInt(SliderKeys.PASS_LEN)); - } - - return passphrase; - } - - /** - * Is this app package versioned? 
- * - * @return true if {@link SliderKeys#APP_VERSION} was set in the app config - * provided during creation of this app - * @since 0.80.0-incubating - */ - public boolean isVersioned() { - return StringUtils.isNotEmpty(getAppConfOperations().getGlobalOptions() - .get(SliderKeys.APP_VERSION)); - } - - /** - * string operation includes all the inner conftrees - * @return a string description - */ - @Override - public String toString() { - final StringBuilder sb = - new StringBuilder("{"); - sb.append(",\n\"internal\": ").append(internal); - sb.append(",\n\"resources\": ").append(resources); - sb.append(",\n\"appConf\" :").append(appConf); - sb.append('}'); - return sb.toString(); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ConfTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ConfTree.java deleted file mode 100644 index 74b6abba590..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ConfTree.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.core.conf; - -import org.apache.slider.core.persist.ConfTreeSerDeser; -import org.apache.slider.core.persist.PersistKeys; -import org.codehaus.jackson.JsonGenerationException; -import org.codehaus.jackson.annotate.JsonIgnoreProperties; -import org.codehaus.jackson.map.JsonMappingException; -import org.codehaus.jackson.map.annotate.JsonSerialize; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * A conf tree represents one of the configuration trees - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL) -public final class ConfTree { - - /** - * Size of an initial map. This is kept low so the cost of having - * many conf trees in a process is low. 
- */
-  public static final int INITAL_MAP_CAPACITY = 3;
-
-  protected static final Logger
-    log = LoggerFactory.getLogger(ConfTree.class);
-
-  /**
-   * version counter
-   */
-  public String schema = PersistKeys.SCHEMA;
-
-  /**
-   * Metadata
-   */
-  public Map<String, Object> metadata = new HashMap<>(INITAL_MAP_CAPACITY);
-
-
-  /**
-   * Global options
-   */
-  public Map<String, String> global =
-      new HashMap<>(INITAL_MAP_CAPACITY);
-
-
-  /**
-   * Credentials
-   */
-  public Map<String, List<String>> credentials =
-      new HashMap<>(INITAL_MAP_CAPACITY);
-
-  /**
-   * Role options,
-   * role -> option -> value
-   */
-  public Map<String, Map<String, String>> components =
-      new HashMap<>(INITAL_MAP_CAPACITY);
-
-  @Override
-  public String toString() {
-    try {
-      return toJson();
-    } catch (Exception e) {
-      log.warn("Failed to convert to JSON ", e);
-      return super.toString();
-    }
-  }
-
-  /**
-   * Convert to a JSON string
-   * @return a JSON string description
-   * @throws IOException Problems mapping/writing the object
-   */
-  public String toJson() throws IOException {
-    return ConfTreeSerDeser.toString(this);
-  }
-
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ConfTreeOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ConfTreeOperations.java
deleted file mode 100644
index 526e17d7d9e..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ConfTreeOperations.java
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.apache.slider.core.conf; - -import org.apache.slider.common.tools.SliderUtils; -import org.apache.slider.core.exceptions.BadConfigException; -import org.apache.slider.core.persist.ConfTreeSerDeser; -import org.apache.slider.core.persist.PersistKeys; -import org.codehaus.jackson.JsonGenerationException; -import org.codehaus.jackson.annotate.JsonIgnore; -import org.codehaus.jackson.map.JsonMappingException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -public class ConfTreeOperations { - - public final ConfTree confTree; - private final MapOperations globalOptions; - - protected static final Logger - log = LoggerFactory.getLogger(ConfTreeOperations.class); - - - public ConfTreeOperations(ConfTree confTree) { - assert confTree != null : "null tree"; - assert confTree.components != null : "null tree components"; - this.confTree = confTree; - globalOptions = new MapOperations("global", confTree.global); - } - - /** - * Get the underlying conf tree - * @return the tree - */ - public ConfTree getConfTree() { - return confTree; - } - - /** - * Validate the configuration - * @throws BadConfigException - */ - public void validate() throws BadConfigException { - validate(null); - } - - /** - * Validate the configuration - * @param validator a provided properties validator - * @throws BadConfigException - */ - public void validate(InputPropertiesValidator validator) throws BadConfigException { - String version = confTree.schema; - if (version == null) { - throw new BadConfigException("'version' undefined"); - } - if (!PersistKeys.SCHEMA.equals(version)) { - throw new BadConfigException( - "version %s incompatible with supported version %s", - version, - PersistKeys.SCHEMA); - } - if (validator != null) { - validator.validate(this); - } - } - - /** - * Resolve a ConfTree by mapping all global options into each component - * -if there is none there already - */ - public void resolve() { - for (Map.Entry> comp : confTree.components.entrySet()) { - mergeInGlobal(comp.getValue()); - } - } - - /** - * Merge any options - * @param component dest values - */ - public void mergeInGlobal(Map component) { - SliderUtils.mergeMapsIgnoreDuplicateKeys(component, confTree.global); - } - - /** - * Get operations on the global set - * @return a wrapped map - */ - public MapOperations getGlobalOptions() { - return globalOptions; - } - - - /** - * look up a component and return its options - * @param component component name - * @return component mapping or null - */ - public MapOperations getComponent(String component) { - Map instance = confTree.components.get(component); - if (instance != null) { - return new MapOperations(component, instance); - } - return null; - } - - /** - * look up a component and return its options with the specified replacements - * @param component component name - * @param replacementOptions replacement options - * @return component mapping or null - */ - public MapOperations getComponent(String component, Map - replacementOptions) { - Map instance = confTree.components.get(component); - if (instance != null) { - Map newInstance = new HashMap<>(); - newInstance.putAll(instance); - newInstance.putAll(replacementOptions); - return new MapOperations(component, newInstance); - } - return null; - } - - /** - * Get at the underlying component map - * @return a map of components. 
This is the raw ConfTree data structure - */ - public Map> getComponents() { - return confTree.components; - } - - /** - * Get a component -adding it to the components map if - * none with that name exists - * @param name role - * @return role mapping - */ - public MapOperations getOrAddComponent(String name) { - MapOperations operations = getComponent(name); - if (operations != null) { - return operations; - } - //create a new instances - Map map = new HashMap<>(); - confTree.components.put(name, map); - return new MapOperations(name, map); - } - - - /* - * return the Set of names names - */ - @JsonIgnore - public Set getComponentNames() { - return new HashSet(confTree.components.keySet()); - } - - - - /** - * Get a component whose presence is mandatory - * @param name component name - * @return the mapping - * @throws BadConfigException if the name is not there - */ - public MapOperations getMandatoryComponent(String name) throws - BadConfigException { - MapOperations ops = getComponent(name); - if (ops == null) { - throw new BadConfigException("Missing component " + name); - } - return ops; - } - - /** - * Set a global option, converting it to a string as needed - * @param key key - * @param value non null value - */ - public void set(String key, Object value) { - globalOptions.put(key, value.toString()); - } - /** - * get a global option - * @param key key - * @return value or null - * - */ - public String get(String key) { - return globalOptions.get(key); - } - /** - * append to a global option - * @param key key - * @return value - * - */ - public String append(String key, String value) { - if (SliderUtils.isUnset(value)) { - return null; - } - if (globalOptions.containsKey(key)) { - globalOptions.put(key, globalOptions.get(key) + "," + value); - } else { - globalOptions.put(key, value); - } - return globalOptions.get(key); - } - - /** - * Propagate all global keys matching a prefix - * @param src source - * @param prefix prefix - */ - public void propagateGlobalKeys(ConfTree src, String prefix) { - Map global = src.global; - for (Map.Entry entry : global.entrySet()) { - String key = entry.getKey(); - if (key.startsWith(prefix)) { - set(key, entry.getValue()); - } - } - } - - /** - * Propagate all global keys matching a prefix - * @param src source - * @param prefix prefix - */ - public void propagateGlobalKeys(ConfTreeOperations src, String prefix) { - propagateGlobalKeys(src.confTree, prefix); - } - - /** - * Merge the map of a single component - * @param component component name - * @param map map to merge - */ - public void mergeSingleComponentMap(String component, Map map) { - MapOperations comp = getOrAddComponent(component); - comp.putAll(map); - } - /** - * Merge the map of a single component - * @param component component name - * @param map map to merge - */ - public void mergeSingleComponentMapPrefix(String component, - Map map, - String prefix, - boolean overwrite) { - boolean needsMerge = false; - for (Map.Entry entry : map.entrySet()) { - String key = entry.getKey(); - if (key.startsWith(prefix)) { - needsMerge = true; - break; - } - } - if (!needsMerge) { - return; - } - MapOperations comp = getOrAddComponent(component); - comp.mergeMapPrefixedKeys(map,prefix, overwrite); - } - - /** - * Merge in components - * @param commandOptions component options on the CLI - */ - public void mergeComponents(Map> commandOptions) { - for (Map.Entry> entry : commandOptions.entrySet()) { - mergeSingleComponentMap(entry.getKey(), entry.getValue()); - } - } - - /** - * Merge in 
components - * @param commandOptions component options on the CLI - */ - public void mergeComponentsPrefix(Map> commandOptions, - String prefix, - boolean overwrite) { - for (Map.Entry> entry : commandOptions.entrySet()) { - mergeSingleComponentMapPrefix(entry.getKey(), entry.getValue(), prefix, overwrite); - } - } - - /** - * Merge in another tree -no overwrites of global or conf data - * (note that metadata does a naive putAll merge/overwrite) - * @param that the other tree - */ - public void mergeWithoutOverwrite(ConfTree that) { - - getGlobalOptions().mergeWithoutOverwrite(that.global); - confTree.metadata.putAll(that.metadata); - confTree.credentials.putAll(that.credentials); - - for (Map.Entry> entry : that.components.entrySet()) { - MapOperations comp = getOrAddComponent(entry.getKey()); - comp.mergeWithoutOverwrite(entry.getValue()); - } - } - - /** - * Merge in another tree with overwrites - * @param that the other tree - */ - public void merge(ConfTree that) { - - getGlobalOptions().putAll(that.global); - confTree.metadata.putAll(that.metadata); - confTree.credentials.putAll(that.credentials); - - for (Map.Entry> entry : that.components.entrySet()) { - MapOperations comp = getOrAddComponent(entry.getKey()); - comp.putAll(entry.getValue()); - } - } - - - /** - * Load from a resource. The inner conf tree is the loaded data -unresolved - * @param resource resource - * @return loaded value - * @throws IOException load failure - */ - public static ConfTreeOperations fromResource(String resource) throws - IOException { - ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser(); - ConfTreeOperations ops = new ConfTreeOperations( - confTreeSerDeser.fromResource(resource) ); - return ops; - } - - /** - * Load from a resource. The inner conf tree is the loaded data -unresolved - * @param resource resource - * @return loaded value - * @throws IOException load failure - */ - public static ConfTreeOperations fromFile(File resource) throws - IOException { - ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser(); - ConfTreeOperations ops = new ConfTreeOperations( - confTreeSerDeser.fromFile(resource) ); - return ops; - } - - /** - * Build from an existing instance -which is cloned via JSON ser/deser - * @param instance the source instance - * @return loaded value - * @throws IOException load failure - */ - public static ConfTreeOperations fromInstance(ConfTree instance) throws - IOException { - ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser(); - ConfTreeOperations ops = new ConfTreeOperations( - confTreeSerDeser.fromJson(confTreeSerDeser.toJson(instance)) ); - return ops; - } - - /** - * Load from a file and merge it in - * @param file file - * @throws IOException any IO problem - * @throws BadConfigException if the file is invalid - */ - public void mergeFile(File file) throws IOException, BadConfigException { - mergeFile(file, null); - } - - /** - * Load from a file and merge it in - * @param file file - * @param validator properties validator - * @throws IOException any IO problem - * @throws BadConfigException if the file is invalid - */ - public void mergeFile(File file, InputPropertiesValidator validator) throws IOException, BadConfigException { - ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser(); - ConfTree tree = confTreeSerDeser.fromFile(file); - ConfTreeOperations ops = new ConfTreeOperations(tree); - ops.validate(validator); - merge(ops.confTree); - } - - @Override - public String toString() { - return confTree.toString(); - } - - /** - * Convert to a JSON 
string - * @return a JSON string description - */ - public String toJson() throws IOException, - JsonGenerationException, - JsonMappingException { - return confTree.toJson(); - } - - /** - * Get a component option - * @param name component name - * @param option option name - * @param defVal default value - * @return resolved value - */ - public String getComponentOpt(String name, String option, String defVal) { - MapOperations roleopts = getComponent(name); - if (roleopts == null) { - return defVal; - } - return roleopts.getOption(option, defVal); - } - - /** - * Get a component opt; use {@link Integer#decode(String)} so as to take hex - * oct and bin values too. - * - * @param name component name - * @param option option name - * @param defVal default value - * @return parsed value - * @throws NumberFormatException if the role could not be parsed. - */ - public int getComponentOptInt(String name, String option, int defVal) { - String val = getComponentOpt(name, option, Integer.toString(defVal)); - return Integer.decode(val); - } - - /** - * Get a component opt as a boolean using {@link Boolean#valueOf(String)}. - * - * @param name component name - * @param option option name - * @param defVal default value - * @return parsed value - * @throws NumberFormatException if the role could not be parsed. - */ - public boolean getComponentOptBool(String name, String option, boolean defVal) { - String val = getComponentOpt(name, option, Boolean.toString(defVal)); - return Boolean.valueOf(val); - } - - /** - * Set a component option, creating the component if necessary - * @param component component name - * @param option option name - * @param val value - */ - public void setComponentOpt(String component, String option, String val) { - Map roleopts = getOrAddComponent(component); - roleopts.put(option, val); - } - - /** - * Set an integer role option, creating the role if necessary - * @param role role name - * @param option option name - * @param val integer value - */ - public void setComponentOpt(String role, String option, int val) { - setComponentOpt(role, option, Integer.toString(val)); - } - /** - * Set a long role option, creating the role if necessary - * @param role role name - * @param option option name - * @param val long value - */ - public void setComponentOpt(String role, String option, long val) { - setComponentOpt(role, option, Long.toString(val)); - } - - /** - * append to a component option - * @param key key - * @return value - * - */ - public String appendComponentOpt(String role, String key, String value) { - if (SliderUtils.isUnset(value)) { - return null; - } - MapOperations roleopts = getComponent(role); - if (roleopts == null) { - return null; - } - - if (roleopts.containsKey(key)) { - roleopts.put(key, roleopts.get(key) + "," + value); - } else { - roleopts.put(key, value); - } - return roleopts.get(key); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/InputPropertiesValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/InputPropertiesValidator.java deleted file mode 100644 index 237c2407507..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/InputPropertiesValidator.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.slider.core.conf; - -import org.apache.slider.core.exceptions.BadConfigException; - -/** - * - */ -public interface InputPropertiesValidator { - void validate(ConfTreeOperations props) throws BadConfigException; -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ResourcesInputPropertiesValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ResourcesInputPropertiesValidator.java deleted file mode 100644 index 19f6f8d5bfe..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/ResourcesInputPropertiesValidator.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.slider.core.conf; - -import org.apache.slider.api.ResourceKeys; -import org.apache.slider.core.exceptions.BadConfigException; - -/** - * - */ -public class ResourcesInputPropertiesValidator - extends AbstractInputPropertiesValidator { - - void validatePropertyNamePrefix(String key) throws BadConfigException { - if (!key.startsWith("yarn.") && !key.equals(ResourceKeys.UNIQUE_NAMES)) { - throw new BadConfigException( - "argument %s does not have 'yarn.' 
prefix", key); - } - } - - protected void validateGlobalProperties(ConfTreeOperations props) - throws BadConfigException { - for (String key : props.getGlobalOptions().keySet()) { - validatePropertyNamePrefix(key); - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/TemplateInputPropertiesValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/TemplateInputPropertiesValidator.java deleted file mode 100644 index aad275704d1..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/TemplateInputPropertiesValidator.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.slider.core.conf; - -import org.apache.slider.core.exceptions.BadConfigException; - -/** - * - */ -public class TemplateInputPropertiesValidator - extends AbstractInputPropertiesValidator { - - void validatePropertyNamePrefix(String key) throws BadConfigException { - if (key.startsWith("yarn.")) { - throw new BadConfigException( - "argument %s has 'yarn.' prefix - this is not allowed in templates", key); - } - } - - @Override - void validateGlobalProperties(ConfTreeOperations props) { - // do nothing - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java index aefc0deff85..41824598095 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java @@ -304,30 +304,6 @@ public abstract class AbstractLauncher extends Configured { } } - /** - * Extract the value for option - * {@code yarn.resourcemanager.am.retry-count-window-ms} - * and set it on the ApplicationSubmissionContext. Use the default value - * if option is not set. 
- * - * @param submissionContext - * @param map - */ - public void extractAmRetryCount(ApplicationSubmissionContext submissionContext, - Map map) { - - if (map != null) { - MapOperations options = new MapOperations("", map); - long amRetryCountWindow = options.getOptionLong(ResourceKeys - .YARN_RESOURCEMANAGER_AM_RETRY_COUNT_WINDOW_MS, - ResourceKeys.DEFAULT_AM_RETRY_COUNT_WINDOW_MS); - log.info("Setting {} to {}", - ResourceKeys.YARN_RESOURCEMANAGER_AM_RETRY_COUNT_WINDOW_MS, - amRetryCountWindow); - submissionContext.setAttemptFailuresValidityInterval(amRetryCountWindow); - } - } - public void extractLogAggregationContext(Map map) { if (map != null) { String logPatternSepStr = "\\|"; @@ -453,24 +429,6 @@ public abstract class AbstractLauncher extends Configured { env.putAll(map); } - /** - * Important: the configuration must already be fully resolved - * in order to pick up global options - * Copy env vars into the launch context. - */ - public boolean copyEnvVars(MapOperations options) { - if (options == null) { - return false; - } - for (Map.Entry entry : options.entrySet()) { - String key = entry.getKey(); - if (key.startsWith(RoleKeys.ENV_PREFIX)) { - key = key.substring(RoleKeys.ENV_PREFIX.length()); - env.put(key, entry.getValue()); - } - } - return true; - } public String[] dumpEnvToString() { @@ -504,19 +462,6 @@ public abstract class AbstractLauncher extends Configured { addLocalResources(confResources); } - /** - * Return the label expression and if not set null - * @param map map to look up - * @return extracted label or null - */ - public String extractLabelExpression(Map map) { - if (map != null) { - MapOperations options = new MapOperations("", map); - return options.getOption(ResourceKeys.YARN_LABEL_EXPRESSION, null); - } - return null; - } - public void setDockerImage(String dockerImage) { this.dockerImage = dockerImage; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AggregateConfSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AggregateConfSerDeser.java deleted file mode 100644 index 90537b6fd2b..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AggregateConfSerDeser.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.slider.core.persist; - -import org.apache.slider.core.conf.AggregateConf; -import org.codehaus.jackson.JsonGenerationException; -import org.codehaus.jackson.JsonParseException; -import org.codehaus.jackson.map.JsonMappingException; - -import java.io.IOException; - -/** - * Conf tree to JSON binding - */ -public class AggregateConfSerDeser extends JsonSerDeser { - public AggregateConfSerDeser() { - super(AggregateConf.class); - } - - - private static final AggregateConfSerDeser - staticinstance = new AggregateConfSerDeser(); - - /** - * Convert a tree instance to a JSON string -sync access to a shared ser/deser - * object instance - * @param instance object to convert - * @return a JSON string description - * @throws JsonParseException parse problems - * @throws JsonMappingException O/J mapping problems - */ - public static String toString(AggregateConf instance) throws IOException, - JsonGenerationException, - JsonMappingException { - synchronized (staticinstance) { - return staticinstance.toJson(instance); - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfPersister.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfPersister.java deleted file mode 100644 index 97592057fa1..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfPersister.java +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.core.persist; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.slider.common.tools.CoreFileSystem; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.exceptions.SliderException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.Date; - -/** - * Class to implement persistence of a configuration. - * - * This code contains the logic to acquire and release locks. - * # writelock MUST be acquired exclusively for writes. This is done - * by creating the file with no overwrite - * # shared readlock MUST be acquired for reads. This is done by creating the readlock - * file with overwrite forbidden -but treating a failure as a sign that - * the lock exists, and therefore the operation can continue. 
- * # releaselock is only released if the client created it. - * # after acquiring either lock, client must check for the alternate lock - * existing. If it is, release lock and fail. - * - * There's one small race here: multiple readers; first reader releases lock - * while second is in use. - * - * Strict Fix: client checks for readlock after read completed. - * If it is not there, problem: fail. But this massively increases the risk of - * false negatives. - * - * This isn't 100% perfect, because of the condition where the owner releases - * a lock, a writer grabs its lock & writes to it, the reader gets slightly - * contaminated data: - * own-share-delete-write-own-release(shared)-delete - * - * We are assuming that the rate of change is low enough that this is rare, and - * of limited damage. - * - * ONCE A CLUSTER IS RUNNING, ONLY THE AM MAY PERSIST UPDATES VIA ITS APIs - * - * That is: outside the AM, a writelock MUST only be acquired after verifying there is no - * running application. - */ -public class ConfPersister { - private static final Logger log = - LoggerFactory.getLogger(ConfPersister.class); - - - private final ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser(); - - private final CoreFileSystem coreFS; - private final FileSystem fileSystem; - private final Path persistDir; - private final Path internal, resources, app_conf; - private final Path writelock, readlock; - - public ConfPersister(CoreFileSystem coreFS, Path persistDir) { - this.coreFS = coreFS; - this.persistDir = persistDir; - internal = new Path(persistDir, Filenames.INTERNAL); - resources = new Path(persistDir, Filenames.RESOURCES); - app_conf = new Path(persistDir, Filenames.APPCONF); - writelock = new Path(persistDir, Filenames.WRITELOCK); - readlock = new Path(persistDir, Filenames.READLOCK); - fileSystem = coreFS.getFileSystem(); - } - - /** - * Get the target directory - * @return the directory for persistence - */ - public Path getPersistDir() { - return persistDir; - } - - /** - * Make the persistent directory - * @throws IOException IO failure - */ - public void mkPersistDir() throws IOException { - coreFS.getFileSystem().mkdirs(persistDir); - } - - @Override - public String toString() { - return "Persister to " + persistDir; - } - - /** - * Acquire the writelock - * @throws IOException IO - * @throws LockAcquireFailedException - */ - @VisibleForTesting - void acquireWritelock() throws IOException, - LockAcquireFailedException { - mkPersistDir(); - long now = System.currentTimeMillis(); - try { - coreFS.cat(writelock, false, new Date(now).toGMTString()); - } catch (FileAlreadyExistsException e) { - // filesystems should raise this (HDFS does) - throw new LockAcquireFailedException(writelock); - } catch (IOException e) { - // some filesystems throw a generic IOE - throw new LockAcquireFailedException(writelock, e); - } - //here the lock is acquired, but verify there is no readlock - boolean lockFailure; - try { - lockFailure = readLockExists(); - } catch (IOException e) { - lockFailure = true; - } - if (lockFailure) { - releaseWritelock(); - throw new LockAcquireFailedException(readlock); - } - } - - @VisibleForTesting - boolean readLockExists() throws IOException { - return fileSystem.exists(readlock); - } - - /** - * Release the writelock if it is present. 
- * IOExceptions are logged - */ - @VisibleForTesting - boolean releaseWritelock() { - try { - return fileSystem.delete(writelock, false); - } catch (IOException e) { - log.warn("IOException releasing writelock {} ", writelock, e); - } - return false; - } - - /** - * Acquire the writelock - * @throws IOException IO - * @throws LockAcquireFailedException - * @throws FileNotFoundException if the target dir does not exist. - */ - @VisibleForTesting - boolean acquireReadLock() throws FileNotFoundException, - IOException, - LockAcquireFailedException { - if (!coreFS.getFileSystem().exists(persistDir)) { - // the dir is not there, so the data is not there, so there - // is nothing to read - throw new FileNotFoundException(persistDir.toString()); - } - long now = System.currentTimeMillis(); - boolean owner; - try { - coreFS.cat(readlock, false, new Date(now).toGMTString()); - owner = true; - } catch (IOException e) { - owner = false; - } - //here the lock is acquired, but verify there is no readlock - boolean lockFailure; - try { - lockFailure = writelockExists(); - } catch (IOException e) { - lockFailure = true; - } - if (lockFailure) { - releaseReadlock(owner); - throw new LockAcquireFailedException(writelock); - } - return owner; - } - - @VisibleForTesting - boolean writelockExists() throws IOException { - return fileSystem.exists(writelock); - } - - /** - * Release the writelock if it is present. - * IOExceptions are downgraded to failures - * @return true if the lock was present and then released - */ - @VisibleForTesting - boolean releaseReadlock(boolean owner) { - if (owner) { - try { - return fileSystem.delete(readlock, false); - } catch (IOException e) { - log.warn("IOException releasing writelock {} ", readlock, e); - } - } - return false; - } - - private void saveConf(AggregateConf conf) throws IOException { - confTreeSerDeser.save(fileSystem, internal, conf.getInternal(), true); - confTreeSerDeser.save(fileSystem, resources, conf.getResources(), true); - confTreeSerDeser.save(fileSystem, app_conf, conf.getAppConf(), true); - } - - private void loadConf(AggregateConf conf) throws IOException { - conf.setInternal(confTreeSerDeser.load(fileSystem, internal)); - conf.setResources(confTreeSerDeser.load(fileSystem, resources)); - conf.setAppConf(confTreeSerDeser.load(fileSystem, app_conf)); - } - - - private void maybeExecLockHeldAction(LockHeldAction action) throws - IOException, - SliderException { - if (action != null) { - action.execute(); - } - } - - /** - * Save the configuration - * @param conf configuration to fill in - * @param action - * @throws IOException IO problems - * @throws LockAcquireFailedException the lock could not be acquired - */ - public void save(AggregateConf conf, LockHeldAction action) throws - IOException, - SliderException, - LockAcquireFailedException { - acquireWritelock(); - try { - saveConf(conf); - maybeExecLockHeldAction(action); - } finally { - releaseWritelock(); - } - } - - /** - * Load the configuration. 
If a lock failure is raised, the - * contents of the configuration MAY have changed -lock race conditions - * are looked for on exit - * @param conf configuration to fill in - * @throws IOException IO problems - * @throws LockAcquireFailedException the lock could not be acquired - */ - public void load(AggregateConf conf) throws - FileNotFoundException, - IOException, - SliderException, - LockAcquireFailedException { - boolean owner = acquireReadLock(); - try { - loadConf(conf); - } finally { - releaseReadlock(owner); - } - } - - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfTreeSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfTreeSerDeser.java deleted file mode 100644 index 8271ef14f31..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfTreeSerDeser.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
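For reference, the ConfPersister removed above coordinated readers and writers through marker files in the persistence directory rather than real filesystem locks: create the lock file without overwrite, then check for the opposing lock and back off if it exists. Below is a minimal sketch of that pattern, assuming standard Hadoop FileSystem semantics (create-with-overwrite=false failing when the file already exists, as HDFS does). The class name, the "writelock"/"readlock" file names, and the use of FileSystem.create() in place of the removed CoreFileSystem.cat() helper are illustrative assumptions, not part of the Slider code base.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileAlreadyExistsException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Illustrative write-lock acquire/release over lock marker files. */
    public class LockFileSketch {
      private final FileSystem fs;
      private final Path writelock;
      private final Path readlock;

      public LockFileSketch(FileSystem fs, Path persistDir) {
        this.fs = fs;
        this.writelock = new Path(persistDir, "writelock");
        this.readlock = new Path(persistDir, "readlock");
      }

      /** Acquire the write lock; fail if a writer or reader already holds one. */
      public void acquireWriteLock() throws IOException {
        try {
          // create(path, overwrite=false) must fail if the file already exists;
          // on HDFS this surfaces as FileAlreadyExistsException.
          fs.create(writelock, false).close();
        } catch (FileAlreadyExistsException e) {
          throw new IOException("write lock already held: " + writelock, e);
        }
        // The lock file now exists, but a reader may have slipped in between
        // the check and the create: verify and back off if so.
        if (fs.exists(readlock)) {
          releaseWriteLock();
          throw new IOException("read lock held: " + readlock);
        }
      }

      /** Release the write lock; returns true if the marker file was deleted. */
      public boolean releaseWriteLock() throws IOException {
        return fs.delete(writelock, false);
      }
    }

As the removed comments acknowledge, this scheme is advisory only: two readers can still race on release, which is why the class tolerated a small window of stale reads and restricted persistent updates to the AM once the cluster was running.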
- */ - -package org.apache.slider.core.persist; - -import org.apache.slider.core.conf.ConfTree; -import org.codehaus.jackson.JsonGenerationException; -import org.codehaus.jackson.JsonParseException; -import org.codehaus.jackson.map.JsonMappingException; - -import java.io.IOException; - -/** - * Conf tree to JSON binding - */ -public class ConfTreeSerDeser extends JsonSerDeser { - public ConfTreeSerDeser() { - super(ConfTree.class); - } - - - private static final ConfTreeSerDeser staticinstance = new ConfTreeSerDeser(); - - /** - * Convert a tree instance to a JSON string -sync access to a shared ser/deser - * object instance - * @param instance object to convert - * @return a JSON string description - * @throws JsonParseException parse problems - * @throws JsonMappingException O/J mapping problems - */ - public static String toString(ConfTree instance) throws IOException, - JsonGenerationException, - JsonMappingException { - synchronized (staticinstance) { - return staticinstance.toJson(instance); - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/LockAcquireFailedException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/LockAcquireFailedException.java deleted file mode 100644 index da585200e7e..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/LockAcquireFailedException.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.slider.core.persist; - -import org.apache.hadoop.fs.Path; - -public class LockAcquireFailedException extends Exception { - - private final Path path; - - public LockAcquireFailedException(Path path) { - super("Failed to acquire lock " +path); - this.path = path; - } - - public LockAcquireFailedException(Path path, Throwable cause) { - super("Failed to acquire lock " + path, cause); - this.path = path; - } - - public Path getPath() { - return path; - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java index 42e103a847f..df174f55c86 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java @@ -20,43 +20,22 @@ package org.apache.slider.providers; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.registry.client.api.RegistryOperations; import org.apache.slider.common.tools.SliderFileSystem; import org.apache.slider.common.tools.SliderUtils; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTreeOperations; -import org.apache.slider.core.conf.MapOperations; -import org.apache.slider.core.exceptions.BadClusterStateException; import org.apache.slider.core.exceptions.SliderException; -import org.apache.slider.core.launch.AbstractLauncher; import org.codehaus.jettison.json.JSONObject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; -import java.io.IOException; -import java.util.Collections; -import java.util.List; import java.util.HashSet; +import java.util.List; import java.util.Set; -import static org.apache.slider.api.ResourceKeys.COMPONENT_INSTANCES; -import static org.apache.slider.api.ResourceKeys.DEF_YARN_CORES; -import static org.apache.slider.api.ResourceKeys.DEF_YARN_MEMORY; -import static org.apache.slider.api.ResourceKeys.YARN_CORES; -import static org.apache.slider.api.ResourceKeys.YARN_MEMORY; - public abstract class AbstractClientProvider extends Configured { private static final Logger log = LoggerFactory.getLogger(AbstractClientProvider.class); - protected static final ProviderUtils providerUtils = - new ProviderUtils(log); - - public static final String PROVIDER_RESOURCE_BASE = - "org/apache/slider/providers/"; - public static final String PROVIDER_RESOURCE_BASE_ROOT = - "/" + PROVIDER_RESOURCE_BASE; public AbstractClientProvider(Configuration conf) { super(conf); @@ -66,150 +45,6 @@ public abstract class AbstractClientProvider extends Configured { public abstract List getRoles(); - /** - * Verify that an instance definition is considered valid by the provider - * @param instanceDefinition instance definition - * @throws SliderException if the configuration is not valid - */ - public void validateInstanceDefinition(AggregateConf instanceDefinition, SliderFileSystem fs) throws - SliderException { - - List roles = getRoles(); - ConfTreeOperations resources = - 
instanceDefinition.getResourceOperations(); - for (ProviderRole role : roles) { - String name = role.name; - MapOperations component = resources.getComponent(role.group); - if (component != null) { - String instances = component.get(COMPONENT_INSTANCES); - if (instances == null) { - String message = "No instance count provided for " + name; - log.error("{} with \n{}", message, resources.toString()); - throw new BadClusterStateException(message); - } - String ram = component.get(YARN_MEMORY); - String cores = component.get(YARN_CORES); - - - providerUtils.getRoleResourceRequirement(ram, - DEF_YARN_MEMORY, - Integer.MAX_VALUE); - providerUtils.getRoleResourceRequirement(cores, - DEF_YARN_CORES, - Integer.MAX_VALUE); - } - } - } - - - /** - * Prepare the AM settings for launch - * @param fileSystem filesystem - * @param serviceConf configuration of the client - * @param launcher launcher to set up - * @param instanceDescription instance description being launched - * @param snapshotConfDirPath - * @param generatedConfDirPath - * @param clientConfExtras - * @param libdir - * @param tempPath - * @param miniClusterTestRun flag set to true on a mini cluster run - * @throws IOException - * @throws SliderException - */ - public void prepareAMAndConfigForLaunch(SliderFileSystem fileSystem, - Configuration serviceConf, - AbstractLauncher launcher, - AggregateConf instanceDescription, - Path snapshotConfDirPath, - Path generatedConfDirPath, - Configuration clientConfExtras, - String libdir, - Path tempPath, - boolean miniClusterTestRun) - throws IOException, SliderException { - - } - - /** - * Load in and merge in templates. Null arguments means "no such template" - * @param instanceConf instance to patch - * @param internalTemplate patch to internal.json - * @param resourceTemplate path to resources.json - * @param appConfTemplate path to app_conf.json - * @throws IOException any IO problems - */ - protected void mergeTemplates(AggregateConf instanceConf, - String internalTemplate, - String resourceTemplate, - String appConfTemplate) throws IOException { - if (internalTemplate != null) { - ConfTreeOperations template = - ConfTreeOperations.fromResource(internalTemplate); - instanceConf.getInternalOperations() - .mergeWithoutOverwrite(template.confTree); - } - - if (resourceTemplate != null) { - ConfTreeOperations resTemplate = - ConfTreeOperations.fromResource(resourceTemplate); - instanceConf.getResourceOperations() - .mergeWithoutOverwrite(resTemplate.confTree); - } - - if (appConfTemplate != null) { - ConfTreeOperations template = - ConfTreeOperations.fromResource(appConfTemplate); - instanceConf.getAppConfOperations() - .mergeWithoutOverwrite(template.confTree); - } - - } - - /** - * This is called pre-launch to validate that the cluster specification - * is valid. This can include checking that the security options - * are in the site files prior to launch, that there are no conflicting operations - * etc. - * - * This check is made prior to every launch of the cluster -so can - * pick up problems which manually edited cluster files have added, - * or from specification files from previous versions. - * - * The provider MUST NOT change the remote specification. This is - * purely a pre-launch validation of options. 
- * - * - * @param sliderFileSystem filesystem - * @param clustername name of the cluster - * @param configuration cluster configuration - * @param instanceDefinition cluster specification - * @param clusterDirPath directory of the cluster - * @param generatedConfDirPath path to place generated artifacts - * @param secure flag to indicate that the cluster is secure - * @throws SliderException on any validation issue - * @throws IOException on any IO problem - */ - public void preflightValidateClusterConfiguration(SliderFileSystem sliderFileSystem, - String clustername, - Configuration configuration, - AggregateConf instanceDefinition, - Path clusterDirPath, - Path generatedConfDirPath, - boolean secure) - throws SliderException, IOException { - validateInstanceDefinition(instanceDefinition, sliderFileSystem); - } - - /** - * Return a set of application specific string tags. - * @return the set of tags. - */ - public Set getApplicationTags(SliderFileSystem fileSystem, - ConfTreeOperations appConf, String appName) throws SliderException { - return Collections.emptySet(); - } - /** * Generates a fixed format of application tags given one or more of * application name, version and description. This allows subsequent query for diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java index c80de7f69fd..c31b2ac0d28 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java @@ -18,29 +18,18 @@ package org.apache.slider.providers; -import org.apache.hadoop.registry.client.types.ServiceRecord; import org.apache.hadoop.service.Service; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; -import org.apache.slider.api.ClusterDescription; import org.apache.slider.api.resource.Application; import org.apache.slider.common.tools.SliderFileSystem; -import org.apache.slider.core.conf.AggregateConf; import org.apache.slider.core.exceptions.SliderException; import org.apache.slider.core.launch.ContainerLauncher; -import org.apache.slider.core.main.ExitCodeProvider; -import org.apache.slider.server.appmaster.actions.QueueAccess; -import org.apache.slider.server.appmaster.operations.RMOperationHandlerActions; -import org.apache.slider.server.appmaster.state.ContainerReleaseSelector; import org.apache.slider.server.appmaster.state.StateAccessForProviders; import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders; -import java.io.File; import java.io.IOException; -import java.net.URL; -import java.util.List; -import java.util.Map; public interface ProviderService extends Service { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java index 8b88c2895bf..1d5d8a007c5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java @@ -19,10 +19,6 @@ package org.apache.slider.providers.docker; import org.apache.hadoop.conf.Configuration; import org.apache.slider.common.SliderKeys; -import org.apache.slider.common.tools.SliderFileSystem; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTreeOperations; -import org.apache.slider.core.exceptions.SliderException; import org.apache.slider.providers.AbstractClientProvider; import org.apache.slider.providers.ProviderRole; import org.apache.slider.providers.ProviderUtils; @@ -31,7 +27,6 @@ import org.slf4j.LoggerFactory; import java.util.Collections; import java.util.List; -import java.util.Set; public class DockerClientProvider extends AbstractClientProvider implements SliderKeys { @@ -55,17 +50,4 @@ public class DockerClientProvider extends AbstractClientProvider return Collections.emptyList(); } - @Override - public void validateInstanceDefinition(AggregateConf instanceDefinition, - SliderFileSystem fs) throws SliderException { - super.validateInstanceDefinition(instanceDefinition, fs); - //TODO validate Application payload, part of that is already done in ApplicationApiService, need to do more - } - - @Override - public Set getApplicationTags(SliderFileSystem fileSystem, - ConfTreeOperations appConf, String appName) throws SliderException { - return createApplicationTags(appName, null, null); - } - } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java index 4fa27691a43..eca07e69d73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.io.Text; -import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.registry.client.api.RegistryOperations; import org.apache.hadoop.registry.client.binding.RegistryPathUtils; @@ -76,7 +75,6 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; -import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; import 
org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.WebAppException; import org.apache.hadoop.yarn.webapp.WebApps; @@ -98,7 +96,6 @@ import org.apache.slider.common.tools.PortScanner; import org.apache.slider.common.tools.SliderFileSystem; import org.apache.slider.common.tools.SliderUtils; import org.apache.slider.common.tools.SliderVersionInfo; -import org.apache.slider.core.conf.AggregateConf; import org.apache.slider.core.conf.MapOperations; import org.apache.slider.core.exceptions.BadConfigException; import org.apache.slider.core.exceptions.SliderException; @@ -137,7 +134,6 @@ import org.apache.slider.server.appmaster.operations.AbstractRMOperation; import org.apache.slider.server.appmaster.operations.AsyncRMOperationHandler; import org.apache.slider.server.appmaster.operations.RMOperationHandler; import org.apache.slider.server.appmaster.rpc.RpcBinder; -import org.apache.slider.server.appmaster.rpc.SliderAMPolicyProvider; import org.apache.slider.server.appmaster.rpc.SliderClusterProtocolPBImpl; import org.apache.slider.server.appmaster.rpc.SliderIPCService; import org.apache.slider.server.appmaster.security.SecurityConfiguration; @@ -384,13 +380,10 @@ public class SliderAppMaster extends AbstractSliderLaunchedService * added as a child and inited in {@link #serviceInit(Configuration)} */ private final QueueService actionQueues = new QueueService(); - private String agentOpsUrl; - private String agentStatusUrl; private YarnRegistryViewForProviders yarnRegistryOperations; //private FsDelegationTokenManager fsDelegationTokenManager; private RegisterApplicationMasterResponse amRegistrationData; private PortScanner portScanner; - private SecurityConfiguration securityConfiguration; /** * Is security enabled? @@ -752,31 +745,31 @@ public class SliderAppMaster extends AbstractSliderLaunchedService // the max value as part of its lookup rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient, maximumResourceCapability); -// processAMCredentials(securityConfiguration); + stripAMRMToken(); - if (securityEnabled) { - secretManager.setMasterKey( - amRegistrationData.getClientToAMTokenMasterKey().array()); - applicationACLs = amRegistrationData.getApplicationACLs(); - - //tell the server what the ACLs are - rpcService.getServer().refreshServiceAcl(serviceConf, - new SliderAMPolicyProvider()); - if (securityConfiguration.isKeytabProvided()) { - // perform keytab based login to establish kerberos authenticated - // principal. Can do so now since AM registration with RM above required - // tokens associated to principal - String principal = securityConfiguration.getPrincipal(); - //TODO read key tab file from slider-am.xml - File localKeytabFile = - securityConfiguration.getKeytabFile(new AggregateConf()); - // Now log in... - login(principal, localKeytabFile); - // obtain new FS reference that should be kerberos based and different - // than the previously cached reference - fs = new SliderFileSystem(serviceConf); - } - } +// if (securityEnabled) { +// secretManager.setMasterKey( +// amRegistrationData.getClientToAMTokenMasterKey().array()); +// applicationACLs = amRegistrationData.getApplicationACLs(); +// +// //tell the server what the ACLs are +// rpcService.getServer().refreshServiceAcl(serviceConf, +// new SliderAMPolicyProvider()); +// if (securityConfiguration.isKeytabProvided()) { +// // perform keytab based login to establish kerberos authenticated +// // principal. 
Can do so now since AM registration with RM above required +// // tokens associated to principal +// String principal = securityConfiguration.getPrincipal(); +// //TODO read key tab file from slider-am.xml +// File localKeytabFile = new File("todo"); +//// securityConfiguration.getKeytabFile(new AggregateConf()); +// // Now log in... +// login(principal, localKeytabFile); +// // obtain new FS reference that should be kerberos based and different +// // than the previously cached reference +// fs = new SliderFileSystem(serviceConf); +// } +// } // YARN client. // Important: this is only valid at startup, and must be executed within @@ -1010,22 +1003,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService * Process the initial user to obtain the set of user * supplied credentials (tokens were passed in by client). * Removes the AM/RM token. - * If a keytab has been provided, also strip the HDFS delegation token. - * @param securityConfig slider security config * @throws IOException */ - private void processAMCredentials(SecurityConfiguration securityConfig) + private void stripAMRMToken() throws IOException { - List filteredTokens = new ArrayList<>(3); filteredTokens.add(AMRMTokenIdentifier.KIND_NAME); - filteredTokens.add(TimelineDelegationTokenIdentifier.KIND_NAME); - - boolean keytabProvided = securityConfig.isKeytabProvided(); - log.info("Slider AM Security Mode: {}", keytabProvided ? "KEYTAB" : "TOKEN"); - if (keytabProvided) { - filteredTokens.add(DelegationTokenIdentifier.HDFS_DELEGATION_KIND); - } containerCredentials = CredentialUtils.filterTokens( UserGroupInformation.getCurrentUser().getCredentials(), filteredTokens); @@ -1946,24 +1929,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService nmClientAsync.startContainerAsync(container, ctx); } - /** - * Build the credentials needed for containers. This will include - * getting new delegation tokens for HDFS if the AM is running - * with a keytab. 
- * @return a buffer of credentials - * @throws IOException - */ - - private Credentials buildContainerCredentials() throws IOException { - Credentials credentials = new Credentials(containerCredentials); - if (securityConfiguration.isKeytabProvided()) { - CredentialUtils.addSelfRenewableFSDelegationTokens( - getClusterFS().getFileSystem(), - credentials); - } - return credentials; - } - @Override // NMClientAsync.CallbackHandler public void onContainerStopped(ContainerId containerId) { // do nothing but log: container events from the AM diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java index a660958b242..220f2cacb9f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java @@ -19,7 +19,6 @@ package org.apache.slider.server.appmaster.actions; import org.apache.slider.api.proto.Messages; -import org.apache.slider.core.conf.ConfTree; import org.apache.slider.server.appmaster.SliderAppMaster; import org.apache.slider.server.appmaster.state.AppState; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java index 4d483c79e82..7830a1e9ceb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java @@ -222,73 +222,4 @@ public class SliderClusterProtocolPBImpl implements SliderClusterProtocolPB { throw wrap(e); } } - - @Override - public Messages.WrappedJsonProto getModelDesired(RpcController controller, - Messages.EmptyPayloadProto request) throws ServiceException { - try { - return real.getModelDesired(request); - } catch (Exception e) { - throw wrap(e); - } - } - - @Override - public Messages.WrappedJsonProto getModelDesiredAppconf(RpcController controller, - Messages.EmptyPayloadProto request) throws ServiceException { - try { - return real.getModelDesiredAppconf(request); - } catch (Exception e) { - throw wrap(e); - } } - - @Override - public Messages.WrappedJsonProto getModelDesiredResources(RpcController controller, - Messages.EmptyPayloadProto request) throws ServiceException { - try { - return real.getModelDesiredResources(request); - } catch (Exception e) { - throw wrap(e); - } - } - - @Override - public Messages.WrappedJsonProto getModelResolved(RpcController controller, - Messages.EmptyPayloadProto request) throws ServiceException { - try { - return real.getModelResolved(request); - } catch (Exception e) 
{ - throw wrap(e); - } - } - - @Override - public Messages.WrappedJsonProto getModelResolvedAppconf(RpcController controller, - Messages.EmptyPayloadProto request) throws ServiceException { - try { - return real.getModelResolvedAppconf(request); - } catch (Exception e) { - throw wrap(e); - } - } - - @Override - public Messages.WrappedJsonProto getModelResolvedResources(RpcController controller, - Messages.EmptyPayloadProto request) throws ServiceException { - try { - return real.getModelResolvedResources(request); - } catch (Exception e) { - throw wrap(e); - } - } - - @Override - public Messages.WrappedJsonProto getLiveResources(RpcController controller, - Messages.EmptyPayloadProto request) throws ServiceException { - try { - return real.getLiveResources(request); - } catch (Exception e) { - throw wrap(e); - } - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java index c60d6090002..1902ec122f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java @@ -267,67 +267,4 @@ public class SliderClusterProtocolProxy implements SliderClusterProtocol { throw convert(e); } } - - @Override - public Messages.WrappedJsonProto getModelDesired(Messages.EmptyPayloadProto request) throws IOException { - try { - return endpoint.getModelDesired(NULL_CONTROLLER, request); - } catch (ServiceException e) { - throw convert(e); - } - } - - @Override - public Messages.WrappedJsonProto getModelDesiredAppconf(Messages.EmptyPayloadProto request) throws IOException { - try { - return endpoint.getModelDesiredAppconf(NULL_CONTROLLER, request); - } catch (ServiceException e) { - throw convert(e); - } - } - - @Override - public Messages.WrappedJsonProto getModelDesiredResources(Messages.EmptyPayloadProto request) throws IOException { - try { - return endpoint.getModelDesiredResources(NULL_CONTROLLER, request); - } catch (ServiceException e) { - throw convert(e); - } - } - - @Override - public Messages.WrappedJsonProto getModelResolved(Messages.EmptyPayloadProto request) throws IOException { - try { - return endpoint.getModelResolved(NULL_CONTROLLER, request); - } catch (ServiceException e) { - throw convert(e); - } - } - - @Override - public Messages.WrappedJsonProto getModelResolvedAppconf(Messages.EmptyPayloadProto request) throws IOException { - try { - return endpoint.getModelResolvedAppconf(NULL_CONTROLLER, request); - } catch (ServiceException e) { - throw convert(e); - } - } - - @Override - public Messages.WrappedJsonProto getModelResolvedResources(Messages.EmptyPayloadProto request) throws IOException { - try { - return endpoint.getModelResolvedResources(NULL_CONTROLLER, request); - } catch (ServiceException e) { - throw convert(e); - } - } - - @Override - public Messages.WrappedJsonProto getLiveResources(Messages.EmptyPayloadProto request) throws IOException { - try { - return endpoint.getLiveResources(NULL_CONTROLLER, request); - } catch (ServiceException 
e) { - throw convert(e); - } - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java index 344495b41b6..eaa0a81558e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java @@ -32,12 +32,8 @@ import org.apache.slider.api.types.ComponentInformation; import org.apache.slider.api.types.ContainerInformation; import org.apache.slider.api.types.NodeInformation; import org.apache.slider.api.types.NodeInformationList; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.conf.ConfTree; import org.apache.slider.core.exceptions.ServiceNotReadyException; import org.apache.slider.core.main.LauncherExitCodes; -import org.apache.slider.core.persist.AggregateConfSerDeser; -import org.apache.slider.core.persist.ConfTreeSerDeser; import org.apache.slider.core.persist.JsonSerDeser; import org.apache.slider.server.appmaster.AppMasterActionOperations; import org.apache.slider.server.appmaster.actions.ActionFlexCluster; @@ -401,70 +397,6 @@ public class SliderIPCService extends AbstractService } } - @Override - public Messages.WrappedJsonProto getModelDesired(Messages.EmptyPayloadProto request) throws IOException { - return lookupAggregateConf(MODEL_DESIRED); - } - - @Override - public Messages.WrappedJsonProto getModelDesiredAppconf(Messages.EmptyPayloadProto request) throws IOException { - return lookupConfTree(MODEL_DESIRED_APPCONF); - } - - @Override - public Messages.WrappedJsonProto getModelDesiredResources(Messages.EmptyPayloadProto request) throws IOException { - return lookupConfTree(MODEL_DESIRED_RESOURCES); - } - - @Override - public Messages.WrappedJsonProto getModelResolved(Messages.EmptyPayloadProto request) throws IOException { - return lookupAggregateConf(MODEL_RESOLVED); - } - - @Override - public Messages.WrappedJsonProto getModelResolvedAppconf(Messages.EmptyPayloadProto request) throws IOException { - return lookupConfTree(MODEL_RESOLVED_APPCONF); - } - - @Override - public Messages.WrappedJsonProto getModelResolvedResources(Messages.EmptyPayloadProto request) throws IOException { - return lookupConfTree(MODEL_RESOLVED_RESOURCES); - } - - @Override - public Messages.WrappedJsonProto getLiveResources(Messages.EmptyPayloadProto request) throws IOException { - return lookupConfTree(LIVE_RESOURCES); - } - - /** - * Helper method; look up an aggregate configuration in the cache from - * a key, or raise an exception - * @param key key to resolve - * @return the configuration - * @throws IOException on a failure - */ - - protected Messages.WrappedJsonProto lookupAggregateConf(String key) throws - IOException { - AggregateConf aggregateConf = (AggregateConf) cache.lookupWithIOE(key); - String json = AggregateConfSerDeser.toString(aggregateConf); - return wrap(json); - } - - /** - * Helper method; look up an conf tree in the cache from - * a key, or raise an exception - * @param key key to resolve - * @return the configuration - * @throws IOException on a 
failure - */ - protected Messages.WrappedJsonProto lookupConfTree(String key) throws - IOException { - ConfTree conf = (ConfTree) cache.lookupWithIOE(key); - String json = ConfTreeSerDeser.toString(conf); - return wrap(json); - } - private Messages.WrappedJsonProto wrap(String json) { Messages.WrappedJsonProto.Builder builder = Messages.WrappedJsonProto.newBuilder(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java index b31babc3436..37c730f105d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java @@ -16,21 +16,9 @@ */ package org.apache.slider.server.appmaster.security; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.UserGroupInformation; -import static org.apache.slider.core.main.LauncherExitCodes.EXIT_UNAUTHORIZED; -import org.apache.slider.common.SliderKeys; -import org.apache.slider.common.SliderXmlConfKeys; -import org.apache.slider.common.tools.SliderUtils; -import org.apache.slider.core.conf.AggregateConf; -import org.apache.slider.core.exceptions.SliderException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; - /** * Class keeping code security information */ @@ -38,126 +26,111 @@ public class SecurityConfiguration { protected static final Logger log = LoggerFactory.getLogger(SecurityConfiguration.class); - private final Configuration configuration; - private final AggregateConf instanceDefinition; private String clusterName; - public SecurityConfiguration(Configuration configuration, - AggregateConf instanceDefinition, - String clusterName) throws SliderException { - Preconditions.checkNotNull(configuration); - Preconditions.checkNotNull(instanceDefinition); - Preconditions.checkNotNull(clusterName); - this.configuration = configuration; - this.instanceDefinition = instanceDefinition; - this.clusterName = clusterName; - validate(); - } - - private void validate() throws SliderException { - if (isSecurityEnabled()) { - String principal = instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL); - if(SliderUtils.isUnset(principal)) { - // if no login identity is available, fail - UserGroupInformation loginUser = null; - try { - loginUser = getLoginUser(); - } catch (IOException e) { - throw new SliderException(EXIT_UNAUTHORIZED, e, - "No principal configured for the application and " - + "exception raised during retrieval of login user. " - + "Unable to proceed with application " - + "initialization. Please ensure a value " - + "for %s exists in the application " - + "configuration or the login issue is addressed", - SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL); - } - if (loginUser == null) { - throw new SliderException(EXIT_UNAUTHORIZED, - "No principal configured for the application " - + "and no login user found. 
" - + "Unable to proceed with application " - + "initialization. Please ensure a value " - + "for %s exists in the application " - + "configuration or the login issue is addressed", - SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL); - } - } - // ensure that either local or distributed keytab mechanism is enabled, - // but not both - String keytabFullPath = instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM) - .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH); - String keytabName = instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM) - .get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME); - if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) { - throw new SliderException(EXIT_UNAUTHORIZED, - "Both a keytab on the cluster host (%s) and a" - + " keytab to be retrieved from HDFS (%s) are" - + " specified. Please configure only one keytab" - + " retrieval mechanism.", - SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH, - SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME); - - } - } - } - - protected UserGroupInformation getLoginUser() throws IOException { - return UserGroupInformation.getLoginUser(); - } - - public boolean isSecurityEnabled () { - return SliderUtils.isHadoopClusterSecure(configuration); - } - - public String getPrincipal () throws IOException { - String principal = instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL); - if (SliderUtils.isUnset(principal)) { - principal = UserGroupInformation.getLoginUser().getShortUserName(); - log.info("No principal set in the slider configuration. Will use AM login" - + " identity {} to attempt keytab-based login", principal); - } - - return principal; - } - - public boolean isKeytabProvided() { - boolean keytabProvided = instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM) - .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH) != null || - instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM). - get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME) != null; - return keytabProvided; - - } - - public File getKeytabFile(AggregateConf instanceDefinition) - throws SliderException, IOException { - //TODO implement this for dash semantic - String keytabFullPath = instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM) - .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH); - File localKeytabFile; - if (SliderUtils.isUnset(keytabFullPath)) { - // get the keytab - String keytabName = instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM). - get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME); - log.info("No host keytab file path specified. 
Will attempt to retrieve" - + " keytab file {} as a local resource for the container", - keytabName); - // download keytab to local, protected directory - localKeytabFile = new File(SliderKeys.KEYTAB_DIR, keytabName); - } else { - log.info("Using host keytab file {} for login", keytabFullPath); - localKeytabFile = new File(keytabFullPath); - } - return localKeytabFile; - } - +// private void validate() throws SliderException { +// if (isSecurityEnabled()) { +// String principal = instanceDefinition.getAppConfOperations() +// .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL); +// if(SliderUtils.isUnset(principal)) { +// // if no login identity is available, fail +// UserGroupInformation loginUser = null; +// try { +// loginUser = getLoginUser(); +// } catch (IOException e) { +// throw new SliderException(EXIT_UNAUTHORIZED, e, +// "No principal configured for the application and " +// + "exception raised during retrieval of login user. " +// + "Unable to proceed with application " +// + "initialization. Please ensure a value " +// + "for %s exists in the application " +// + "configuration or the login issue is addressed", +// SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL); +// } +// if (loginUser == null) { +// throw new SliderException(EXIT_UNAUTHORIZED, +// "No principal configured for the application " +// + "and no login user found. " +// + "Unable to proceed with application " +// + "initialization. Please ensure a value " +// + "for %s exists in the application " +// + "configuration or the login issue is addressed", +// SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL); +// } +// } +// // ensure that either local or distributed keytab mechanism is enabled, +// // but not both +// String keytabFullPath = instanceDefinition.getAppConfOperations() +// .getComponent(SliderKeys.COMPONENT_AM) +// .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH); +// String keytabName = instanceDefinition.getAppConfOperations() +// .getComponent(SliderKeys.COMPONENT_AM) +// .get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME); +// if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) { +// throw new SliderException(EXIT_UNAUTHORIZED, +// "Both a keytab on the cluster host (%s) and a" +// + " keytab to be retrieved from HDFS (%s) are" +// + " specified. Please configure only one keytab" +// + " retrieval mechanism.", +// SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH, +// SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME); +// +// } +// } +// } +// +// protected UserGroupInformation getLoginUser() throws IOException { +// return UserGroupInformation.getLoginUser(); +// } +// +// public boolean isSecurityEnabled () { +// return SliderUtils.isHadoopClusterSecure(configuration); +// } +// +// public String getPrincipal () throws IOException { +// String principal = instanceDefinition.getAppConfOperations() +// .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL); +// if (SliderUtils.isUnset(principal)) { +// principal = UserGroupInformation.getLoginUser().getShortUserName(); +// log.info("No principal set in the slider configuration. Will use AM login" +// + " identity {} to attempt keytab-based login", principal); +// } +// +// return principal; +// } +// +// public boolean isKeytabProvided() { +// boolean keytabProvided = instanceDefinition.getAppConfOperations() +// .getComponent(SliderKeys.COMPONENT_AM) +// .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH) != null || +// instanceDefinition.getAppConfOperations() +// .getComponent(SliderKeys.COMPONENT_AM). 
+// get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME) != null; +// return keytabProvided; +// +// } +// +// public File getKeytabFile(AggregateConf instanceDefinition) +// throws SliderException, IOException { +// //TODO implement this for dash semantic +// String keytabFullPath = instanceDefinition.getAppConfOperations() +// .getComponent(SliderKeys.COMPONENT_AM) +// .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH); +// File localKeytabFile; +// if (SliderUtils.isUnset(keytabFullPath)) { +// // get the keytab +// String keytabName = instanceDefinition.getAppConfOperations() +// .getComponent(SliderKeys.COMPONENT_AM). +// get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME); +// log.info("No host keytab file path specified. Will attempt to retrieve" +// + " keytab file {} as a local resource for the container", +// keytabName); +// // download keytab to local, protected directory +// localKeytabFile = new File(SliderKeys.KEYTAB_DIR, keytabName); +// } else { +// log.info("Using host keytab file {} for login", keytabFullPath); +// localKeytabFile = new File(keytabFullPath); +// } +// return localKeytabFile; +// } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java index 84b8140e580..3d73f3b5d71 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java @@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableGaugeInt; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -35,7 +34,6 @@ import org.apache.hadoop.yarn.client.api.AMRMClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.resource.Resources; -import org.apache.slider.api.ClusterDescription; import org.apache.slider.api.ClusterNode; import org.apache.slider.api.InternalKeys; import org.apache.slider.api.StatusKeys; @@ -82,7 +80,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicInteger; -import static org.apache.hadoop.metrics2.lib.Interns.info; import static org.apache.slider.api.ResourceKeys.*; import static org.apache.slider.api.StateValues.*; import static org.apache.slider.api.resource.ApplicationState.STARTED; @@ -110,12 +107,6 @@ public class AppState { private Application app; - - /** - * This is a template of the cluster status - */ - private ClusterDescription clusterStatusTemplate = new ClusterDescription(); - private final Map roleStatusMap = new ConcurrentSkipListMap<>(); @@ -1325,59 +1316,6 @@ public class AppState { //TODO build container stats app.setState(ApplicationState.STARTED); return app; -/* - return app; - - ClusterDescription cd = 
getClusterStatus(); - long now = now(); - cd.setInfoTime(StatusKeys.INFO_STATUS_TIME_HUMAN, - StatusKeys.INFO_STATUS_TIME_MILLIS, - now); - - MapOperations infoOps = new MapOperations("info", cd.info); - infoOps.mergeWithoutOverwrite(applicationInfo); - SliderUtils.addBuildInfo(infoOps, "status"); - cd.statistics = new HashMap<>(); - - // build the map of node -> container IDs - Map> instanceMap = createRoleToInstanceMap(); - cd.instances = instanceMap; - - //build the map of node -> containers - Map> clusterNodes = - createRoleToClusterNodeMap(); - log.info("app state clusterNodes {} ", clusterNodes.toString()); - cd.status = new HashMap<>(); - cd.status.put(ClusterDescriptionKeys.KEY_CLUSTER_LIVE, clusterNodes); - - for (RoleStatus role : getRoleStatusMap().values()) { - String rolename = role.getName(); - List instances = instanceMap.get(rolename); - int nodeCount = instances != null ? instances.size(): 0; - cd.setRoleOpt(rolename, COMPONENT_INSTANCES, - role.getDesired()); - cd.setRoleOpt(rolename, ROLE_ACTUAL_INSTANCES, nodeCount); - cd.setRoleOpt(rolename, ROLE_REQUESTED_INSTANCES, role.getRequested()); - cd.setRoleOpt(rolename, ROLE_RELEASING_INSTANCES, role.getReleasing()); - cd.setRoleOpt(rolename, ROLE_FAILED_INSTANCES, role.getFailed()); - cd.setRoleOpt(rolename, ROLE_FAILED_STARTING_INSTANCES, role.getStartFailed()); - cd.setRoleOpt(rolename, ROLE_FAILED_RECENTLY_INSTANCES, role.getFailedRecently()); - cd.setRoleOpt(rolename, ROLE_NODE_FAILED_INSTANCES, role.getNodeFailed()); - cd.setRoleOpt(rolename, ROLE_PREEMPTED_INSTANCES, role.getPreempted()); - if (role.isAntiAffinePlacement()) { - cd.setRoleOpt(rolename, ROLE_PENDING_AA_INSTANCES, role.getPendingAntiAffineRequests()); - } - Map stats = role.buildStatistics(); - cd.statistics.put(rolename, stats); - } - - Map sliderstats = getLiveStatistics(); - cd.statistics.put(SliderKeys.COMPONENT_AM, sliderstats); - - // liveness - cd.liveness = getApplicationLivenessInformation(); - - return cd;*/ } /** @@ -1390,7 +1328,6 @@ public class AppState { int outstanding = (int)(stats.desired - stats.actual); li.requestsOutstanding = outstanding; li.allRequestsSatisfied = outstanding <= 0; - li.activeRequests = (int)stats.requested; return li; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java index e73dd875204..44259d386a8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java @@ -22,7 +22,6 @@ import org.apache.slider.api.resource.Application; import org.apache.slider.server.appmaster.web.WebAppApi; import org.apache.slider.server.appmaster.web.rest.application.actions.RestActionStop; import org.apache.slider.server.appmaster.web.rest.application.actions.StopResponse; -import org.apache.slider.server.appmaster.web.rest.management.ManagementResource; import org.apache.slider.server.appmaster.web.rest.publisher.PublisherResource; import org.apache.slider.server.appmaster.web.rest.registry.RegistryResource; @@ 
-46,22 +45,16 @@ public class AMWebServices { /** AM/WebApp info object */ private WebAppApi slider; - private final ManagementResource managementResource; private final PublisherResource publisherResource; private final RegistryResource registryResource; @Inject public AMWebServices(WebAppApi slider) { this.slider = slider; - managementResource = new ManagementResource(slider); publisherResource = new PublisherResource(slider); registryResource = new RegistryResource(slider); } - - @Path(RestPaths.SLIDER_SUBPATH_MANAGEMENT) - public ManagementResource getManagementResource() { - return managementResource; - } + //TODO add an endpoint for exposing configs @Path(RestPaths.SLIDER_SUBPATH_PUBLISHER) public PublisherResource getPublisherResource() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java deleted file mode 100644 index 261e66e29be..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-/**
- * Refresh the aggregate desired model via
- */
-public class AggregateModelRefresher
-    implements ResourceRefresher<AggregateConf> {
-
-  private final StateAccessForProviders state;
-  private final boolean resolved;
-
-  public AggregateModelRefresher(StateAccessForProviders state,
-      boolean resolved) {
-    this.state = state;
-    this.resolved = resolved;
-  }
-
-  @Override
-  public AggregateConf refresh() throws Exception {
-    return new AggregateConf();
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
deleted file mode 100644
index 190a51e995c..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.persist.ConfTreeSerDeser;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-/**
- * refresher for resources and application configuration
- */
-public class AppconfRefresher
-    implements ResourceRefresher<ConfTree> {
-
-  private final StateAccessForProviders state;
-  private final boolean unresolved;
-  private final boolean resources;
-
-  public AppconfRefresher(StateAccessForProviders state,
-      boolean unresolved,
-      boolean resources) {
-    this.state = state;
-    this.unresolved = unresolved;
-    this.resources = resources;
-  }
-
-
-  @Override
-  public ConfTree refresh() throws Exception {
-    AggregateConf aggregateConf = new AggregateConf();
-    ConfTree ct = resources ? aggregateConf.getResources()
-        : aggregateConf.getAppConf();
-    return new ConfTreeSerDeser().fromInstance(ct);
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveComponentsRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveComponentsRefresher.java
deleted file mode 100644
index b6627a70835..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveComponentsRefresher.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.api.types.ComponentInformation;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-import java.util.Map;
-
-public class LiveComponentsRefresher
-    implements ResourceRefresher<Map<String, ComponentInformation>> {
-
-  private final StateAccessForProviders state;
-
-  public LiveComponentsRefresher(StateAccessForProviders state) {
-    this.state = state;
-  }
-
-  @Override
-  public Map<String, ComponentInformation> refresh() {
-    return state.getComponentInfoSnapshot();
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveContainersRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveContainersRefresher.java
deleted file mode 100644
index 68bd8a20b03..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveContainersRefresher.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.api.types.ContainerInformation;
-import org.apache.slider.server.appmaster.state.RoleInstance;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Refresh the container list.
- */
-public class LiveContainersRefresher implements ResourceRefresher<Map<String, ContainerInformation>> {
-
-  private final StateAccessForProviders state;
-
-  public LiveContainersRefresher(StateAccessForProviders state) {
-    this.state = state;
-  }
-
-  @Override
-  public Map<String, ContainerInformation> refresh() throws
-      Exception {
-    List<RoleInstance> containerList = state.cloneOwnedContainerList();
-
-    Map<String, ContainerInformation> map = new HashMap<>();
-    for (RoleInstance instance : containerList) {
-      ContainerInformation serialized = instance.serialize();
-      map.put(serialized.containerId, serialized);
-    }
-    return map;
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveNodesRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveNodesRefresher.java
deleted file mode 100644
index aeb7a11d854..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveNodesRefresher.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.api.types.NodeInformationList;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-/**
- * Update the live nodes map
- */
-public class LiveNodesRefresher
-    implements ResourceRefresher<NodeInformationList> {
-
-  private final StateAccessForProviders state;
-
-  public LiveNodesRefresher(StateAccessForProviders state) {
-    this.state = state;
-  }
-
-  @Override
-  public NodeInformationList refresh() {
-
-    return new NodeInformationList(state.getNodeInformationSnapshot().values());
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/ManagementResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/ManagementResource.java
deleted file mode 100644
index 14d94007c93..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/ManagementResource.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.slider.server.appmaster.web.rest.management;
-
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.server.appmaster.web.WebAppApi;
-import org.apache.slider.server.appmaster.web.rest.AbstractSliderResource;
-import org.apache.slider.server.appmaster.web.rest.RestPaths;
-import org.apache.slider.server.appmaster.web.rest.management.resources.AggregateConfResource;
-import org.apache.slider.server.appmaster.web.rest.management.resources.ConfTreeResource;
-import org.apache.slider.server.appmaster.web.rest.management.resources.ResourceFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.net.URI;
-import java.net.URL;
-
-/**
- *
- */
-public class ManagementResource extends AbstractSliderResource {
-  protected static final Logger log =
-      LoggerFactory.getLogger(ManagementResource.class);
-  public static final String CONFIG = "config";
-  public static final String APP_UNDER_MANAGEMENT = "/app";
-
-  public ManagementResource(WebAppApi slider) {
-    super(slider);
-  }
-
-  private void init(HttpServletResponse res) {
-    res.setContentType(null);
-  }
-
-  @GET
-  public Response getWadl (@Context HttpServletRequest request) {
-    return redirectToAppWadl(request);
-  }
-
-  @GET
-  @Path("/app")
-  @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
-  public AggregateConfResource getAggregateConfiguration(@Context UriInfo uriInfo,
-      @Context HttpServletResponse res) {
-    init(res);
-    return ResourceFactory.createAggregateConfResource(getAggregateConf(),
-        uriInfo.getAbsolutePathBuilder());
-  }
-
-  @GET
-  @Path(APP_UNDER_MANAGEMENT+"/configurations/{config}")
-  @Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
-  public ConfTreeResource getConfTreeResource(@PathParam(CONFIG) String config,
-      @Context UriInfo uriInfo,
-      @Context HttpServletResponse res) {
-    init(res);
-    AggregateConfResource aggregateConf =
-        ResourceFactory.createAggregateConfResource(getAggregateConf(),
-            uriInfo.getBaseUriBuilder()
-                .path(RestPaths.SLIDER_CONTEXT_ROOT)
-                .path(RestPaths.MANAGEMENT + APP_UNDER_MANAGEMENT));
-    return aggregateConf.getConfTree(config);
-  }
-
-  protected AggregateConf getAggregateConf() {
-    //TODO
-    return new AggregateConf();
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ActionsResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ActionsResource.java
deleted file mode 100644
index 9b340fa100e..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ActionsResource.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.management.resources;
-
-public class ActionsResource {
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/AggregateConfResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/AggregateConfResource.java
deleted file mode 100644
index 794daf9b1dd..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/AggregateConfResource.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.slider.server.appmaster.web.rest.management.resources;
-
-import org.apache.slider.core.conf.AggregateConf;
-import org.codehaus.jackson.annotate.JsonIgnore;
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-
-import javax.ws.rs.core.UriBuilder;
-import java.util.HashMap;
-import java.util.Map;
-
-@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
-public class AggregateConfResource {
-  private String href;
-  private final ConfTreeResource resources;
-  private final ConfTreeResource internal;
-  private final ConfTreeResource appConf;
-  @JsonIgnore
-  private Map<String, ConfTreeResource> confMap;
-
-  public AggregateConfResource(AggregateConf conf, UriBuilder uriBuilder) {
-    if (uriBuilder != null) {
-      this.href = uriBuilder.build().toASCIIString();
-      resources = ResourceFactory.createConfTreeResource(conf.getResources(),
-          uriBuilder.clone().path("configurations").path("resources"));
-      internal = ResourceFactory.createConfTreeResource(conf.getInternal(),
-          uriBuilder.clone().path("configurations").path("internal"));
-      appConf = ResourceFactory.createConfTreeResource(conf.getAppConf(),
-          uriBuilder.clone().path("configurations").path("appConf"));
-      initConfMap();
-    } else {
-      resources = null;
-      internal = null;
-      appConf = null;
-    }
-  }
-
-  private void initConfMap() {
-    confMap = new HashMap<>();
-    confMap.put("internal", internal);
-    confMap.put("resources", resources);
-    confMap.put("appConf", appConf);
-  }
-
-  public AggregateConfResource() {
-    this(null, null);
-  }
-
-  public ConfTreeResource getConfTree(String name) {
-    return confMap.get(name);
-  }
-
-  public String getHref() {
-    return href;
-  }
-
-  public void setHref(String href) {
-    this.href = href;
-  }
-
-  public ConfTreeResource getResources() {
-    return resources;
-  }
-
-  public ConfTreeResource getInternal() {
-    return internal;
-  }
-
-  public ConfTreeResource getAppConf() {
-    return appConf;
-  }
-
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
deleted file mode 100644
index a44448e3999..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.slider.server.appmaster.web.rest.management.resources;
-
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-
-import javax.ws.rs.core.UriBuilder;
-import java.util.Map;
-
-@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
-public class ComponentResource {
-  private final Map<String, String> props;
-  private String href;
-
-  public ComponentResource() {
-    this(null, null, null, null);
-  }
-
-  public ComponentResource(String name,
-      Map<String, String> props,
-      UriBuilder uriBuilder,
-      Map<String, String> pathElems) {
-    this.props = props;
-  }
-
-  public Map<String, String> getProps() {
-    return props;
-  }
-
-  public String getHref() {
-    return href;
-  }
-
-  public void setHref(String href) {
-    this.href = href;
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
deleted file mode 100644
index 407bab60aeb..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.slider.server.appmaster.web.rest.management.resources;
-
-import org.apache.slider.core.conf.ConfTree;
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-
-import javax.ws.rs.core.UriBuilder;
-import java.util.Map;
-
-@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
-public class ConfTreeResource {
-
-  private final String href;
-  private final Map<String, Object> metadata;
-  private final Map<String, String> global;
-  private final Map<String, Map<String, String>> components;
-
-  public ConfTreeResource() {
-    this(null, null);
-  }
-
-  public ConfTreeResource(ConfTree confTree,
-      UriBuilder uriBuilder) {
-    if (uriBuilder != null && confTree != null) {
-      metadata = confTree.metadata;
-      global = confTree.global;
-      components = confTree.components;
-      this.href = uriBuilder.build().toASCIIString();
-    } else {
-      this.href = null;
-      this.metadata = null;
-      this.global = null;
-      this.components = null;
-    }
-  }
-
-  public Map<String, Object> getMetadata() {
-    return metadata;
-  }
-
-  public Map<String, String> getGlobal() {
-    return global;
-  }
-
-  public Map<String, Map<String, String>> getComponents() {
-    return components;
-  }
-
-  public String getHref() {
-    return href;
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ResourceFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ResourceFactory.java
deleted file mode 100644
index 9876412471e..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ResourceFactory.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.slider.server.appmaster.web.rest.management.resources;
-
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-
-import javax.ws.rs.core.UriBuilder;
-import java.util.Map;
-
-@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
-public class ResourceFactory {
-
-  public static AggregateConfResource createAggregateConfResource(AggregateConf conf,
-      UriBuilder uriBuilder) {
-    return new AggregateConfResource(conf, uriBuilder);
-  }
-
-  public static ConfTreeResource createConfTreeResource(ConfTree confTree,
-      UriBuilder uriBuilder) {
-    return new ConfTreeResource(confTree, uriBuilder);
-  }
-
-  public static ComponentResource createComponentResource(String name,
-      Map<String, String> props,
-      UriBuilder uriBuilder,
-      Map<String, String> pathElems) {
-    return new ComponentResource(name, props, uriBuilder, pathElems);
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java
index b67f069e91f..3e9b764e118 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/publisher/PublisherResource.java
@@ -32,7 +32,6 @@ import org.apache.slider.server.appmaster.web.rest.AbstractSliderResource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
@@ -40,19 +39,17 @@ import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
 import javax.ws.rs.core.UriInfo;
 import java.io.IOException;
 import java.net.URL;
 import java.net.URLClassLoader;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
-import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.PUBLISHED_CONFIGURATION_REGEXP;
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.PUBLISHED_CONFIGURATION_SET_REGEXP;
 
 /**
  * This publishes configuration sets
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
index 8a0faf94fe5..f52d7a138e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
@@ -137,32 +137,4 @@ service SliderClusterProtocolPB {
 
   rpc getLiveNode(GetLiveNodeRequestProto)
     returns(NodeInformationProto);
-
-// AggregateConf getModelDesired()
-  rpc getModelDesired(EmptyPayloadProto)
-    returns(WrappedJsonProto);
-
-  // ConfTree getModelDesiredAppconf
-  rpc getModelDesiredAppconf(EmptyPayloadProto)
-    returns(WrappedJsonProto);
-
-  // ConfTree getModelDesiredResources
-  rpc getModelDesiredResources(EmptyPayloadProto)
-    returns(WrappedJsonProto);
-
-// AggregateConf getModelResolved()
-  rpc getModelResolved(EmptyPayloadProto)
-    returns(WrappedJsonProto);
-
-  // ConfTree getModelResolvedAppconf
-  rpc getModelResolvedAppconf(EmptyPayloadProto)
-    returns(WrappedJsonProto);
-
-  // ConfTree getModelResolvedResources
-  rpc getModelResolvedResources(EmptyPayloadProto)
-    returns(WrappedJsonProto);
-
-  // ConfTree getLiveResources
-  rpc getLiveResources(EmptyPayloadProto)
-    returns(WrappedJsonProto);
 }