YARN-7165. Miscellaneous fixes in yarn-native-services. Contributed by Jian He
parent: b9ca13e34b
commit: 541b64c798
@@ -146,7 +146,7 @@
    <swagger-annotations-version>1.5.4</swagger-annotations-version>
    <snakeyaml.version>1.16</snakeyaml.version>
    <jcommander.version>1.30</jcommander.version>
    <jcommander.version>1.48</jcommander.version>
  </properties>

  <dependencyManagement>

@@ -155,10 +155,12 @@
      <item name="Timeline Service V.2" href="hadoop-yarn/hadoop-yarn-site/TimelineServiceV2.html#Timeline_Service_v.2_REST_API"/>
    </menu>

    <menu name="YARN Native Services" inherit="top">
      <item name="Introduction" href="hadoop-yarn/hadoop-yarn-site/native-services/NativeServicesIntro.html"/>
      <item name="Native Services API" href="hadoop-yarn/hadoop-yarn-site/native-services/NativeServicesAPI.html"/>
      <item name="Native Services Discovery" href="hadoop-yarn/hadoop-yarn-site/native-services/NativeServicesDiscovery.html"/>
    <menu name="YARN Service" inherit="top">
      <item name="Overview" href="hadoop-yarn/hadoop-yarn-site/yarn-service/Overview.html"/>
      <item name="QuickStart" href="hadoop-yarn/hadoop-yarn-site/yarn-service/QuickStart.html"/>
      <item name="Concepts" href="hadoop-yarn/hadoop-yarn-site/yarn-service/Concepts.html"/>
      <item name="Yarn Service API" href="hadoop-yarn/hadoop-yarn-site/yarn-service/YarnServiceAPI.html"/>
      <item name="Service Discovery" href="hadoop-yarn/hadoop-yarn-site/yarn-service/ServiceDiscovery.html"/>
    </menu>

    <menu name="Hadoop Compatible File Systems" inherit="top">

@@ -31,7 +31,7 @@ function hadoop_usage
  hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
  hadoop_add_option "--workers" "turn on worker mode"

  hadoop_add_subcommand "apiserver" "run yarn-native-service rest server"
  hadoop_add_subcommand "apiserver" daemon "run the api-server for deploying/managing services on YARN"
  hadoop_add_subcommand "application" client "prints application(s) report/kill application"
  hadoop_add_subcommand "applicationattempt" client "prints applicationattempt(s) report"
  hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"

@@ -50,10 +50,9 @@ function hadoop_usage
  hadoop_add_subcommand "router" daemon "run the Router daemon"
  hadoop_add_subcommand "schedulerconf" client "Updates scheduler configuration"
  hadoop_add_subcommand "scmadmin" admin "SharedCacheManager admin tools"
  hadoop_add_subcommand "apiserver" "run yarn-native-service rest server"
  hadoop_add_subcommand "sharedcachemanager" daemon "run the SharedCacheManager daemon"
  hadoop_add_subcommand "service" "run a service"
  hadoop_add_subcommand "sharedcachemanager" admin "run the SharedCacheManager daemon"
  hadoop_add_subcommand "service" client "deploy/manage a service on YARN"
  hadoop_add_subcommand "sharedcachemanager" daemon "run the SharedCacheManager daemon"
  hadoop_add_subcommand "timelinereader" client "run the timeline reader server"
  hadoop_add_subcommand "timelineserver" daemon "run the timeline server"
  hadoop_add_subcommand "top" client "view cluster information"
@@ -48,17 +48,16 @@ info:
    url: http://www.apache.org/licenses/LICENSE-2.0.html
# the domain of the service
host: host.mycompany.com
port: 9191(default)
# array of all schemes that your API supports
schemes:
  - http
# will be prefixed to all paths
basePath: /ws/v1/
consumes:
  - application/json
produces:
  - application/json
paths:
  /services/version:
  /ws/v1/services/version:
    get:
      summary: Get current version of the API server.
      description: Get current version of the API server.

@@ -66,7 +65,7 @@ paths:
        200:
          description: Successful request

  /services:
  /ws/v1/services:
    get:
      summary: (TBD) List of services running in the cluster.
      description: Get a list of all currently running services (response includes a minimal projection of the service info). For more details do a GET on a specific service name.

@@ -103,7 +102,7 @@ paths:
            schema:
              $ref: '#/definitions/ServiceStatus'

  /services/{service_name}:
  /ws/v1/services/{service_name}:
    put:
      summary: Update a service or upgrade the binary version of the components of a running service
      description: Update the runtime properties of a service. Currently the following operations are supported - update lifetime, stop/start a service.

@@ -174,7 +173,7 @@ paths:
          description: Unexpected error
          schema:
            $ref: '#/definitions/ServiceStatus'
  /services/{service_name}/components/{component_name}:
  /ws/v1/services/{service_name}/components/{component_name}:
    put:
      summary: Flex a component's number of instances.
      description: Set a component's desired number of instances
@@ -34,7 +34,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.service.client.params.SliderAMArgs;
import org.apache.hadoop.yarn.service.client.params.ServiceAMArgs;
import org.apache.hadoop.yarn.service.monitor.ServiceMonitor;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;

@@ -51,7 +51,7 @@ public class ServiceMaster extends CompositeService {
  private static final Logger LOG =
      LoggerFactory.getLogger(ServiceMaster.class);

  private static SliderAMArgs amArgs;
  private static ServiceAMArgs amArgs;
  protected ServiceContext context;

  public ServiceMaster(String name) {

@@ -108,7 +108,7 @@ public class ServiceMaster extends CompositeService {
  }

  protected Path getAppDir() {
    return new Path(amArgs.getAppDefPath()).getParent();
    return new Path(amArgs.getServiceDefPath()).getParent();
  }

  protected ServiceScheduler createServiceScheduler(ServiceContext context)

@@ -119,7 +119,7 @@ public class ServiceMaster extends CompositeService {
  protected void loadApplicationJson(ServiceContext context,
      SliderFileSystem fs) throws IOException {
    context.service = ServiceApiUtil
        .loadServiceFrom(fs, new Path(amArgs.getAppDefPath()));
        .loadServiceFrom(fs, new Path(amArgs.getServiceDefPath()));
    LOG.info(context.service.toString());
  }

@@ -138,7 +138,7 @@ public class ServiceMaster extends CompositeService {
  public static void main(String[] args) throws Exception {
    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
    StringUtils.startupShutdownMessage(ServiceMaster.class, args, LOG);
    amArgs = new SliderAMArgs(args);
    amArgs = new ServiceAMArgs(args);
    amArgs.parse();
    try {
      ServiceMaster serviceMaster = new ServiceMaster("Service Master");
@@ -18,11 +18,12 @@

package org.apache.hadoop.yarn.service.client;

import com.beust.jcommander.ParameterException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.client.params.ClientArgs;
import org.apache.hadoop.yarn.service.exceptions.BadCommandArgumentsException;
import org.apache.hadoop.yarn.service.exceptions.SliderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -101,7 +102,7 @@ public class ServiceCLI {
    ClientArgs clientArgs = new ClientArgs(args);
    try {
      clientArgs.parse();
    } catch (BadCommandArgumentsException e) {
    } catch (ParameterException | SliderException e) {
      System.err.println(e.getMessage());
      System.exit(-1);
    }
@@ -158,7 +158,7 @@ public class ServiceClient extends CompositeService

  private Service loadAppJsonFromLocalFS(
      AbstractClusterBuildingActionArgs args) throws IOException {
    File file = args.getAppDef();
    File file = args.getFile();
    Path filePath = new Path(file.getAbsolutePath());
    LOG.info("Loading app json from: " + filePath);
    Service service = jsonSerDeser

@@ -166,7 +166,6 @@ public class ServiceClient extends CompositeService
    if (args.lifetime > 0) {
      service.setLifetime(args.lifetime);
    }
    service.setName(args.getClusterName());
    return service;
  }

@@ -407,7 +406,8 @@ public class ServiceClient extends CompositeService

  public int actionDestroy(String serviceName) throws Exception {
    ServiceApiUtil.validateNameFormat(serviceName, getConfig());
    verifyNoLiveAppInRM(serviceName, "Destroy");
    verifyNoLiveAppInRM(serviceName, "destroy");

    Path appDir = fs.buildClusterDirPath(serviceName);
    FileSystem fileSystem = fs.getFileSystem();
    // remove from the appId cache

@@ -498,9 +498,15 @@ public class ServiceClient extends CompositeService
    request.setApplicationStates(liveStates);
    List<ApplicationReport> reports = yarnClient.getApplications(request);
    if (!reports.isEmpty()) {
      throw new YarnException(
          "Failed to " + action + " service, as " + serviceName
              + " already exists.");
      String message = "";
      if (action.equals("destroy")) {
        message = "Failed to destroy service " + serviceName
            + ", because it is still running.";
      } else {
        message = "Failed to " + action + " service " + serviceName
            + ", because it already exists.";
      }
      throw new YarnException(message);
    }
  }

@@ -592,7 +598,7 @@ public class ServiceClient extends CompositeService
    CLI.add(ServiceMaster.class.getCanonicalName());
    CLI.add(ACTION_CREATE, serviceName);
    //TODO debugAM CLI.add(Arguments.ARG_DEBUG)
    CLI.add(Arguments.ARG_CLUSTER_URI, new Path(appRootDir, serviceName + ".json"));
    CLI.add(Arguments.ARG_SERVICE_DEF_PATH, new Path(appRootDir, serviceName + ".json"));
    // pass the registry binding
    CLI.addConfOptionToCLI(conf, RegistryConstants.KEY_REGISTRY_ZK_ROOT,
        RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
@@ -40,23 +40,11 @@ public abstract class AbstractActionArgs extends ArgOps implements Arguments {
  protected AbstractActionArgs() {
  }

  /**
   * URI/binding to the filesystem
   */
  @Parameter(names = {ARG_FILESYSTEM, ARG_FILESYSTEM_LONG},
      description = "Filesystem Binding")
  public String filesystemBinding;

  @Parameter(names = {ARG_BASE_PATH},
      description = "Service base path on the filesystem",
      converter = PathArgumentConverter.class)
  public Path basePath;

  /**
   * This is the default parameter
   */
  @Parameter
  public final List<String> parameters = new ArrayList<>();
  public List<String> parameters = new ArrayList<>();

  /**
   * get the name: relies on arg 1 being the cluster name in all operations

@@ -76,29 +64,22 @@ public abstract class AbstractActionArgs extends ArgOps implements Arguments {
   */
  @Parameter(names = ARG_DEFINE, arity = 1, description = "Definitions")
  public final List<String> definitions = new ArrayList<>();
  @Parameter(names = ARG_DEFINE, arity = 1, description = "Definitions", hidden = true)
  public List<String> definitions = new ArrayList<>();

  /**
   * System properties
   */
  @Parameter(names = {ARG_SYSPROP}, arity = 1,
      description = "system properties in the form name value" +
          " These are set after the JVM is started.")
  public final List<String> sysprops = new ArrayList<>(0);
          " These are set after the JVM is started.",
      hidden = true)
  public List<String> sysprops = new ArrayList<>(0);

  @Parameter(names = {ARG_MANAGER_SHORT, ARG_MANAGER},
      description = "Binding (usually hostname:port) of the YARN resource manager")
  public String manager;

  @Parameter(names = ARG_DEBUG, description = "Debug mode")
  @Parameter(names = ARG_DEBUG, description = "Debug mode", hidden = true)
  public boolean debug = false;

  @Parameter(names = {ARG_HELP}, description = "Help", help = true)
  public boolean help = false;

  /**
   * Get the min #of params expected
   * @return the min number of params in the {@link #parameters} field
@@ -19,13 +19,8 @@
package org.apache.hadoop.yarn.service.client.params;

import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParametersDelegate;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.yarn.service.exceptions.BadCommandArgumentsException;

import java.io.File;
import java.util.List;
import java.util.Map;

/**
 * Abstract Action to build things; shares args across build and

@@ -33,12 +28,12 @@ import java.util.Map;
 */
public abstract class AbstractClusterBuildingActionArgs
    extends AbstractActionArgs {
  @Parameter(names = {ARG_APPDEF},
      description = "Template service definition file in JSON format.")
  public File appDef;
  @Parameter(names = { ARG_FILE, ARG_FILE_SHORT }, required = true,
      description = "The path to the service definition file in JSON format.")
  public File file;

  public File getAppDef() {
    return appDef;
  public File getFile() {
    return file;
  }

  @Parameter(names = {

@@ -48,11 +43,4 @@ public abstract class AbstractClusterBuildingActionArgs
  @Parameter(names = {
      ARG_LIFETIME }, description = "Lifetime of the service from the time of request")
  public long lifetime;

  @ParametersDelegate
  public ComponentArgsDelegate componentDelegate = new ComponentArgsDelegate();

  @ParametersDelegate
  public OptionArgsDelegate optionsDelegate =
      new OptionArgsDelegate();
}
@@ -28,4 +28,9 @@ public class ActionBuildArgs extends AbstractClusterBuildingActionArgs {
  public String getActionName() {
    return SliderActions.ACTION_BUILD;
  }

  @Override
  public int getMinParams() {
    return 0;
  }
}

@@ -29,5 +29,10 @@ public class ActionCreateArgs extends AbstractClusterBuildingActionArgs {
  public String getActionName() {
    return SliderActions.ACTION_CREATE;
  }

  @Override
  public int getMinParams() {
    return 0;
  }
}

@@ -32,8 +32,8 @@ public class ActionDependencyArgs extends AbstractActionArgs {
    return SliderActions.ACTION_DEPENDENCY;
  }

  @Parameter(names = { ARG_UPLOAD },
      description = "Upload AM and agent libraries to HDFS for this client")
  @Parameter(names = { ARG_UPLOAD }, required = true,
      description = "Upload AM libraries to HDFS for this client version")
  public boolean upload;

  @Parameter(names = { ARG_OVERWRITE },
@@ -0,0 +1,26 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.service.client.params;

import com.beust.jcommander.Parameters;

@Parameters(commandNames = { SliderActions.ACTION_EXAMPLES},
    commandDescription = SliderActions.DESCRIBE_ACTION_EXAMPLES)
public class ActionExamples {
}
@@ -18,12 +18,17 @@

package org.apache.hadoop.yarn.service.client.params;

import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;

@Parameters(commandNames = { SliderActions.ACTION_UPDATE},
    commandDescription = SliderActions.DESCRIBE_ACTION_UPDATE)

public class ActionUpdateArgs extends AbstractClusterBuildingActionArgs {
public class ActionUpdateArgs extends AbstractActionArgs {

  @Parameter(names = {
      ARG_LIFETIME }, description = "Lifetime of the service from the time of request")
  public long lifetime;

  @Override
  public String getActionName() {
@@ -27,7 +27,8 @@ package org.apache.hadoop.yarn.service.client.params;
 */
public interface Arguments {

  String ARG_APPDEF = "--appdef";
  String ARG_FILE = "--file";
  String ARG_FILE_SHORT = "-f";
  String ARG_BASE_PATH = "--basepath";
  String ARG_COMPONENT = "--component";
  String ARG_COMPONENT_SHORT = "--comp";

@@ -95,9 +96,7 @@ public interface Arguments {
     RIGHT PLACE IN THE LIST
   */

  /**
   * server: URI for the cluster
   */
  String ARG_CLUSTER_URI = "-cluster-uri";
  // The path in hdfs to be read by Service AM
  String ARG_SERVICE_DEF_PATH = "-cluster-uri";

}
@@ -85,16 +85,6 @@ public class ClientArgs extends CommonArgs {
  public void applyDefinitions(Configuration conf) throws
      BadCommandArgumentsException {
    super.applyDefinitions(conf);
    //RM
    if (getManager() != null) {
      log.debug("Setting RM to {}", getManager());
      conf.set(YarnConfiguration.RM_ADDRESS, getManager());
    }
    if (getBasePath() != null) {
      log.debug("Setting basePath to {}", getBasePath());
      conf.set(YarnServiceConf.YARN_SERVICE_BASE_PATH,
          getBasePath().toString());
    }
  }
@@ -156,12 +156,8 @@ public abstract class CommonArgs extends ArgOps implements SliderActions,
    try {
      commander.parse(args);
    } catch (ParameterException e) {
      throw new BadCommandArgumentsException(e, "%s in %s",
          e.toString(),
          (args != null
              ? (SliderUtils.join(args,
                  " ", false))
              : "[]"));
      commander.usage(args[0]);
      throw e;
    }
    //now copy back to this class some of the attributes that are common to all
    //actions

@@ -251,31 +247,11 @@ public abstract class CommonArgs extends ArgOps implements SliderActions,
    applyDefinitions(definitionMap, conf);
  }

  /**
   * If the Filesystem binding was provided, it overrides anything in
   * the configuration
   * @param conf configuration
   */
  public void applyFileSystemBinding(Configuration conf) {
    ArgOps.applyFileSystemBinding(getFilesystemBinding(), conf);
  }

  public boolean isDebug() {
    return coreAction.debug;
  }

  public String getFilesystemBinding() {
    return coreAction.filesystemBinding;
  }

  public Path getBasePath() { return coreAction.basePath; }

  public String getManager() {
    return coreAction.manager;
  }

  public String getAction() {
    return commander.getParsedCommand();
  }
@@ -22,11 +22,11 @@ package org.apache.hadoop.yarn.service.client.params;
/**
 * Parameters sent by the Client to the AM
 */
public class SliderAMArgs extends CommonArgs {
public class ServiceAMArgs extends CommonArgs {

  SliderAMCreateAction createAction = new SliderAMCreateAction();
  ServiceAMCreateAction createAction = new ServiceAMCreateAction();

  public SliderAMArgs(String[] args) {
  public ServiceAMArgs(String[] args) {
    super(args);
  }

@@ -35,16 +35,9 @@ public class SliderAMArgs extends CommonArgs {
    addActions(createAction);
  }

  public String getImage() {
    return createAction.image;
  }

  /**
   * This is the URI in the FS to the Slider cluster; the conf file (and any
   * other cluster-specifics) can be picked up here
   */
  public String getAppDefPath() {
    return createAction.sliderClusterURI;
  // This is the path in hdfs to the service definition JSON file
  public String getServiceDefPath() {
    return createAction.serviceDefPath;
  }

  /**
@@ -20,54 +20,18 @@ package org.apache.hadoop.yarn.service.client.params;

import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.beust.jcommander.ParametersDelegate;

import java.io.File;

@Parameters(commandNames = { SliderActions.ACTION_CREATE},
    commandDescription = SliderActions.DESCRIBE_ACTION_CREATE)

public class SliderAMCreateAction extends AbstractActionArgs implements
    LaunchArgsAccessor {

public class ServiceAMCreateAction extends AbstractActionArgs {

  @Override
  public String getActionName() {
    return SliderActions.ACTION_CREATE;
  }

  @Parameter(names = ARG_IMAGE, description = "image", required = false)
  public String image;

  /**
   * This is the URI in the FS to the Slider cluster; the conf file (and any
   * other cluster-specifics) can be picked up here
   */
  @Parameter(names = ARG_CLUSTER_URI,
      description = "URI to the Slider cluster", required = true)
  public String sliderClusterURI;

  @ParametersDelegate LaunchArgsDelegate launchArgs = new LaunchArgsDelegate();

  @Override
  public String getRmAddress() {
    return launchArgs.getRmAddress();
  }

  @Override
  public int getWaittime() {
    return launchArgs.getWaittime();
  }

  @Override
  public void setWaittime(int waittime) {
    launchArgs.setWaittime(waittime);
  }

  @Override
  public File getOutputFile() {
    return launchArgs.getOutputFile();
  }

  @Parameter(names = ARG_SERVICE_DEF_PATH,
      description = "Path to the service definition JSON file", required = true)
  public String serviceDefPath;
}
@@ -19,9 +19,7 @@
package org.apache.hadoop.yarn.service.client.params;

/**
 * Actions.
 * Only some of these are supported by specific Slider Services; they
 * are listed here to ensure the names are consistent
 * Actions by client
 */
public interface SliderActions {
  String ACTION_BUILD = "build";

@@ -32,11 +30,11 @@ public interface SliderActions {
  String ACTION_UPGRADE = "upgrade";
  String ACTION_DESTROY = "destroy";
  String ACTION_EXISTS = "exists";
  String ACTION_EXAMPLES = "examples";
  String ACTION_FLEX = "flex";
  String ACTION_STOP = "stop";
  String ACTION_HELP = "help";
  String ACTION_INSTALL_KEYTAB = "install-keytab";
  String ACTION_KDIAG = "kdiag";
  String ACTION_KEYTAB = "keytab";
  String ACTION_LIST = "list";

@@ -50,7 +48,7 @@ public interface SliderActions {
  String DESCRIBE_ACTION_BUILD =
      "Build a service specification, but do not start it";
  String DESCRIBE_ACTION_CREATE =
      "Build and start a service, it's equivalent to first invoke build and then start";
      "Create a service, it's equivalent to first invoke build and then start";
  String DESCRIBE_ACTION_DEPENDENCY =
      "Yarn service framework dependency (libraries) management";
  String DESCRIBE_ACTION_UPDATE =

@@ -61,6 +59,7 @@ public interface SliderActions {
      "Destroy a stopped service, service must be stopped first before destroying.";
  String DESCRIBE_ACTION_EXISTS =
      "Probe for a service running";
  String DESCRIBE_ACTION_EXAMPLES = "Run an example service on YARN";
  String DESCRIBE_ACTION_FLEX = "Flex a service's component by increasing or decreasing the number of containers.";
  String DESCRIBE_ACTION_FREEZE =
      "Stop a running service";
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.ConfigFile;
import org.apache.hadoop.yarn.service.api.records.Configuration;
import org.apache.hadoop.yarn.service.api.records.Container;
import org.apache.hadoop.yarn.service.api.records.ServiceState;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.slf4j.Logger;

@@ -96,8 +97,8 @@ public class ServiceTimelinePublisher extends CompositeService {
    // create info keys
    Map<String, Object> entityInfos = new HashMap<String, Object>();
    entityInfos.put(ServiceTimelineMetricsConstants.NAME, service.getName());
    // entityInfos.put(ServiceTimelineMetricsConstants.STATE,
    //     service.getState().toString());
    entityInfos.put(ServiceTimelineMetricsConstants.STATE,
        ServiceState.STARTED.toString());
    entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_TIME,
        currentTimeMillis);
    entity.addInfo(ServiceTimelineMetricsConstants.QUICK_LINKS,
@@ -36,7 +36,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;

import static org.apache.hadoop.yarn.service.client.params.Arguments.ARG_APPDEF;
import static org.apache.hadoop.yarn.service.client.params.Arguments.ARG_FILE;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH;

/**

@@ -56,12 +56,12 @@ public class TestBuildExternalComponents {
    }
  }

  // 1. Build the appDef and store on fs
  // 1. Build the def file and store on fs
  // 2. check component names
  private void buildAndCheckComponents(String appName, String appDef,
      SliderFileSystem sfs, Set<String> names) throws Throwable {
    String[] args =
        { "build", appName, ARG_APPDEF, ExampleAppJson.resourceName(appDef) };
        { "build", ARG_FILE, ExampleAppJson.resourceName(appDef) };
    ClientArgs clientArgs = new ClientArgs(args);
    clientArgs.parse();
    ServiceCLI cli = new ServiceCLI() {
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.service.ClientAMProtocol;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.client.params.ClientArgs;

@@ -43,7 +42,7 @@ import java.util.List;

import static org.apache.hadoop.yarn.conf.YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS;
import static org.apache.hadoop.yarn.service.client.params.Arguments.ARG_APPDEF;
import static org.apache.hadoop.yarn.service.client.params.Arguments.ARG_FILE;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH;
import static org.mockito.Mockito.*;

@@ -54,9 +53,9 @@ public class TestServiceCLI {
  private ServiceCLI cli;
  private SliderFileSystem fs;

  private void buildApp(String appName, String appDef) throws Throwable {
  private void buildApp(String appDef) throws Throwable {
    String[] args =
        { "build", appName, ARG_APPDEF, ExampleAppJson.resourceName(appDef) };
        { "build", ARG_FILE, ExampleAppJson.resourceName(appDef)};
    ClientArgs clientArgs = new ClientArgs(args);
    clientArgs.parse();
    cli.exec(clientArgs);

@@ -115,34 +114,35 @@ public class TestServiceCLI {
  // Test flex components count are persisted.
  @Test
  public void testFlexComponents() throws Throwable {
    buildApp("service-1", ExampleAppJson.APP_JSON);

    checkCompCount("master", 1L);
    String serviceName = "app-1";
    buildApp(ExampleAppJson.APP_JSON);
    checkCompCount("master",serviceName, 1L);

    // increase by 2
    String[] flexUpArgs = {"flex", "service-1", "--component", "master" , "+2"};
    String[] flexUpArgs = {"flex", serviceName, "--component", "master" , "+2"};
    ClientArgs clientArgs = new ClientArgs(flexUpArgs);
    clientArgs.parse();
    cli.exec(clientArgs);
    checkCompCount("master", 3L);
    checkCompCount("master", serviceName, 3L);

    // decrease by 1
    String[] flexDownArgs = {"flex", "service-1", "--component", "master", "-1"};
    String[] flexDownArgs = {"flex", serviceName, "--component", "master", "-1"};
    clientArgs = new ClientArgs(flexDownArgs);
    clientArgs.parse();
    cli.exec(clientArgs);
    checkCompCount("master", 2L);
    checkCompCount("master", serviceName, 2L);

    String[] flexAbsoluteArgs = {"flex", "service-1", "--component", "master", "10"};
    String[] flexAbsoluteArgs = {"flex", serviceName, "--component", "master", "10"};
    clientArgs = new ClientArgs(flexAbsoluteArgs);
    clientArgs.parse();
    cli.exec(clientArgs);
    checkCompCount("master", 10L);
    checkCompCount("master", serviceName, 10L);
  }

  private void checkCompCount(String compName, long count) throws IOException {
  private void checkCompCount(String compName, String serviceName, long count)
      throws IOException {
    List<Component> components =
        ServiceApiUtil.getComponents(fs, "service-1");
        ServiceApiUtil.getComponents(fs, serviceName);
    for (Component component : components) {
      if (component.getName().equals(compName)) {
        Assert.assertEquals(count, component.getNumberOfContainers().longValue());
@@ -1,5 +1,5 @@
{
  "name": "service-1",
  "name": "app-1",
  "id" : "application_1503358878042_0011",
  "lifetime": "3600",
  "launch_command": "sleep 3600",
@@ -95,7 +95,7 @@ public interface RegistryConstants {
  /**
   * Default DNS port number.
   */
  int DEFAULT_DNS_PORT = 53;
  int DEFAULT_DNS_PORT = 5353;

  /**
   * DNSSEC Enabled?
@@ -22,7 +22,7 @@ Overview

YARN commands are invoked by the bin/yarn script. Running the yarn script without any arguments prints the description for all commands.

Usage: `yarn [SHELL_OPTIONS] COMMAND [GENERIC_OPTIONS] [COMMAND_OPTIONS]`
Usage: `yarn [SHELL_OPTIONS] COMMAND [GENERIC_OPTIONS] [SUB_COMMAND] [COMMAND_OPTIONS]`

YARN has an option parsing framework that employs parsing generic options as well as running classes.
@@ -69,6 +69,56 @@ Usage: `yarn applicationattempt [options] `

prints applicationattempt(s) report

### `service`
Usage: `yarn service [sub-command] [service-name] [options]`

#### `SUB_COMMAND`

* `build`: Build a service with its specifications, but do not start it.
```
Usage: yarn service build --file [file]
```

| COMMAND\_OPTIONS | Description |
|:---- |:---- |
| --file or -f | The local path to the service definition file |

* `create`: Create a service; this is equivalent to first invoking build and then start.
```
Usage: yarn service create --file [file]
```
| COMMAND\_OPTIONS | Description |
|:---- |:---- |
| --file or -f | The local path to the service definition file |

* `dependency`: Yarn service framework dependency (libraries) management.
```
Usage: yarn service dependency [options]
```
| COMMAND\_OPTIONS | Description |
|:---- |:---- |
| --upload | Pre-upload the dependency jars onto HDFS to expedite the service launch process. |

* `destroy`: Destroy a stopped service; the service must be stopped before it can be destroyed.
```
Usage: yarn service destroy [service-name]
```
* `flex`: Flex a service's component by increasing or decreasing the number of containers.
```
Usage: yarn service flex [service-name] --component [component-name] [count]
```
| COMMAND\_OPTIONS | Description |
|:---- |:---- |
| --component [component-name] [count] | Specifies the component name and its number of containers, e.g. +1 increases the count by 1, -2 decreases it by 2, and 3 sets the final count to 3. |
* `status`: Get the status of a service.
```
Usage: yarn service status [service-name]
```
* `start`: Start a service with a pre-built specification, or a previously stopped service.
```
Usage: yarn service start [service-name]
```
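As an illustration of how the sub-commands above fit together, `create` is the one-step equivalent of running `build` followed by `start`. The file path and service name below are only examples; the service name comes from the `name` field inside the definition file:
```
# Build the service specification without starting it, then start it by name.
yarn service build --file /path/to/local/sleeper.json
yarn service start sleeper-service
```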

### `classpath`

Usage: `yarn classpath [--glob |--jar <path> |-h |--help]`

@@ -273,6 +323,10 @@ Usage: `yarn timelineserver`

Start the TimeLineServer

### apiserver
Usage: `yarn apiserver`
Start the API-server for deploying/managing services on YARN

Files
-----
@@ -1,107 +0,0 @@
<!---
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

# Introduction: YARN Native Services

## Overview
YARN Native Services provides first class framework support and APIs to host long running services natively in YARN. In addition to launching services, the new APIs support performing lifecycle management operations, such as flex service components up/down, manage lifetime, upgrade the service to a newer version, and stop/restart/delete the service.

The native services capabilities are built on the existing low-level resource management API provided by YARN that can support any type of application. Other application frameworks like Hadoop MapReduce already expose higher level APIs that users can leverage to run applications on top of YARN. With the advent of containerization technologies like Docker, providing first class support and APIs for long running services at the framework level made sense.

Relying on a framework has the advantage of exposing a simpler usage model to the user by enabling service configuration and launch through specification (without writing new code), as well as hiding complex low-level details including state management and fault-tolerance etc. Users/operators of existing services typically like to avoid modifying an existing service to be aware of YARN. With first class support capable of running a single Docker image as well as complex assemblies comprised of multiple Docker images, there is no need for service owners to be aware of YARN. Developers of new services do not have to worry about YARN internals and only need to focus on containerization of their service(s).

## First class support for services
In order to natively provide first class support for long running services, several new features and improvements have been made at the framework level.

### Incorporate Apache Slider into Apache YARN
Apache Slider, which existed as a separate incubator project has been merged into YARN to kick start the first class support. Apache Slider is a universal Application Master (AM) which had several key features built in - fault tolerance of service containers and AM, work-preserving AM restarts, service logs management, service management like flex up/down, stop/start, and rolling upgrade to newer service versions, etc. Of course lot more work has been done on top of what Apache Slider brought in, details of which follow.

### Native Services API
A significant effort has gone into simplifying the user facing story for building services. In the past, bringing a new service to YARN was not a pleasant experience. The APIs of existing frameworks are either too low-level (native YARN), require writing new code (for frameworks with programmatic APIs) or require writing a complex spec (for declarative frameworks).

The new REST APIs are very simple to use. The REST layer acts as a single point of entry for creation and lifecycle management of YARN services. Services here can range from simple single-component apps to the most complex, multi-component applications needing special orchestration needs.

Plan is to make this a unified REST based entry point for other important features like resource-profile management ([YARN-3926](https://issues.apache.org/jira/browse/YARN-4793)), package-definitions' lifecycle-management and service-discovery ([YARN-913](https://issues.apache.org/jira/browse/YARN-913)/[YARN-4757](https://issues.apache.org/jira/browse/YARN-4757)).

### Native Services Discovery
The new discovery solution exposes the registry information through a more generic and widely used mechanism: DNS. Service Discovery via DNS uses the well-known DNS interfaces to browse the network for services. Having the registry information exposed via DNS simplifies the life of services.

The previous read mechanisms of YARN Service Registry were limited to a registry specific (java) API and a REST interface. In practice, this made it very difficult for wiring up existing clients and services. For e.g., dynamic configuration of dependent endpoints of a service was not easy to implement using the registry-read mechanisms, **without** code-changes to existing services. These are solved by the DNS based service discovery.

### Scheduling
[YARN-6592](https://issues.apache.org/jira/browse/YARN-6592) covers a host of scheduling features that are useful for short-running applications and services alike. Below, are a few very important YARN core features that help schedule services better. Without these, running services on YARN is a hassle.

* Affinity (TBD)
* Anti-affinity (TBD)
* Gang scheduling (TBD)
* Malleable container sizes ([YARN-1197](https://issues.apache.org/jira/browse/YARN-1197))

### Resource Profiles
YARN always had support for memory as a resource, inheriting it from Hadoop-(1.x)’s MapReduce platform. Later support for CPU as a resource ([YARN-2](https://issues.apache.org/jira/browse/YARN-2)/[YARN-3](https://issues.apache.org/jira/browse/YARN-3)) was added. Multiple efforts added support for various other resource-types in YARN such as disk ([YARN-2139](https://issues.apache.org/jira/browse/YARN-2139)), and network ([YARN-2140](https://issues.apache.org/jira/browse/YARN-2140)), specifically benefiting long running services.

In many systems outside of YARN, users are already accustomed to specifying their desired ‘box’ of requirements where each box comes with a predefined amount of each resources. Admins would define various available box-sizes (small, medium, large etc) and users would pick the ones they desire and everybody is happy. In [YARN-3926](https://issues.apache.org/jira/browse/YARN-3926), YARN introduces Resource Profiles which extends the YARN resource model for easier resource-type management and profiles. This helps in two ways - the system can schedule applications better and it can perform intelligent over-subscription of resources where applicable.

Resource profiles are all the more important for services since -
* Similar to short running apps, you don’t have to fiddle with varying resource-requirements for each container type
* Services usually end up planning for peak usages, leaving a lot of possibility of barren utilization

### Special handling of preemption and container reservations
TBD.

Preemption and reservation of long running containers have different implications from regular ones. Preemption of resources in YARN today works by killing of containers. For long-lived services this is unacceptable. Also, scheduler should avoid allocating long running containers on borrowed resources. [YARN-4724](https://issues.apache.org/jira/browse/YARN-4724) will address some of these special recognition of service containers.

### Container auto-restarts
If a service container dies, expiring container's allocation and releasing the allocation is undesirable in many cases. Long running containers may exit for various reasons, crash and need to restart but forcing them to go through the complete scheduling cycle, resource localization, etc. is both unnecessary and expensive.

Services can enable app-specific policies to prevent NodeManagers to automatically restart containers. [YARN-3998](https://issues.apache.org/jira/browse/YARN-3998) implements a retry-policy to let NM re-launch a service container when it fails.

### Container allocation re-use for application upgrades
TBD.

Auto-restart of containers will support upgrade of service containers without reclaiming the resources first. During an upgrade, with multitude of other applications running in the system, giving up and getting back resources allocated to the service is hard to manage. Node-Labels help this cause but are not straight-forward to use to address the app-specific use-cases. The umbrella [YARN-4726](https://issues.apache.org/jira/browse/YARN-4726) along with [YARN-5620](https://issues.apache.org/jira/browse/YARN-5620) and [YARN-4470](https://issues.apache.org/jira/browse/YARN-4470) will take care of this.

### Dynamic Configurations
Most production-level services require dynamic configurations to manage and simplify their lifecycle. Container’s resource size, local/work dirs and log-dirs are the most basic information services need. Service's endpoint details (host/port), their inter-component dependencies, health-check endpoints, etc. are all critical to the success of today's real-life services.

### Resource re-localization for reconfiguration/upgrades
TBD

### Service Registry
TBD

### Service persistent storage and volume support
TBD

### Packaging
TBD

### Container image registry (private, public and hybrid)
TBD

### Container image management and APIs
TBD

### Container image storage
TBD

### Monitoring
TBD

### Metrics
TBD

### Service Logs
TBD
@@ -0,0 +1,77 @@
<!---
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

# Concepts
This document describes some key concepts and features that make YARN a first-class platform for natively supporting long running services.

### Service Framework (ApplicationMaster) on YARN
A container orchestration framework is implemented to help deploy services on YARN. In a nutshell, the framework is an ApplicationMaster that
requests containers from the ResourceManager based on the service definition provided by the user and launches the containers across the cluster adhering to placement policies.
It also does all the heavy lifting, such as resolving the service definition and configurations, managing component life cycles (for example, automatically restarting
failed containers), monitoring components' healthiness and readiness, ensuring dependency start order across components, flexing components up/down,
upgrading components, etc. The end goal of the framework is to make sure the service is up and running in the state the user desired.


### A Restful API-Server for deploying/managing services on YARN
A restful API server is developed to allow users to deploy/manage their services on YARN via a simple JSON spec. This saves users
from dealing with the low-level APIs and writing complex code to bring their services onto YARN. The REST layer acts as a unified REST based entry point for
creation and lifecycle management of YARN services. Services here can range from simple single-component apps to the most complex,
multi-component applications with special orchestration needs. Please refer to this [API doc](YarnServiceAPI.md) for detailed API documentation.

The API-server is stateless, which means users can simply spin up multiple instances and have a load balancer fronting them to
support HA, distribute the load, etc.

### Service Discovery
A DNS server is implemented to enable discovering services on YARN via the standard mechanism: DNS lookup.
The DNS server essentially exposes the information in the YARN service registry by translating it into DNS records such as A and SRV records.
Clients can discover the IPs of containers via standard DNS lookup.
The previous read mechanisms of the YARN Service Registry were limited to a registry-specific (java) API and a REST interface, which made it difficult
to wire up existing clients and services. The DNS based service discovery eliminates this gap. Please refer to this [DNS doc](ServiceDiscovery.md)
for more details.
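For illustration only, a client could resolve a container's address with an ordinary DNS query against the registry DNS server (which listens on port 5353 by default, per the registry constants changed in this commit). The record name below is purely hypothetical; the actual naming scheme is covered in the Service Discovery document:
```
# Hypothetical lookup of a container's A record via the YARN registry DNS server.
dig @<registry-dns-host> -p 5353 <container-instance>.<service-name>.<user>.<domain> A
```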

### Scheduling

A host of scheduling features are being developed to support long running services.

* Affinity and anti-affinity scheduling across containers ([YARN-6592](https://issues.apache.org/jira/browse/YARN-6592)).
* Container resizing ([YARN-1197](https://issues.apache.org/jira/browse/YARN-1197))
* Special handling of container preemption/reservation for services

### Container auto-restarts

[YARN-3998](https://issues.apache.org/jira/browse/YARN-3998) implements a retry-policy to let NM re-launch a service container when it fails.
The service REST API provides a way for users to enable the NodeManager to automatically restart a container if it fails.
The advantage is that it avoids the entire cycle of releasing the failed containers, re-requesting new containers, re-doing resource localization and so on, which
greatly minimizes container downtime.


### Container in-place upgrade

[YARN-4726](https://issues.apache.org/jira/browse/YARN-4726) aims to support upgrading containers in-place, that is, without losing the container allocations.
It opens up a few APIs in the NodeManager to allow ApplicationMasters to upgrade their containers via a simple API call.
Under the hood, the NodeManager performs the steps below:
* Download the new resources such as jars, docker container images, and new configurations.
* Stop the old container.
* Start the new container with the newly downloaded resources.

At the time of writing this document, the core changes are done but the feature is not usable end-to-end.

### Resource Profiles

In [YARN-3926](https://issues.apache.org/jira/browse/YARN-3926), YARN introduces Resource Profiles, which extend the YARN resource model for easier
resource-type management and profiles.
It primarily solves two problems:
* Make it easy to support new resource types such as network bandwidth ([YARN-2140](https://issues.apache.org/jira/browse/YARN-2140)) and disks ([YARN-2139](https://issues.apache.org/jira/browse/YARN-2139)).
Under the hood, it unifies the scheduler codebase to essentially parameterize the resource types.
* Users can specify the container resource requirement by a profile name, rather than fiddling with varying resource-requirements for each resource type.
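As a rough sketch of what this could look like in a service definition (the `profile` field is an assumption based on the Resource Profiles work and may not be available in this alpha release), a component could ask for a named profile instead of explicit values:
```
{
  "name": "sleeper",
  "number_of_containers": 1,
  "launch_command": "sleep 900000",
  "resource": {
    "profile": "small"
  }
}
```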

@@ -0,0 +1,58 @@
<!---
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

# YARN Service
## Overview
The Yarn Service framework provides first class support and APIs to host long running services natively in YARN.
In a nutshell, it serves as a container orchestration platform for managing containerized services on YARN. It supports both docker containers
and traditional process based containers in YARN.

The responsibilities of this framework include performing configuration resolutions and mounts,
lifecycle management such as stopping/starting/deleting the service, flexing service components up/down, rolling upgrades of services on YARN, monitoring services' healthiness and readiness, and more.

The yarn-service framework primarily includes the components below:

* A core framework (ApplicationMaster) running on YARN to serve as a container orchestrator, responsible for all service lifecycle management.
* A restful API-server for users to interact with YARN to deploy/manage their services via a simple JSON spec.
* A DNS server backed by the YARN service registry to enable discovering services on YARN via standard DNS lookup.

## Why should I try YARN Service framework?

The YARN Service framework makes it easy to bring existing services onto YARN.
It hides all the complex low-level details of application management and relieves
users from being forced to write new code. Developers of new services do not have
to worry about YARN internals and only need to focus on containerization of their
service(s).

Further, another huge win of this feature is that now you can enable both
traditional batch processing jobs and long running services in a single platform!
The benefits of combining these workloads are two-fold:

* Greatly simplified cluster operations, as you have only a single cluster to deal with.
* Making both batch jobs and services share a cluster can greatly improve resource utilization.

## How do I get started?

*`This feature is in alpha state`* and so APIs and command lines are subject to change. We will continue to update the documents over time.

[QuickStart](QuickStart.md) shows a quick tutorial that walks you through simple steps to deploy a service on YARN.

## How do I get my hands dirty?

* [Concepts](Concepts.md): Describes the internals of the framework and some features in YARN core to support running services on YARN.
* [Service REST API](YarnServiceAPI.md): The API doc for deploying/managing services on YARN.
* [Service Discovery](ServiceDiscovery.md): Deep dive into the YARN DNS internals.

@@ -0,0 +1,218 @@
<!---
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

# Quick Start

This document describes how to deploy services on YARN using the YARN Service framework.

<!-- MACRO{toc|fromDepth=0|toDepth=3} -->

## Start HDFS and YARN components

Start all the Hadoop components (HDFS, YARN) as usual.


## Example service
Below is a simple service definition that launches sleep containers on YARN by writing a simple spec file, without writing any code.

```
{
  "name": "sleeper-service",
  "components" :
    [
      {
        "name": "sleeper",
        "number_of_containers": 1,
        "launch_command": "sleep 900000",
        "resource": {
          "cpus": 1,
          "memory": "256"
        }
      }
    ]
}
```

For launching docker based services using the YARN Service framework, please refer to the [API doc](YarnServiceAPI.md).

## Manage services on YARN via CLI
The steps below walk you through deploying a service on YARN using the CLI.
Refer to [Yarn Commands](../YarnCommands.md) for the full list of commands and options.
### Deploy a service
```
yarn service create --file ${PATH_TO_SERVICE_DEF_FILE}
```
Params:
- SERVICE_NAME: The name of the service. Note that this needs to be unique across all running services.
- PATH_TO_SERVICE_DEF: The path to the service definition file in JSON format.

For example:
```
yarn service create --file /path/to/local/sleeper.json
```

### Flex a component of a service
Increase or decrease the number of containers for a component.
```
yarn service flex ${SERVICE_NAME} --component ${COMPONENT_NAME} ${NUMBER_OF_CONTAINERS}
```
For example, for a service named `sleeper-service`:

Set the `sleeper` component to `2` containers (absolute number).

```
yarn service flex sleeper-service --component sleeper 2
```
|
||||
|
||||
### Stop a service
Stopping a service stops all containers of the service and the ApplicationMaster, but does not delete the state of the service, such as the service root folder on HDFS.
```
yarn service stop ${SERVICE_NAME}
```

### Restart a stopped service
Restarting a stopped service is easy - just call start!
```
yarn service start ${SERVICE_NAME}
```

### Destroy a service
In addition to stopping the service, destroy also deletes the service root folder on HDFS and the records in the YARN Service Registry.
```
yarn service destroy ${SERVICE_NAME}
```

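For example, the full lifecycle of the `sleeper-service` above can be exercised by stringing together the commands shown in this section (a sketch; the spec path is illustrative):
```
# deploy the service from the local spec file
yarn service create --file /path/to/local/sleeper.json

# scale the sleeper component to 2 containers
yarn service flex sleeper-service --component sleeper 2

# stop the service, keeping its state on HDFS
yarn service stop sleeper-service

# start it again from the retained state
yarn service start sleeper-service

# remove the service state and registry records for good
yarn service destroy sleeper-service
```
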
## Manage services on YARN via REST API
The steps below walk you through deploying services on YARN via the REST API.
Refer to the [API doc](YarnServiceAPI.md) for the detailed API specifications.
### Start the API server for deploying services on YARN
The API server is the daemon that sits in front of the YARN ResourceManager and lets users submit their service specs via HTTP.
```
yarn --daemon start apiserver
```
The above command starts the API server on localhost at port 9191 by default.

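To verify that the API server is up, you can hit its version endpoint (described in the API doc); a minimal sketch assuming the default host and port:
```
curl http://localhost:9191/ws/v1/services/version
```
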
### Deploy a service
POST the aforementioned example service definition to the api-server endpoint:
```
POST http://localhost:9191/ws/v1/services
```

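For example, a minimal `curl` sketch, assuming the example spec above has been saved locally as `sleeper.json` (an illustrative file name):
```
curl -X POST -H "Content-Type: application/json" \
  -d @sleeper.json \
  http://localhost:9191/ws/v1/services
```
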
### Get a service status
```
GET http://localhost:9191/ws/v1/services/${SERVICE_NAME}
```

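For example, a `curl` sketch that fetches the status of the `sleeper-service` deployed above, assuming the default host and port:
```
curl http://localhost:9191/ws/v1/services/sleeper-service
```
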
### Flex a component of a service
```
PUT http://localhost:9191/ws/v1/services/${SERVICE_NAME}/components/${COMPONENT_NAME}
```
`PUT` Request body:
```
{
  "name": "${COMPONENT_NAME}",
  "number_of_containers": ${COUNT}
}
```
For example:
```
{
  "name": "sleeper",
  "number_of_containers": 2
}
```

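Putting the endpoint and body together, a `curl` sketch that flexes the `sleeper` component of `sleeper-service` to 2 containers:
```
curl -X PUT -H "Content-Type: application/json" \
  -d '{"name": "sleeper", "number_of_containers": 2}' \
  http://localhost:9191/ws/v1/services/sleeper-service/components/sleeper
```
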
### Stop a service
Stopping a service stops all containers of the service and the ApplicationMaster, but does not delete the state of the service, such as the service root folder on HDFS.

```
PUT http://localhost:9191/ws/v1/services/${SERVICE_NAME}
```

`PUT` Request body:
```
{
  "name": "${SERVICE_NAME}",
  "state": "STOPPED"
}
```

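As a concrete `curl` sketch for `sleeper-service` (restarting, described below, works the same way with `"state": "STARTED"`):
```
curl -X PUT -H "Content-Type: application/json" \
  -d '{"name": "sleeper-service", "state": "STOPPED"}' \
  http://localhost:9191/ws/v1/services/sleeper-service
```
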
### Restart a stopped service
Restarting a stopped service is easy.

```
PUT http://localhost:9191/ws/v1/services/${SERVICE_NAME}
```

`PUT` Request body:
```
{
  "name": "${SERVICE_NAME}",
  "state": "STARTED"
}
```
### Destroy a service
In addition to stopping the service, destroy also deletes the service root folder on HDFS and the records in the YARN Service Registry.
```
DELETE http://localhost:9191/ws/v1/services/${SERVICE_NAME}
```

## Services UI with YARN UI2 and Timeline Service v2
A new `service` tab is added in YARN UI2 specifically to show YARN services in a first-class manner.
The service framework posts its data into Timeline Service v2, and the `service` UI reads data from Timeline Service v2 to render its content.

### Enable Timeline Service v2
Please refer to the [Timeline Service v2 doc](../TimelineServiceV2.md) for how to enable Timeline Service v2.

### Enable the new YARN UI

Set the config below in `yarn-site.xml` and start the ResourceManager.
If you are building from source code, make sure you use `-Pyarn-ui` in the `mvn` command - this will generate the war file for the new YARN UI.
```
<property>
  <description>To enable RM web ui2 application.</description>
  <name>yarn.webapp.ui2.enable</name>
  <value>true</value>
</property>
```

## Service Discovery with YARN DNS
The YARN Service framework comes with a DNS server (backed by the YARN Service Registry) which enables DNS-based discovery of services deployed on YARN.
That is, users can simply access their services through a well-defined naming format as below:

```
${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}
```
For example, in a cluster whose domain name is `yarncluster` (as defined by `hadoop.registry.dns.domain-name` in `yarn-site.xml`), a service named `hbase` deployed by user `dev`
with two components `hbasemaster` and `regionserver` can be accessed as below:

This URL points to the usual HBase master UI:
```
http://hbasemaster-0.hbase.dev.yarncluster:16010/master-status
```

Note that the YARN Service framework assigns a COMPONENT_INSTANCE_NAME to each container using a sequence of monotonically increasing integers. For example, `hbasemaster-0` gets
assigned `0` since it is the first and only instance of the `hbasemaster` component. The `regionserver` component can have multiple containers,
which are named `regionserver-0`, `regionserver-1`, `regionserver-2`, etc.

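To query such records directly against the registry DNS server, a hedged `dig` sketch, assuming the DNS server runs on a host named `dns-host` (illustrative) on the default port `5353`:
```
# forward lookup of the hbase master instance record
dig @dns-host -p 5353 hbasemaster-0.hbase.dev.yarncluster
```
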
`Disclaimer`: The DNS implementation is still experimental. It should not be used as a fully-functional corporate DNS.

### Start the DNS server
By default, the DNS server runs on non-privileged port `5353`.
If it is configured to use the standard privileged port `53`, the DNS server needs to be run as root:
```
sudo su - -c "yarn org.apache.hadoop.registry.server.dns.RegistryDNSServer > /${HADOOP_LOG_FOLDER}/registryDNS.log 2>&1 &" root
```
Please refer to the [YARN DNS doc](ServiceDiscovery.md) for the full list of configurations.

@@ -40,7 +40,7 @@ The following core functions are supported by the DNS-Server:
1. Supports creation of DNS records for end-points of the deployed YARN applications
2. Record names remain unchanged during restart of containers and/or applications
3. Supports reverse lookups (name based on IP).
3. Supports reverse lookups (name based on IP). Note, this works only for Docker containers.
4. Supports security using the standards defined by The Domain Name System Security Extensions (DNSSEC)
5. Highly available

@@ -55,7 +55,7 @@ a DNS server for a Hadoop cluster zone/domain. The server is not intended to act
primary DNS server and does not forward requests to other servers.
2. The DNS Server exposes a port that can receive both TCP and UDP requests per
DNS standards. The default port for DNS protocols is in a restricted, administrative port
range (53), so the port is configurable for deployments in which the service may
range (5353), so the port is configurable for deployments in which the service may
not be managed via an administrative account.

## DNS Record Name Structure

@@ -101,7 +101,6 @@ application name. These application names have to be unique for a given user.
The primary functions of the DNS service are illustrated in the following diagram:


![DNS Functional Overview](../images/dns_overview.png "DNS Functional Overview")

### DNS record creation

@@ -120,12 +119,19 @@ requiring similar parsing logic to identify the specific records that should be
### DNS Service initialization
* The DNS service initializes both UDP and TCP listeners on a configured port. As
noted above, the default port of 53 is in a restricted range that is only accessible to an
noted above, the default port of 5353 is in a restricted range that is only accessible to an
account with administrative privileges.
* Subsequently, the DNS service listens for inbound DNS requests. Those requests are
standard DNS requests from users or other DNS servers (for example, DNS servers that have the
YARN DNS service configured as a forwarder).

## Start the DNS Server
By default, the DNS server runs on non-privileged port `5353`.
If it is configured to use the standard privileged port `53`, the DNS server needs to be run as root:
```
sudo su - -c "yarn org.apache.hadoop.registry.server.dns.RegistryDNSServer > /${HADOOP_LOG_FOLDER}/registryDNS.log 2>&1 &" root
```

## Configuration
The YARN DNS server reads its configuration properties from the yarn-site.xml file. The following are the DNS-related configuration properties:

@@ -134,7 +140,7 @@ The YARN DNS server reads its configuration properties from the yarn-site.xml fi
| hadoop.registry.dns.enabled | The DNS functionality is enabled for the cluster. Default is false. |
| hadoop.registry.dns.domain-name | The domain name for Hadoop cluster associated records. |
| hadoop.registry.dns.bind-address | Address associated with the network interface to which the DNS listener should bind. |
| hadoop.registry.dns.bind-port | The port number for the DNS listener. The default port is 53. However, since that port falls in an administrator-only range, typical deployments may need to specify an alternate port. |
| hadoop.registry.dns.bind-port | The port number for the DNS listener. The default port is 5353. However, since that port falls in an administrator-only range, typical deployments may need to specify an alternate port. |
| hadoop.registry.dns.dnssec.enabled | Indicates whether the DNSSEC support is enabled. Default is false. |
| hadoop.registry.dns.public-key | The base64 representation of the server's public key. Leveraged for creating the DNSKEY Record provided for DNSSEC client requests. |
| hadoop.registry.dns.private-key-file | The path to the standard DNSSEC private key file. Must only be readable by the DNS launching identity. See [dnssec-keygen](https://ftp.isc.org/isc/bind/cur/9.9/doc/arm/man.dnssec-keygen.html) documentation. |

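As an illustrative sketch (not a complete configuration), a minimal DNS-related block in `yarn-site.xml` using the property names from the table above; the domain value is just an example:
```
<property>
  <name>hadoop.registry.dns.enabled</name>
  <value>true</value>
</property>
<property>
  <name>hadoop.registry.dns.domain-name</name>
  <value>yarncluster</value>
</property>
<property>
  <name>hadoop.registry.dns.bind-port</name>
  <value>5353</value>
</property>
```
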
@@ -12,33 +12,19 @@
limitations under the License. See accompanying LICENSE file.
-->

# YARN Simplified API layer for services
# YARN Service API

## Overview
Bringing a new service on YARN today is not a simple experience. The APIs of
existing frameworks are either too low level (native YARN), require writing
new code (for frameworks with programmatic APIs) or writing a complex spec
(for declarative frameworks). In addition to building critical building blocks
inside YARN (as part of other efforts at
[YARN-4692](https://issues.apache.org/jira/browse/YARN-4692)), there is a need for
simplifying the user facing story for building services. Experience of projects
like Apache Slider running real-life services like HBase, Storm, Accumulo,
Solr etc, gives us some very good insights on how simplified APIs for services
should look like.
Bringing a new service on YARN today is not a simple experience. The APIs of existing
frameworks are either too low level (native YARN), require writing new code (for frameworks with programmatic APIs)
or writing a complex spec (for declarative frameworks).

To this end, we should look at a new simple-services API layer backed by REST
interfaces. This API can be used to create and manage the lifecycle of YARN
services. Services here can range from simple single-component service to
complex multi-component assemblies needing orchestration.
[YARN-4793](https://issues.apache.org/jira/browse/YARN-4793) tracks this
effort.
This simplified REST API can be used to create and manage the lifecycle of YARN services.
In most cases, the application owner will not be forced to make any changes to their applications.
This is primarily true if the application is packaged with containerization technologies like Docker.

This document spotlights on this specification. In most of the cases, the
application owner will not be forced to make any changes to their applications.
This is primarily true if the application is packaged with containerization
technologies like docker. Irrespective of how complex the application is,
there will be hooks provided at appropriate layers to allow pluggable and
customizable application behavior.
This document describes the API specifications (aka. YarnFile) for deploying/managing
containerized services on YARN. The same JSON spec can be used for both REST API
and CLI to manage the services.

### Version information

@@ -51,7 +37,7 @@ License URL: http://www.apache.org/licenses/LICENSE-2.0.html
### URI scheme
Host: host.mycompany.com

BasePath: /ws/v1/
Port: 9191 (default)

Schemes: HTTP

@@ -68,7 +54,7 @@ Schemes: HTTP
## Paths
### Create a service
```
POST /services
POST /ws/v1/services
```

#### Description

@@ -92,7 +78,7 @@ Create a service. The request JSON is a service object with details required for
### (TBD) List of services running in the cluster.
```
GET /services
GET /ws/v1/services
```

#### Description

@@ -108,7 +94,7 @@ Get a list of all currently running services (response includes a minimal projec
### Get current version of the API server.
```
GET /services/version
GET /ws/v1/services/version
```

#### Description

@@ -123,7 +109,7 @@ Get current version of the API server.
### Update a service or upgrade the binary version of the components of a running service
```
PUT /services/{service_name}
PUT /ws/v1/services/{service_name}
```

#### Description

@@ -147,7 +133,7 @@ Update the runtime properties of a service. Currently the following operations a
### Destroy a service
```
DELETE /services/{service_name}
DELETE /ws/v1/services/{service_name}
```

#### Description

@@ -170,7 +156,7 @@ Destroy a service and release all resources. This API might have to return JSON
### Get details of a service.
```
GET /services/{service_name}
GET /ws/v1/services/{service_name}
```

#### Description

@@ -193,7 +179,7 @@ Return the details (including containers) of a running service
### Flex a component's number of instances.
```
PUT /services/{service_name}/components/{component_name}
PUT /ws/v1/services/{service_name}/components/{component_name}
```

#### Description