YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He
Parent: 024e90a508
Commit: c31cd981eb
@@ -30,8 +30,6 @@ public interface ApplicationApi {
Response getApplications(String state);
Response getApplication(String appName);
Response deleteApplication(String appName);
Response updateApplication(String appName, Application updateAppData);
File diff suppressed because it is too large
@@ -17,7 +17,7 @@
package org.apache.hadoop.yarn.services.webapp;
import static org.apache.hadoop.yarn.services.utils.RestApiConstants.*;
import static org.apache.slider.util.RestApiConstants.*;
import java.io.IOException;
import java.net.InetAddress;
@@ -17,16 +17,15 @@
package org.apache.hadoop.yarn.services.api.impl;
import static org.apache.hadoop.yarn.services.utils.RestApiConstants.*;
import static org.apache.hadoop.yarn.services.utils.RestApiErrorMessages.*;
import static org.apache.slider.util.RestApiConstants.*;
import static org.apache.slider.util.RestApiErrorMessages.*;
import java.util.HashMap;
import java.util.Map;
import java.util.ArrayList;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Artifact;
import org.apache.slider.api.resource.Resource;
import org.apache.slider.common.SliderKeys;
import org.apache.slider.util.ServiceApiUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;

@@ -61,12 +60,10 @@ public class TestApplicationApiService {
@Test(timeout = 90000)
public void testValidateApplicationPostPayload() throws Exception {
Application app = new Application();
Map<String, String> compNameArtifactIdMap = new HashMap<>();
// no name
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(EXCEPTION_PREFIX + "application with no name");
} catch (IllegalArgumentException e) {
Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage());

@@ -77,8 +74,7 @@ public class TestApplicationApiService {
for (String badName : badNames) {
app.setName(badName);
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(EXCEPTION_PREFIX + "application with bad name " + badName);
} catch (IllegalArgumentException e) {
Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID_FORMAT,

@@ -89,8 +85,7 @@ public class TestApplicationApiService {
// no artifact
app.setName("finance_home");
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(EXCEPTION_PREFIX + "application with no artifact");
} catch (IllegalArgumentException e) {
Assert.assertEquals(ERROR_ARTIFACT_INVALID, e.getMessage());

@@ -100,8 +95,7 @@ public class TestApplicationApiService {
Artifact artifact = new Artifact();
app.setArtifact(artifact);
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
} catch (IllegalArgumentException e) {
Assert.assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());

@@ -112,8 +106,7 @@ public class TestApplicationApiService {
artifact.setId("app.io/hbase:facebook_0.2");
app.setNumberOfContainers(5l);
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
} catch (IllegalArgumentException e) {
logger.error("application attributes specified should be valid here", e);
Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());

@@ -124,22 +117,18 @@ public class TestApplicationApiService {
Assert.assertEquals(app.getComponents().get(0).getName(),
DEFAULT_COMPONENT_NAME);
Assert.assertEquals(app.getLifetime(), DEFAULT_UNLIMITED_LIFETIME);
Assert.assertEquals("Property not set",
app.getConfiguration().getProperties()
.get(SliderKeys.COMPONENT_TYPE_KEY),
SliderKeys.COMPONENT_TYPE_EXTERNAL_APP);
//TODO handle external app
// unset artifact type, default component and no of containers to test other
// validation logic
artifact.setType(null);
app.setComponents(null);
app.setComponents(new ArrayList<>());
app.setNumberOfContainers(null);
// resource not specified
artifact.setId("docker.io/centos:centos7");
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(EXCEPTION_PREFIX + "application with no resource");
} catch (IllegalArgumentException e) {
Assert.assertEquals(ERROR_RESOURCE_INVALID, e.getMessage());

@@ -149,28 +138,18 @@ public class TestApplicationApiService {
Resource res = new Resource();
app.setResource(res);
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(EXCEPTION_PREFIX + "application with no memory");
} catch (IllegalArgumentException e) {
Assert.assertEquals(ERROR_RESOURCE_MEMORY_INVALID, e.getMessage());
}
// cpus not specified
res.setMemory("2gb");
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
Assert.fail(EXCEPTION_PREFIX + "application with no cpu");
} catch (IllegalArgumentException e) {
Assert.assertEquals(ERROR_RESOURCE_CPUS_INVALID, e.getMessage());
}
// cpu does not need to be always specified, it's an optional feature in yarn
// invalid no of cpus
res.setMemory("100mb");
res.setCpus(-2);
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(
EXCEPTION_PREFIX + "application with invalid no of cpups");
} catch (IllegalArgumentException e) {

@@ -180,8 +159,7 @@ public class TestApplicationApiService {
// number of containers not specified
res.setCpus(2);
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(
EXCEPTION_PREFIX + "application with no container count");
} catch (IllegalArgumentException e) {

@@ -191,8 +169,7 @@ public class TestApplicationApiService {
// specifying profile along with cpus/memory raises exception
res.setProfile("hbase_finance_large");
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(EXCEPTION_PREFIX
+ "application with resource profile along with cpus/memory");
} catch (IllegalArgumentException e) {

@@ -205,8 +182,7 @@ public class TestApplicationApiService {
res.setCpus(null);
res.setMemory(null);
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
Assert.fail(EXCEPTION_PREFIX
+ "application with resource profile only - NOT SUPPORTED");
} catch (IllegalArgumentException e) {

@@ -222,8 +198,7 @@ public class TestApplicationApiService {
// everything valid here
app.setNumberOfContainers(5l);
try {
appApiService.validateApplicationPostPayload(app,
compNameArtifactIdMap);
ServiceApiUtil.validateApplicationPostPayload(app);
} catch (IllegalArgumentException e) {
logger.error("application attributes specified should be valid here", e);
Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
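The hunks above replace the instance call appApiService.validateApplicationPostPayload(app, compNameArtifactIdMap) with the static ServiceApiUtil.validateApplicationPostPayload(app). The following is a minimal illustrative sketch of the new call path, not part of the patch; it only uses setters and error behaviour that appear in the test above, and everything else (class name, values) is illustrative.

import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Artifact;
import org.apache.slider.api.resource.Resource;
import org.apache.slider.util.ServiceApiUtil;

public class ValidationSketch {
  public static void main(String[] args) throws Exception {
    Application app = new Application();
    app.setName("finance_home");                 // values mirror the test fixture
    Artifact artifact = new Artifact();
    artifact.setId("docker.io/centos:centos7");
    app.setArtifact(artifact);
    Resource res = new Resource();
    res.setMemory("2gb");
    res.setCpus(2);
    app.setResource(res);
    app.setNumberOfContainers(5L);
    // Throws IllegalArgumentException on an invalid payload, which is what the
    // test's EXCEPTION_PREFIX branches assert.
    ServiceApiUtil.validateApplicationPostPayload(app);
  }
}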
@@ -59,14 +59,6 @@ public interface SliderApplicationApi {
*/
ConfTreeOperations getDesiredResources() throws IOException;
/**
* Put an updated resources structure. This triggers a cluster flex
* operation
* @param updated updated resources
* @throws IOException on any problem.
*/
void putDesiredResources(ConfTree updated) throws IOException;
/**
* Get the aggregate resolved model
* @return the aggregate configuration of what was asked for
@@ -52,12 +52,9 @@ public interface SliderClusterProtocol extends VersionedProtocol {
Messages.UpgradeContainersRequestProto request) throws IOException,
YarnException;
/**
* Flex the cluster.
*/
Messages.FlexClusterResponseProto flexCluster(Messages.FlexClusterRequestProto request)
throws IOException;
Messages.FlexComponentResponseProto flexComponent(
Messages.FlexComponentRequestProto request) throws IOException;
/**
* Get the current cluster status

@@ -120,13 +117,6 @@ public interface SliderClusterProtocol extends VersionedProtocol {
Messages.AMSuicideResponseProto amSuicide(Messages.AMSuicideRequestProto request)
throws IOException;
/**
* Get the instance definition
*/
Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
Messages.GetInstanceDefinitionRequestProto request)
throws IOException, YarnException;
/**
* Get the application liveness
* @return current liveness information
@@ -28,6 +28,7 @@ import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

@@ -55,11 +56,11 @@ public class Application extends BaseResource {
private Long numberOfRunningContainers = null;
private Long lifetime = null;
private PlacementPolicy placementPolicy = null;
private List<Component> components = null;
private Configuration configuration = null;
private List<Component> components = new ArrayList<>();
private Configuration configuration = new Configuration();
private List<Container> containers = new ArrayList<>();
private ApplicationState state = null;
private Map<String, String> quicklinks = null;
private Map<String, String> quicklinks = new HashMap<>();
private String queue = null;
/**

@@ -285,6 +286,15 @@ public class Application extends BaseResource {
this.components = components;
}
public Component getComponent(String name) {
for (Component component : components) {
if (component.getName().equals(name)) {
return component;
}
}
return null;
}
/**
* Config properties of an application. Configurations provided at the
* application/global level are available to all the components. Specific
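Application now initializes components, configuration, containers, and quicklinks eagerly and gains a getComponent(String) lookup. A short illustrative sketch, not part of the patch; Component.setName is assumed to exist on the model, while getComponent and the pre-initialized list come from the hunks above.

import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;

public class ComponentLookupSketch {
  public static void main(String[] args) {
    Application app = new Application();
    Component worker = new Component();
    worker.setName("worker");              // setter assumed on the swagger model
    app.getComponents().add(worker);       // list is pre-initialized, so no null check is needed
    // getComponent is a linear scan by name and returns null when nothing matches.
    System.out.println(app.getComponent("worker") != null);
  }
}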
@@ -22,6 +22,7 @@ import io.swagger.annotations.ApiModelProperty;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

@@ -49,15 +50,17 @@ public class Component implements Serializable {
private String name = null;
private List<String> dependencies = new ArrayList<String>();
private ReadinessCheck readinessCheck = null;
private Artifact artifact = null;
private Artifact artifact = new Artifact();
private String launchCommand = null;
private Resource resource = null;
private Resource resource = new Resource();
private Long numberOfContainers = null;
private Boolean uniqueComponentSupport = null;
private Boolean runPrivilegedContainer = null;
private Boolean uniqueComponentSupport = false;
private Boolean runPrivilegedContainer = false;
private PlacementPolicy placementPolicy = null;
private Configuration configuration = null;
private Configuration configuration = new Configuration();
private List<String> quicklinks = new ArrayList<String>();
private List<Container> containers =
Collections.synchronizedList(new ArrayList<Container>());
/**
* Name of the application component (mandatory).

@@ -196,6 +199,29 @@ public class Component implements Serializable {
this.numberOfContainers = numberOfContainers;
}
@ApiModelProperty(example = "null", value = "Containers of a started component. Specifying a value for this attribute for the POST payload raises a validation error. This blob is available only in the GET response of a started application.")
@JsonProperty("containers")
public List<Container> getContainers() {
return containers;
}
public void setContainers(List<Container> containers) {
this.containers = containers;
}
public void addContainer(Container container) {
this.containers.add(container);
}
public Container getContainer(String id) {
for (Container container : containers) {
if (container.getId().equals(id)) {
return container;
}
}
return null;
}
/**
* Certain applications need to define multiple components using the same
* artifact and resource profile, differing only in configurations. In such

@@ -354,6 +380,8 @@ public class Component implements Serializable {
sb.append(" resource: ").append(toIndentedString(resource)).append("\n");
sb.append(" numberOfContainers: ")
.append(toIndentedString(numberOfContainers)).append("\n");
sb.append(" containers: ").append(toIndentedString(containers))
.append("\n");
sb.append(" uniqueComponentSupport: ")
.append(toIndentedString(uniqueComponentSupport)).append("\n");
sb.append(" runPrivilegedContainer: ")
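Component now carries a synchronized container list with addContainer/getContainer helpers. A hedged usage sketch, not part of the patch; Container.setId is assumed to exist, while getId is what getContainer compares against in the hunk above, and the container id string is purely illustrative.

import org.apache.slider.api.resource.Component;
import org.apache.slider.api.resource.Container;

public class ContainerTrackingSketch {
  public static void main(String[] args) {
    Component comp = new Component();
    Container container = new Container();
    container.setId("container_01_000002");   // illustrative id; setter assumed on the model
    comp.addContainer(container);
    // Lookup is a linear scan over the synchronized list; returns null when absent.
    System.out.println(comp.getContainer("container_01_000002") != null);
  }
}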
@@ -21,6 +21,7 @@ import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.io.Serializable;
import java.util.Map;
import java.util.Objects;
import javax.xml.bind.annotation.XmlElement;

@@ -62,7 +63,7 @@ public class ConfigFile implements Serializable {
private TypeEnum type = null;
private String destFile = null;
private String srcFile = null;
private Object props = null;
private Map<String, String> props = null;
/**
* Config file in the standard format like xml, properties, json, yaml,

@@ -104,6 +105,8 @@ public class ConfigFile implements Serializable {
}
/**
* TODO this probably is not required for non-template configs. It is now used as symlink for localization for non-template configs - we could infer the name from destFile instead
*
* Required for type template. This provides the source location of the
* template which needs to be mounted as dest_file post property
* substitutions. Typically the src_file would point to a source controlled

@@ -131,21 +134,36 @@ public class ConfigFile implements Serializable {
* src_file is mandatory and the src_file content is dumped to dest_file post
* property substitutions.
**/
public ConfigFile props(Object props) {
public ConfigFile props(Map<String, String> props) {
this.props = props;
return this;
}
@ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file post property substitutions.")
@JsonProperty("props")
public Object getProps() {
public Map<String, String> getProps() {
return props;
}
public void setProps(Object props) {
public void setProps(Map<String, String> props) {
this.props = props;
}
public long getLong(String name, long defaultValue) {
if (name == null) {
return defaultValue;
}
String value = props.get(name.trim());
return Long.parseLong(value);
}
public boolean getBoolean(String name, boolean defaultValue) {
if (name == null) {
return defaultValue;
}
return Boolean.valueOf(props.get(name.trim()));
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
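ConfigFile.props changes from an untyped Object to Map<String, String> and gains getLong/getBoolean readers. A minimal sketch, not part of the patch; it assumes the no-arg constructor these swagger-generated models usually have, uses purely hypothetical keys, and relies on the fact that getLong as written above only falls back to the default when the name is null, so the key must be present and numeric.

import java.util.HashMap;
import java.util.Map;
import org.apache.slider.api.resource.ConfigFile;

public class ConfigFilePropsSketch {
  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    props.put("rotation.hours", "24");        // hypothetical keys, for illustration only
    props.put("compress.enabled", "true");

    ConfigFile file = new ConfigFile().props(props);   // fluent setter from the hunk above
    long hours = file.getLong("rotation.hours", 0L);
    boolean compress = file.getBoolean("compress.enabled", false);
    System.out.println(hours + " " + compress);
  }
}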
@@ -29,6 +29,7 @@ import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.commons.lang.StringUtils;
/**
* Set of configuration properties that can be injected into the application

@@ -104,6 +105,35 @@ public class Configuration implements Serializable {
this.files = files;
}
public long getPropertyLong(String name, long defaultValue) {
if (name == null) {
return defaultValue;
}
String value = properties.get(name.trim());
if (StringUtils.isEmpty(value)) {
return defaultValue;
}
return Long.parseLong(value);
}
public String getProperty(String name, String defaultValue) {
if (name == null) {
return defaultValue;
}
return properties.get(name.trim());
}
public void setProperty(String name, String value) {
properties.put(name, value);
}
public String getProperty(String name) {
if (name == null) {
return null;
}
return properties.get(name.trim());
}
@Override
public boolean equals(java.lang.Object o) {
if (this == o) {
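Configuration gains string-keyed property accessors backed by commons-lang StringUtils. A short sketch of how a caller might read a numeric setting with a default, not part of the patch; the property key is hypothetical, the accessor signatures are exactly those added above, and the properties map is assumed to be pre-initialized, as setProperty's unguarded put implies.

import org.apache.slider.api.resource.Configuration;

public class ConfigurationAccessSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setProperty("container.failure.threshold", "10");   // hypothetical key
    // getPropertyLong falls back to the default when the value is missing or empty.
    long threshold = conf.getPropertyLong("container.failure.threshold", 5L);
    String raw = conf.getProperty("container.failure.threshold");
    System.out.println(threshold + " " + raw);
  }
}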
@@ -39,7 +39,7 @@ public class Resource extends BaseResource implements Cloneable {
private static final long serialVersionUID = -6431667797380250037L;
private String profile = null;
private Integer cpus = null;
private Integer cpus = 1;
private String memory = null;
/**
File diff suppressed because it is too large
@@ -23,6 +23,7 @@ import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.types.NodeInformationList;
import org.apache.slider.api.types.SliderInstanceDescription;
import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;

@@ -61,15 +62,8 @@ import java.util.Map;
* Stability: evolving
*/
public interface SliderClientAPI extends Service {
/**
* Destroy a cluster. There's two race conditions here
* #1 the cluster is started between verifying that there are no live
* clusters of that name.
*/
int actionDestroy(String clustername, ActionDestroyArgs destroyArgs)
throws YarnException, IOException;
int actionDestroy(String clustername) throws YarnException,
void actionDestroy(String clustername) throws YarnException,
IOException;
/**

@@ -87,18 +81,6 @@ public interface SliderClientAPI extends Service {
AbstractClientProvider createClientProvider(String provider)
throws SliderException;
/**
* Build up the cluster specification/directory
*
* @param clustername cluster name
* @param buildInfo the arguments needed to build the cluster
* @throws YarnException Yarn problems
* @throws IOException other problems
* @throws BadCommandArgumentsException bad arguments.
*/
int actionBuild(String clustername,
AbstractClusterBuildingActionArgs buildInfo) throws YarnException, IOException;
/**
* Upload keytab to a designated sub-directory of the user home directory
*

@@ -190,53 +172,14 @@ public interface SliderClientAPI extends Service {
ActionUpgradeArgs buildInfo)
throws YarnException, IOException;
/**
* Get the report of a this application
* @return the app report or null if it could not be found.
* @throws IOException
* @throws YarnException
*/
ApplicationReport getApplicationReport()
throws IOException, YarnException;
/**
* Kill the submitted application via YARN
* @throws YarnException
* @throws IOException
*/
boolean forceKillApplication(String reason)
throws YarnException, IOException;
/**
* Implement the list action: list all nodes
* @return exit code of 0 if a list was created
*/
int actionList(String clustername, ActionListArgs args) throws IOException, YarnException;
/**
* Enumerate slider instances for the current user, and the
* most recent app report, where available.
* @param listOnlyInState boolean to indicate that the instances should
* only include those in a YARN state
* <code> minAppState <= currentState <= maxAppState </code>
*
* @param minAppState minimum application state to include in enumeration.
* @param maxAppState maximum application state to include
* @return a map of application instance name to description
* @throws IOException Any IO problem
* @throws YarnException YARN problems
*/
Map<String, SliderInstanceDescription> enumSliderInstances(
boolean listOnlyInState,
YarnApplicationState minAppState,
YarnApplicationState maxAppState)
throws IOException, YarnException;
/**
* Implement the islive action: probe for a cluster of the given name existing
* @return exit code
*/
int actionFlex(String name, ActionFlexArgs args) throws YarnException, IOException;
void actionFlex(String name, ActionFlexArgs args) throws YarnException, IOException;
/**
* Test for a cluster existing probe for a cluster of the given name existing

@@ -288,7 +231,7 @@ public interface SliderClientAPI extends Service {
* @throws YarnException
* @throws IOException
*/
String actionStatus(String clustername) throws YarnException, IOException;
Application actionStatus(String clustername) throws YarnException, IOException;
/**
* Version Details

@@ -303,13 +246,13 @@ public interface SliderClientAPI extends Service {
* @param freezeArgs arguments to the stop
* @return EXIT_SUCCESS if the cluster was not running by the end of the operation
*/
int actionFreeze(String clustername, ActionFreezeArgs freezeArgs)
void actionStop(String clustername, ActionFreezeArgs freezeArgs)
throws YarnException, IOException;
/**
* Restore a cluster
*/
int actionThaw(String clustername, ActionThawArgs thaw) throws YarnException, IOException;
int actionStart(String clustername, ActionThawArgs thaw) throws YarnException, IOException;
/**
* Registry operation
@@ -60,6 +60,10 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
//TODO, Remove this class and YarnAppListClient
// why do we need so many yarn client wrappers ?
// - yarn client already provides most of functionality already
/**
* A class that extends visibility to some of the YarnClientImpl
* members and data structures, and factors out pure-YARN operations

@@ -68,25 +72,6 @@ import java.util.Set;
public class SliderYarnClientImpl extends YarnClientImpl {
protected static final Logger log = LoggerFactory.getLogger(SliderYarnClientImpl.class);
/**
* Keyword to use in the {@link #emergencyForceKill(String)}
* operation to force kill <i>all</i> application instances belonging
* to a specific user
*/
public static final String KILL_ALL = "all";
@Override
protected void serviceInit(Configuration conf) throws Exception {
InetSocketAddress clientRpcAddress = SliderUtils.getRmAddress(conf);
if (!SliderUtils.isAddressDefined(clientRpcAddress)) {
// address isn't known; fail fast
throw new BindException("Invalid " + YarnConfiguration.RM_ADDRESS
+ " value:" + conf.get(YarnConfiguration.RM_ADDRESS)
+ " - see https://wiki.apache.org/hadoop/UnsetHostnameOrPort");
}
super.serviceInit(conf);
}
/**
* Get the RM Client RPC interface
* @return an RPC interface valid after initialization and authentication

@@ -95,52 +80,6 @@ public class SliderYarnClientImpl extends YarnClientImpl {
return rmClient;
}
/**
* List Slider <i>running</i>instances belonging to a specific user.
* @deprecated use {@link #listDeployedInstances(String)}
* @param user user: "" means all users
* @return a possibly empty list of Slider AMs
*/
public List<ApplicationReport> listInstances(String user)
throws YarnException, IOException {
return listDeployedInstances(user);
}
/**
* List Slider <i>deployed</i>instances belonging to a specific user.
* <p>
* Deployed means: known about in the YARN cluster; it will include
* any that are in the failed/finished state, as well as those queued
* for starting.
* @param user user: "" means all users
* @return a possibly empty list of Slider AMs
*/
public List<ApplicationReport> listDeployedInstances(String user)
throws YarnException, IOException {
return listDeployedInstances(user, null);
}
/**
* List Slider <i>deployed</i>instances belonging to a specific user in a
* given set of states.
* <p>
* Deployed means: known about in the YARN cluster; it will include all apps
* in the specified set of states.
*
* @param user
* user: "" means all users
* @param appStates
* filter by a set of YarnApplicationState
* @return a possibly empty list of Slider AMs
* @throws YarnException
* @throws IOException
*/
public List<ApplicationReport> listDeployedInstances(String user,
EnumSet<YarnApplicationState> appStates)
throws YarnException, IOException {
return listDeployedInstances(user, appStates, null);
}
/**
* List Slider <i>deployed</i>instances belonging to a specific user in a
* given set of states and filtered by an application name tag.

@@ -179,21 +118,6 @@ public class SliderYarnClientImpl extends YarnClientImpl {
return results;
}
/**
* find all instances of a specific app -if there is more than one in the
* YARN cluster,
* this returns them all
* @param user user; use "" for all users
* @param appname application name
* @return the list of all matching application instances
*/
public List<ApplicationReport> findAllInstances(String user, String appname)
throws IOException, YarnException {
Preconditions.checkArgument(appname != null, "Null application name");
return listDeployedInstances(user, null, appname);
}
/**
* Helper method to determine if a cluster application is running -or
* is earlier in the lifecycle

@@ -206,122 +130,6 @@ public class SliderYarnClientImpl extends YarnClientImpl {
return app.getYarnApplicationState().ordinal() <= YarnApplicationState.RUNNING.ordinal();
}
/**
* Kill a running application
* @param applicationId app Id
* @param reason reason: reason for log
* @return the response
* @throws YarnException YARN problems
* @throws IOException IO problems
*/
public KillApplicationResponse killRunningApplication(ApplicationId applicationId,
String reason)
throws YarnException, IOException {
Preconditions.checkArgument(applicationId != null, "Null application Id");
log.info("Killing application {} - {}", applicationId.getClusterTimestamp(),
reason);
KillApplicationRequest request =
Records.newRecord(KillApplicationRequest.class);
request.setApplicationId(applicationId);
return getRmClient().forceKillApplication(request);
}
private String getUsername() throws IOException {
return UserGroupInformation.getCurrentUser().getShortUserName();
}
/**
* Force kill a yarn application by ID. No niceties here
* @param applicationId app Id. "all" means "kill all instances of the current user
*
*/
public void emergencyForceKill(String applicationId)
throws YarnException, IOException {
Preconditions.checkArgument(StringUtils.isNotEmpty(applicationId),
"Null/empty application Id");
if (KILL_ALL.equals(applicationId)) {
// user wants all instances killed
String user = getUsername();
log.info("Killing all applications belonging to {}", user);
Collection<ApplicationReport> instances = listDeployedInstances(user,
SliderUtils.getAllLiveAppStates());
for (ApplicationReport instance : instances) {
ApplicationId appId = instance.getApplicationId();
log.info("Killing Application {}", appId);
killRunningApplication(appId, "forced kill");
}
} else {
ApplicationId appId = ConverterUtils.toApplicationId(applicationId);
log.info("Killing Application {}", applicationId);
killRunningApplication(appId, "forced kill");
}
}
/**
* Monitor the submitted application for reaching the requested state.
* Will also report if the app reaches a later state (failed, killed, etc)
* Kill application if duration!= null & time expires.
* @param appId Application Id of application to be monitored
* @param duration how long to wait -must be more than 0
* @param desiredState desired state.
* @return the application report -null on a timeout
* @throws YarnException
* @throws IOException
*/
public ApplicationReport monitorAppToState(
ApplicationId appId, YarnApplicationState desiredState, Duration duration)
throws YarnException, IOException {
if (appId == null) {
throw new BadCommandArgumentsException("null application ID");
}
if (duration.limit <= 0) {
throw new BadCommandArgumentsException("Invalid monitoring duration");
}
log.debug("Waiting {} millis for app to reach state {} ",
duration.limit,
desiredState);
duration.start();
try {
while (true) {
// Get application report for the appId we are interested in
ApplicationReport r = getApplicationReport(appId);
log.debug("queried status is\n{}",
new SliderUtils.OnDemandReportStringifier(r));
YarnApplicationState state = r.getYarnApplicationState();
if (state.ordinal() >= desiredState.ordinal()) {
log.debug("App in desired state (or higher) :{}", state);
return r;
}
if (duration.getLimitExceeded()) {
log.debug(
"Wait limit of {} millis to get to state {}, exceeded; app status\n {}",
duration.limit,
desiredState,
new SliderUtils.OnDemandReportStringifier(r));
return null;
}
// sleep 1s.
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {
log.debug("Thread sleep in monitoring loop interrupted");
}
}
} finally {
duration.close();
}
}
/**
* find all live instances of a specific app -if there is >1 in the cluster,
* this returns them all. State should be running or less
@@ -112,17 +112,6 @@ public class SliderApplicationIpcClient implements SliderApplicationApi {
}
}
@Override
public void putDesiredResources(ConfTree updated) throws IOException {
try {
operations.flex(updated);
} catch (IOException e) {
throw convert(e);
}
}
@Override
public AggregateConf getResolvedModel() throws IOException {
try {
@@ -25,6 +25,8 @@ import org.apache.slider.api.ClusterNode;
import org.apache.slider.api.SliderClusterProtocol;
import org.apache.slider.api.StateValues;
import org.apache.slider.api.proto.Messages;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.api.types.ApplicationLivenessInformation;
import org.apache.slider.api.types.ComponentInformation;
import org.apache.slider.api.types.ContainerInformation;

@@ -39,6 +41,7 @@ import org.apache.slider.core.exceptions.NoSuchNodeException;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.core.exceptions.WaitTimeoutException;
import org.apache.slider.core.persist.ConfTreeSerDeser;
import org.apache.slider.core.persist.JsonSerDeser;
import org.codehaus.jackson.JsonParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -61,6 +64,8 @@ public class SliderClusterOperations {
log = LoggerFactory.getLogger(SliderClusterOperations.class);
private final SliderClusterProtocol appMaster;
private static final JsonSerDeser<Application> jsonSerDeser =
new JsonSerDeser<Application>(Application.class);
private static final Messages.EmptyPayloadProto EMPTY;
static {
EMPTY = Messages.EmptyPayloadProto.newBuilder().build();

@@ -130,48 +135,20 @@ public class SliderClusterOperations {
* Connect to a live cluster and get its current state
* @return its description
*/
public ClusterDescription getClusterDescription()
throws YarnException, IOException {
public Application getApplication() throws YarnException, IOException {
Messages.GetJSONClusterStatusRequestProto req =
Messages.GetJSONClusterStatusRequestProto.newBuilder().build();
Messages.GetJSONClusterStatusResponseProto resp =
appMaster.getJSONClusterStatus(req);
String statusJson = resp.getClusterSpec();
try {
return ClusterDescription.fromJson(statusJson);
return jsonSerDeser.fromJson(statusJson);
} catch (JsonParseException e) {
log.error("Exception " + e + " parsing:\n" + statusJson, e);
log.error("Error when parsing app json file", e);
throw e;
}
}
/**
* Get the AM instance definition.
* <p>
* See {@link SliderClusterProtocol#getInstanceDefinition(Messages.GetInstanceDefinitionRequestProto)}
* @return current slider AM aggregate definition
* @throws YarnException
* @throws IOException
*/
public AggregateConf getInstanceDefinition()
throws YarnException, IOException {
Messages.GetInstanceDefinitionRequestProto.Builder builder =
Messages.GetInstanceDefinitionRequestProto.newBuilder();
Messages.GetInstanceDefinitionRequestProto request = builder.build();
Messages.GetInstanceDefinitionResponseProto response =
appMaster.getInstanceDefinition(request);
ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser();
ConfTree internal = confTreeSerDeser.fromJson(response.getInternal());
ConfTree resources = confTreeSerDeser.fromJson(response.getResources());
ConfTree app = confTreeSerDeser.fromJson(response.getApplication());
AggregateConf instanceDefinition =
new AggregateConf(resources, app, internal);
return instanceDefinition;
}
/**
* Kill a container
* @param id container ID

@@ -315,22 +292,14 @@ public class SliderClusterOperations {
return state;
}
/**
* Flex operation
* @param resources new resources
* @return the response
* @throws IOException
*/
public boolean flex(ConfTree resources) throws IOException {
Messages.FlexClusterRequestProto request =
Messages.FlexClusterRequestProto.newBuilder()
.setClusterSpec(resources.toJson())
.build();
Messages.FlexClusterResponseProto response = appMaster.flexCluster(request);
return response.getResponse();
public void flex(Component component) throws IOException{
Messages.FlexComponentRequestProto request =
Messages.FlexComponentRequestProto.newBuilder()
.setNumberOfContainers(component.getNumberOfContainers().intValue())
.setName(component.getName()).build();
appMaster.flexComponent(request);
}
/**
* Commit (possibly delayed) AM suicide
*
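In the hunks above, getClusterDescription() becomes getApplication(), and the ConfTree-based flex is replaced by a per-component flex that sends a FlexComponentRequestProto. A hedged sketch of the new call, not part of the patch; the SliderClusterOperations package and the Component.setName setter are assumptions, while the flex signature and setNumberOfContainers come from the hunks in this commit.

import java.io.IOException;
import org.apache.slider.api.resource.Component;
import org.apache.slider.client.ipc.SliderClusterOperations;   // package assumed

public class FlexComponentSketch {
  // ops is expected to be already bound to a live application master.
  static void scaleWorker(SliderClusterOperations ops) throws IOException {
    Component worker = new Component();
    worker.setName("worker");             // setter assumed on the model
    worker.setNumberOfContainers(3L);     // setter shown in the Component hunks
    ops.flex(worker);                     // builds and sends FlexComponentRequestProto
  }
}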
@@ -176,29 +176,6 @@ public class SliderApplicationApiRestClient extends BaseRestClient
return new ConfTreeOperations(resource);
}
@Override
public void putDesiredResources(ConfTree updated) throws IOException {
WebResource resource = applicationResource(MODEL_DESIRED_RESOURCES);
try {
// put operation. The result is discarded; it does help validate
// that the operation returned a JSON data structure as well as a 200
// response.
resource.accept(MediaType.APPLICATION_JSON_TYPE)
.type(MediaType.APPLICATION_JSON_TYPE)
.entity(updated)
.put(ConfTree.class);
} catch (ClientHandlerException ex) {
throw ExceptionConverter.convertJerseyException("PUT",
resource.getURI().toString(),
ex);
} catch (UniformInterfaceException ex) {
throw ExceptionConverter.convertJerseyException("PUT",
resource.getURI().toString(), ex);
}
}
@Override
public AggregateConf getResolvedModel() throws IOException {
return getApplicationResource(MODEL_RESOLVED, AggregateConf.class);
@@ -18,6 +18,8 @@
package org.apache.slider.common;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

@@ -50,7 +52,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
String SLIDER_DEPENDENCY_LOCALIZED_DIR_LINK = "slider_dep";
String SLIDER_DEPENDENCY_HDP_PARENT_DIR = "/hdp";
String SLIDER_DEPENDENCY_DIR = "/apps/%s/slider";
String SLIDER_DEPENDENCY_TAR_GZ_FILE_NAME = "slider";
String SLIDER_DEPENDENCY_TAR_GZ_FILE_NAME = "slider-dep";
String SLIDER_DEPENDENCY_TAR_GZ_FILE_EXT = ".tar.gz";
String SLIDER_DEPENDENCY_DIR_PERMISSIONS = "755";

@@ -181,7 +183,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
/**
* name of generated dir for this conf: {@value}
*/
String SUBMITTED_CONF_DIR = "confdir";
String SUBMITTED_CONF_DIR = "conf";
/**
* Slider AM log4j file name : {@value}

@@ -227,7 +229,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
*/
String ADDONS_DIR = "addons";
String SLIDER_JAR = "slider.jar";
String SLIDER_JAR = "slider-core.jar";
String JCOMMANDER_JAR = "jcommander.jar";
String GSON_JAR = "gson.jar";
String DEFAULT_APP_PKG = "appPkg.zip";

@@ -238,7 +240,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
String STDERR_AM = "slider-err.txt";
String DEFAULT_GC_OPTS = "";
String HADOOP_USER_NAME = "HADOOP_USER_NAME";
String HADOOP_USER_NAME = ApplicationConstants.Environment.USER.toString();
String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
String SLIDER_PASSPHRASE = "SLIDER_PASSPHRASE";
@@ -73,6 +73,9 @@ public interface SliderXmlConfKeys {
int DEFAULT_YARN_QUEUE_PRIORITY = 1;
String KEY_AM_RESOURCE_MEM = "slider.am.resource.memory";
long DEFAULT_KEY_AM_RESOURCE_MEM = 1024;
/**
* The slider base path: {@value}
* Defaults to HomeDir/.slider
@@ -69,7 +69,7 @@ public abstract class AbstractActionArgs extends ArgOps implements Arguments {
/**
-D name=value
Define an HBase configuration option which overrides any options in
Define an configuration option which overrides any options in
the configuration XML files of the image or in the image configuration
directory. The values will be persisted.
Configuration options are only passed to the cluster when creating or reconfiguring a cluster.
@@ -35,186 +35,13 @@ import java.util.Map;
* Abstract Action to build things; shares args across build and
* list
*/
public abstract class AbstractClusterBuildingActionArgs extends
AbstractActionArgs {
/**
* Declare the image configuration directory to use when creating or
* reconfiguring a slider cluster. The path must be on a filesystem visible
* to all nodes in the YARN cluster. Only one configuration directory can
* be specified.
*/
@Parameter(names = ARG_CONFDIR,
description = "Path to cluster configuration directory in HDFS",
converter = PathArgumentConverter.class)
public Path confdir;
@Parameter(names = ARG_ZKPATH,
description = "Zookeeper path for the application")
public String appZKPath;
@Parameter(names = ARG_ZKHOSTS,
description = "comma separated list of the Zookeeper hosts")
public String zkhosts;
/**
* --image path
* the full path to a .tar or .tar.gz path containing an HBase image.
*/
@Parameter(names = ARG_IMAGE,
description = "The full path to a .tar or .tar.gz path containing the application",
converter = PathArgumentConverter.class)
public Path image;
@Parameter(names = ARG_APP_HOME,
description = "Home directory of a pre-installed application")
public String appHomeDir;
@Parameter(names = ARG_PROVIDER,
description = "Provider of the specific cluster application")
public String provider = SliderProviderFactory.DEFAULT_CLUSTER_TYPE;
@Parameter(names = {ARG_PACKAGE},
description = "URI to a slider package")
public String packageURI;
@Parameter(names = {ARG_RESOURCES},
description = "File defining the resources of this instance")
public File resources;
@Parameter(names = {ARG_TEMPLATE},
description = "Template application configuration")
public File template;
@Parameter(names = {ARG_METAINFO},
description = "Application meta info file")
public File appMetaInfo;
@Parameter(names = {ARG_METAINFO_JSON},
description = "Application meta info JSON blob")
public String appMetaInfoJson;
@Parameter(names = {ARG_APPDEF},
description = "Application def (folder or a zip package)")
public File appDef;
@Parameter(names = {ARG_QUEUE},
description = "Queue to submit the application")
public abstract class AbstractClusterBuildingActionArgs
extends AbstractActionArgs {
@Parameter(names = {
ARG_QUEUE }, description = "Queue to submit the application")
public String queue;
@Parameter(names = {ARG_LIFETIME},
description = "Lifetime of the application from the time of request")
@Parameter(names = {
ARG_LIFETIME }, description = "Lifetime of the application from the time of request")
public long lifetime;
@ParametersDelegate
public ComponentArgsDelegate componentDelegate = new ComponentArgsDelegate();
@ParametersDelegate
public AddonArgsDelegate addonDelegate = new AddonArgsDelegate();
@ParametersDelegate
public AppAndResouceOptionArgsDelegate optionsDelegate =
new AppAndResouceOptionArgsDelegate();
public Map<String, String> getOptionsMap() throws
BadCommandArgumentsException {
return optionsDelegate.getOptionsMap();
}
/**
* Get the role heap mapping (may be empty, but never null)
* @return role heap mapping
* @throws BadCommandArgumentsException parse problem
*/
public Map<String, Map<String, String>> getCompOptionMap() throws
BadCommandArgumentsException {
return optionsDelegate.getCompOptionMap();
}
public Map<String, String> getResourceOptionsMap() throws
BadCommandArgumentsException {
return optionsDelegate.getResourceOptionsMap();
}
/**
* Get the role heap mapping (may be empty, but never null)
* @return role heap mapping
* @throws BadCommandArgumentsException parse problem
*/
public Map<String, Map<String, String>> getResourceCompOptionMap() throws
BadCommandArgumentsException {
return optionsDelegate.getResourceCompOptionMap();
}
@VisibleForTesting
public List<String> getComponentTuples() {
return componentDelegate.getComponentTuples();
}
/**
* Get the role mapping (may be empty, but never null)
* @return role mapping
* @throws BadCommandArgumentsException parse problem
*/
public Map<String, String> getComponentMap() throws
BadCommandArgumentsException {
return componentDelegate.getComponentMap();
}
@VisibleForTesting
public List<String> getAddonTuples() {
return addonDelegate.getAddonTuples();
}
/**
* Get the list of addons (may be empty, but never null)
*/
public Map<String, String> getAddonMap() throws
BadCommandArgumentsException {
return addonDelegate.getAddonMap();
}
public Path getConfdir() {
return confdir;
}
public String getAppZKPath() {
return appZKPath;
}
public String getZKhosts() {
return zkhosts;
}
public Path getImage() {
return image;
}
public String getAppHomeDir() {
return appHomeDir;
}
public String getProvider() {
return provider;
}
public ConfTree buildAppOptionsConfTree() throws
BadCommandArgumentsException {
return buildConfTree(getOptionsMap());
}
public ConfTree buildResourceOptionsConfTree() throws
BadCommandArgumentsException {
return buildConfTree(getResourceOptionsMap());
}
protected ConfTree buildConfTree(Map<String, String> optionsMap) throws
BadCommandArgumentsException {
ConfTree confTree = new ConfTree();
confTree.global.putAll(optionsMap);
return confTree;
}
}
@@ -1,32 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.params;
import com.beust.jcommander.Parameters;
@Parameters(commandNames = {SliderActions.ACTION_BUILD},
commandDescription = SliderActions.DESCRIBE_ACTION_BUILD)
public class ActionBuildArgs extends AbstractClusterBuildingActionArgs {
@Override
public String getActionName() {
return SliderActions.ACTION_BUILD;
}
}
@@ -18,6 +18,7 @@
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.beust.jcommander.ParametersDelegate;

@@ -26,34 +27,19 @@ import java.io.File;
@Parameters(commandNames = {SliderActions.ACTION_CREATE},
commandDescription = SliderActions.DESCRIBE_ACTION_CREATE)
public class ActionCreateArgs extends AbstractClusterBuildingActionArgs
implements WaitTimeAccessor, LaunchArgsAccessor {
public class ActionCreateArgs extends AbstractClusterBuildingActionArgs {
@Parameter(names = {ARG_APPDEF},
description = "Template application definition file in JSON format.")
public File appDef;
public File getAppDef() {
return appDef;
}
@Override
public String getActionName() {
return SliderActions.ACTION_CREATE;
}
@ParametersDelegate
LaunchArgsDelegate launchArgs = new LaunchArgsDelegate();
@Override
public File getOutputFile() {
return launchArgs.getOutputFile();
}
@Override
public String getRmAddress() {
return launchArgs.getRmAddress();
}
@Override
public int getWaittime() {
return launchArgs.getWaittime();
}
@Override
public void setWaittime(int waittime) {
launchArgs.setWaittime(waittime);
}
}
@@ -18,37 +18,31 @@
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.beust.jcommander.ParametersDelegate;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import java.util.List;
import java.util.Map;
@Parameters(commandNames = {SliderActions.ACTION_FLEX},
commandDescription = SliderActions.DESCRIBE_ACTION_FLEX)
public class ActionFlexArgs extends AbstractActionArgs {
@Parameter(names = {ARG_COMPONENT},
description = "component name")
String componentName;
@Parameter(names = {ARG_COUNT},
description = "number of containers>")
long numberOfContainers;
@Override
public String getActionName() {
return SliderActions.ACTION_FLEX;
}
@ParametersDelegate
public ComponentArgsDelegate componentDelegate = new ComponentArgsDelegate();
/**
* Get the component mapping (may be empty, but never null)
* @return mapping
* @throws BadCommandArgumentsException parse problem
*/
public Map<String, String> getComponentMap() throws BadCommandArgumentsException {
return componentDelegate.getComponentMap();
public String getComponent() {
return componentName;
}
public List<String> getComponentTuples() {
return componentDelegate.getComponentTuples();
public long getNumberOfContainers() {
return numberOfContainers;
}
}
@@ -22,14 +22,14 @@ import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.beust.jcommander.ParametersDelegate;
@Parameters(commandNames = {SliderActions.ACTION_FREEZE},
@Parameters(commandNames = {SliderActions.ACTION_STOP },
commandDescription = SliderActions.DESCRIBE_ACTION_FREEZE)
public class ActionFreezeArgs extends AbstractActionArgs implements
WaitTimeAccessor {
@Override
public String getActionName() {
return SliderActions.ACTION_FREEZE;
return SliderActions.ACTION_STOP;
}
public static final String FREEZE_COMMAND_ISSUED = "stop command issued";
@@ -24,7 +24,7 @@ import com.beust.jcommander.ParametersDelegate;
import java.io.File;
@Parameters(commandNames = {SliderActions.ACTION_THAW},
@Parameters(commandNames = {SliderActions.ACTION_START },
commandDescription = SliderActions.DESCRIBE_ACTION_THAW)
public class ActionThawArgs extends AbstractActionArgs implements
WaitTimeAccessor,

@@ -33,7 +33,7 @@ public class ActionThawArgs extends AbstractActionArgs implements
@Override
public String getActionName() {
return SliderActions.ACTION_THAW;
return SliderActions.ACTION_START;
}
@Override
@ -18,56 +18,31 @@
|
|||
|
||||
package org.apache.slider.common.params;
|
||||
|
||||
import java.io.File;
|
||||
import com.beust.jcommander.Parameter;
|
||||
import com.beust.jcommander.Parameters;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import com.beust.jcommander.Parameter;
|
||||
import com.beust.jcommander.Parameters;
|
||||
import com.beust.jcommander.ParametersDelegate;
|
||||
|
||||
@Parameters(commandNames = { SliderActions.ACTION_UPGRADE },
|
||||
commandDescription = SliderActions.DESCRIBE_ACTION_UPGRADE)
|
||||
public class ActionUpgradeArgs extends AbstractClusterBuildingActionArgs
|
||||
implements WaitTimeAccessor, LaunchArgsAccessor {
|
||||
public class ActionUpgradeArgs extends AbstractActionArgs {
|
||||
|
||||
@Override
|
||||
public String getActionName() {
|
||||
return SliderActions.ACTION_UPGRADE;
|
||||
}
|
||||
|
||||
@ParametersDelegate
|
||||
LaunchArgsDelegate launchArgs = new LaunchArgsDelegate();
|
||||
|
||||
@Override
|
||||
public File getOutputFile() {
|
||||
return launchArgs.getOutputFile();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getRmAddress() {
|
||||
return launchArgs.getRmAddress();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getWaittime() {
|
||||
return launchArgs.getWaittime();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setWaittime(int waittime) {
|
||||
launchArgs.setWaittime(waittime);
|
||||
}
|
||||
|
||||
@Parameter(names={ARG_CONTAINERS}, variableArity = true,
|
||||
description = "stop specific containers")
|
||||
public List<String> containers = new ArrayList<>(0);
|
||||
|
||||
@Parameter(names={ARG_COMPONENTS}, variableArity = true,
|
||||
description = "stop all containers of specific components")
|
||||
public List<String> components = new ArrayList<>(0);
|
||||
|
||||
@Parameter(names = {ARG_FORCE},
|
||||
description = "force spec upgrade operation")
|
||||
public boolean force;
|
||||
// TODO upgrade container
|
||||
// @Parameter(names={ARG_CONTAINERS}, variableArity = true,
|
||||
// description = "stop specific containers")
|
||||
// public List<String> containers = new ArrayList<>(0);
|
||||
//
|
||||
// @Parameter(names={ARG_COMPONENTS}, variableArity = true,
|
||||
// description = "stop all containers of specific components")
|
||||
// public List<String> components = new ArrayList<>(0);
|
||||
//
|
||||
// @Parameter(names = {ARG_FORCE},
|
||||
// description = "force spec upgrade operation")
|
||||
// public boolean force;
|
||||
}
|
||||
|
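For illustration only, a minimal hedged sketch of how the slimmed-down upgrade arguments above could be populated; the container id and component name are invented values, and only the public fields shown in this hunk (containers, components, force) are used.

import org.apache.slider.common.params.ActionUpgradeArgs;

public class UpgradeArgsSketch {
  public static ActionUpgradeArgs buildUpgradeArgs() {
    ActionUpgradeArgs upgradeArgs = new ActionUpgradeArgs();
    // stop one specific container and every container of one component (example ids)
    upgradeArgs.containers.add("container_1490000000000_0001_01_000002");
    upgradeArgs.components.add("HBASE_MASTER");
    // force the spec upgrade operation
    upgradeArgs.force = true;
    return upgradeArgs;
  }
}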
|
|
@ -36,6 +36,7 @@ public interface Arguments {
|
|||
String ARG_CLIENT = "--client";
|
||||
String ARG_CONFDIR = "--appconf";
|
||||
String ARG_COMPONENT = "--component";
|
||||
String ARG_COUNT = "--count";
|
||||
String ARG_COMPONENT_SHORT = "--comp";
|
||||
String ARG_COMPONENTS = "--components";
|
||||
String ARG_COMP_OPT= "--compopt";
|
||||
|
|
|
@ -53,7 +53,6 @@ public class ClientArgs extends CommonArgs {
|
|||
// =========================================================
|
||||
|
||||
private final ActionAMSuicideArgs actionAMSuicideArgs = new ActionAMSuicideArgs();
|
||||
private final ActionBuildArgs actionBuildArgs = new ActionBuildArgs();
|
||||
private final ActionClientArgs actionClientArgs = new ActionClientArgs();
|
||||
private final ActionCreateArgs actionCreateArgs = new ActionCreateArgs();
|
||||
private final ActionDependencyArgs actionDependencyArgs = new ActionDependencyArgs();
|
||||
|
@ -96,7 +95,6 @@ public class ClientArgs extends CommonArgs {
|
|||
|
||||
addActions(
|
||||
actionAMSuicideArgs,
|
||||
actionBuildArgs,
|
||||
actionClientArgs,
|
||||
actionCreateArgs,
|
||||
actionDependencyArgs,
|
||||
|
@ -155,10 +153,6 @@ public class ClientArgs extends CommonArgs {
|
|||
return actionAMSuicideArgs;
|
||||
}
|
||||
|
||||
public ActionBuildArgs getActionBuildArgs() {
|
||||
return actionBuildArgs;
|
||||
}
|
||||
|
||||
public ActionInstallPackageArgs getActionInstallPackageArgs() { return actionInstallPackageArgs; }
|
||||
|
||||
public ActionClientArgs getActionClientArgs() { return actionClientArgs; }
|
||||
|
@ -256,23 +250,17 @@ public class ClientArgs extends CommonArgs {
|
|||
action = ACTION_HELP;
|
||||
}
|
||||
switch (action) {
|
||||
case ACTION_BUILD:
|
||||
bindCoreAction(actionBuildArgs);
|
||||
//it's a builder, so set those actions too
|
||||
buildingActionArgs = actionBuildArgs;
|
||||
break;
|
||||
|
||||
case ACTION_CREATE:
|
||||
bindCoreAction(actionCreateArgs);
|
||||
//it's a builder, so set those actions too
|
||||
buildingActionArgs = actionCreateArgs;
|
||||
break;
|
||||
|
||||
case ACTION_FREEZE:
|
||||
case ACTION_STOP:
|
||||
bindCoreAction(actionFreezeArgs);
|
||||
break;
|
||||
|
||||
case ACTION_THAW:
|
||||
case ACTION_START:
|
||||
bindCoreAction(actionThawArgs);
|
||||
break;
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@ public class SliderAMArgs extends CommonArgs {
|
|||
* This is the URI in the FS to the Slider cluster; the conf file (and any
|
||||
* other cluster-specifics) can be picked up here
|
||||
*/
|
||||
public String getSliderClusterURI() {
|
||||
public String getAppDefDir() {
|
||||
return createAction.sliderClusterURI;
|
||||
}
|
||||
|
||||
|
|
|
@ -36,7 +36,7 @@ public interface SliderActions {
|
|||
String ACTION_ECHO = "echo";
|
||||
String ACTION_EXISTS = "exists";
|
||||
String ACTION_FLEX = "flex";
|
||||
String ACTION_FREEZE = "stop";
|
||||
String ACTION_STOP = "stop";
|
||||
String ACTION_HELP = "help";
|
||||
String ACTION_INSTALL_KEYTAB = "install-keytab";
|
||||
String ACTION_INSTALL_PACKAGE = "install-package";
|
||||
|
@ -53,7 +53,7 @@ public interface SliderActions {
|
|||
String ACTION_RESOLVE = "resolve";
|
||||
String ACTION_RESOURCE = "resource";
|
||||
String ACTION_STATUS = "status";
|
||||
String ACTION_THAW = "start";
|
||||
String ACTION_START = "start";
|
||||
String ACTION_TOKENS = "tokens";
|
||||
|
||||
String ACTION_VERSION = "version";
|
||||
|
@ -68,7 +68,7 @@ public interface SliderActions {
|
|||
String DESCRIBE_ACTION_UPDATE =
|
||||
"Update template for a Slider application";
|
||||
String DESCRIBE_ACTION_UPGRADE =
|
||||
"Rolling upgrade/downgrade the application to a newer/previous version";
|
||||
"Rolling upgrade/downgrade the component/containerto a newer/previous version";
|
||||
String DESCRIBE_ACTION_DESTROY =
|
||||
"Destroy a stopped Slider application";
|
||||
String DESCRIBE_ACTION_EXISTS =
|
||||
|
|
|
@ -511,6 +511,7 @@ public class CoreFileSystem {
|
|||
* @return the parent dir path of slider.tar.gz in HDFS
|
||||
*/
|
||||
public Path getDependencyPath() {
|
||||
// FIXME: 3/20/17 HDP ???????????
|
||||
String parentDir = (SliderUtils.isHdp()) ? SliderKeys.SLIDER_DEPENDENCY_HDP_PARENT_DIR
|
||||
+ SliderKeys.SLIDER_DEPENDENCY_DIR
|
||||
: SliderKeys.SLIDER_DEPENDENCY_DIR;
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.slider.common.tools;
|
|||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
@ -29,6 +30,8 @@ import java.io.IOException;
|
|||
*/
|
||||
public class SliderFileSystem extends CoreFileSystem {
|
||||
|
||||
Path appDir = null;
|
||||
|
||||
public SliderFileSystem(FileSystem fileSystem,
|
||||
Configuration configuration) {
|
||||
super(fileSystem, configuration);
|
||||
|
@ -38,5 +41,11 @@ public class SliderFileSystem extends CoreFileSystem {
|
|||
super(configuration);
|
||||
}
|
||||
|
||||
public void setAppDir(Path appDir) {
|
||||
this.appDir = appDir;
|
||||
}
|
||||
|
||||
public Path getAppDir() {
|
||||
return this.appDir;
|
||||
}
|
||||
}
|
||||
|
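A hedged sketch of the new appDir accessor pair in use; the HDFS path below is an assumed example, not something taken from this patch.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.slider.common.tools.SliderFileSystem;

public class SliderFsSketch {
  public static Path rememberAppDir(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    SliderFileSystem sliderFs = new SliderFileSystem(fs, conf);
    // remember where this application's definition lives so later stages can reuse it
    sliderFs.setAppDir(new Path("hdfs:///user/yarn/.slider/apps/myapp"));
    return sliderFs.getAppDir();
  }
}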
|
|
@ -499,9 +499,6 @@ public final class SliderUtils {
|
|||
"Source file not a file " + srcFile);
|
||||
}
|
||||
FileSystem destFS = FileSystem.get(destFile.toUri(), conf);
|
||||
if (destFS.exists(destFile)) {
|
||||
throw new IOException("Dest file already exists " + destFile);
|
||||
}
|
||||
FileUtil.copy(srcFS, srcFile, destFS, destFile, false, true, conf);
|
||||
}
|
||||
|
||||
|
@ -1221,6 +1218,29 @@ public final class SliderUtils {
|
|||
return buildEnvMap(roleOpts, null);
|
||||
}
|
||||
|
||||
|
||||
// Build env map: key -> value;
|
||||
// value will be replaced by the corresponding value in tokenMap, if any.
|
||||
public static Map<String, String> buildEnvMap(
|
||||
org.apache.slider.api.resource.Configuration conf,
|
||||
Map<String,String> tokenMap) {
|
||||
if (tokenMap == null) {
|
||||
return conf.getEnv();
|
||||
}
|
||||
Map<String, String> env = new HashMap<>();
|
||||
for (Map.Entry<String, String> entry : conf.getEnv().entrySet()) {
|
||||
String key = entry.getKey();
|
||||
String val = entry.getValue();
|
||||
for (Map.Entry<String,String> token : tokenMap.entrySet()) {
|
||||
val = val.replaceAll(Pattern.quote(token.getKey()),
|
||||
token.getValue());
|
||||
}
|
||||
env.put(key,val);
|
||||
}
|
||||
return env;
|
||||
}
|
||||
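A hedged usage sketch of the new buildEnvMap overload above; the env entry and the token below are invented, the point being that every occurrence of a token key inside an env value is substituted.

import java.util.HashMap;
import java.util.Map;
import org.apache.slider.api.resource.Configuration;
import org.apache.slider.common.tools.SliderUtils;

public class EnvMapSketch {
  public static Map<String, String> resolveEnv(Configuration componentConf) {
    // suppose componentConf.getEnv() maps JAVA_OPTS -> "-Xmx${MAX_HEAP}"
    Map<String, String> tokens = new HashMap<>();
    tokens.put("${MAX_HEAP}", "1024m");
    // returns a copy of the env map with "-Xmx${MAX_HEAP}" rewritten to "-Xmx1024m"
    return SliderUtils.buildEnvMap(componentConf, tokens);
  }
}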
|
||||
|
||||
public static Map<String, String> buildEnvMap(Map<String, String> roleOpts,
|
||||
Map<String,String> tokenMap) {
|
||||
Map<String, String> env = new HashMap<>();
|
||||
|
@ -1273,8 +1293,8 @@ public final class SliderUtils {
|
|||
* @param clustername cluster name
|
||||
* @throws BadCommandArgumentsException if it is invalid
|
||||
*/
|
||||
public static void validateClusterName(String clustername) throws
|
||||
BadCommandArgumentsException {
|
||||
public static void validateClusterName(String clustername)
|
||||
throws BadCommandArgumentsException {
|
||||
if (!isClusternameValid(clustername)) {
|
||||
throw new BadCommandArgumentsException(
|
||||
"Illegal cluster name: " + clustername);
|
||||
|
@ -1603,14 +1623,12 @@ public final class SliderUtils {
|
|||
* @param sliderConfDir relative path to the dir containing slider config
|
||||
* options to put on the classpath -or null
|
||||
* @param libdir directory containing the JAR files
|
||||
* @param config the configuration
|
||||
* @param usingMiniMRCluster flag to indicate the MiniMR cluster is in use
|
||||
* (and hence the current classpath should be used, not anything built up)
|
||||
* @return a classpath
|
||||
*/
|
||||
public static ClasspathConstructor buildClasspath(String sliderConfDir,
|
||||
String libdir,
|
||||
Configuration config,
|
||||
SliderFileSystem sliderFileSystem,
|
||||
boolean usingMiniMRCluster) {
|
||||
|
||||
|
|
|
@ -1,233 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.core.launch;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.security.Credentials;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
|
||||
import org.apache.hadoop.yarn.api.records.Priority;
|
||||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
import org.apache.slider.client.SliderYarnClientImpl;
|
||||
import org.apache.slider.common.tools.CoreFileSystem;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public class AppMasterLauncher extends AbstractLauncher {
|
||||
|
||||
|
||||
private static final Logger log =
|
||||
LoggerFactory.getLogger(AppMasterLauncher.class);
|
||||
|
||||
public final YarnClientApplication application;
|
||||
public final String name;
|
||||
public final String type;
|
||||
public final ApplicationSubmissionContext submissionContext;
|
||||
public final ApplicationId appId;
|
||||
public final boolean secureCluster;
|
||||
private int maxAppAttempts = 0;
|
||||
private boolean keepContainersOverRestarts = true;
|
||||
private String queue = YarnConfiguration.DEFAULT_QUEUE_NAME;
|
||||
private int priority = 1;
|
||||
private final Resource resource = Records.newRecord(Resource.class);
|
||||
private final SliderYarnClientImpl yarnClient;
|
||||
private Long submitTime;
|
||||
|
||||
/**
|
||||
* Build the AM Launcher
|
||||
* @param name app name
|
||||
* @param type application type
|
||||
* @param conf hadoop config
|
||||
* @param fs filesystem binding
|
||||
* @param yarnClient yarn client
|
||||
* @param secureCluster flag to indicate secure cluster
|
||||
* @param options map of options. All values are extracted in this constructor only
|
||||
* @param resourceGlobalOptions global options
|
||||
* @param applicationTags any app tags
|
||||
* @param credentials initial set of credentials
|
||||
* @throws IOException
|
||||
* @throws YarnException
|
||||
*/
|
||||
public AppMasterLauncher(String name,
|
||||
String type,
|
||||
Configuration conf,
|
||||
CoreFileSystem fs,
|
||||
SliderYarnClientImpl yarnClient,
|
||||
boolean secureCluster,
|
||||
Map<String, String> options,
|
||||
Map<String, String> resourceGlobalOptions,
|
||||
Set<String> applicationTags,
|
||||
Credentials credentials) throws IOException, YarnException {
|
||||
super(conf, fs, credentials);
|
||||
this.yarnClient = yarnClient;
|
||||
this.application = yarnClient.createApplication();
|
||||
this.name = name;
|
||||
this.type = type;
|
||||
this.secureCluster = secureCluster;
|
||||
|
||||
submissionContext = application.getApplicationSubmissionContext();
|
||||
appId = submissionContext.getApplicationId();
|
||||
// set the application name;
|
||||
submissionContext.setApplicationName(name);
|
||||
// app type used in service enum;
|
||||
submissionContext.setApplicationType(type);
|
||||
if (!applicationTags.isEmpty()) {
|
||||
submissionContext.setApplicationTags(applicationTags);
|
||||
}
|
||||
submissionContext.setNodeLabelExpression(extractLabelExpression(options));
|
||||
|
||||
extractAmRetryCount(submissionContext, resourceGlobalOptions);
|
||||
extractResourceRequirements(resource, options);
|
||||
extractLogAggregationContext(resourceGlobalOptions);
|
||||
}
|
||||
|
||||
public void setMaxAppAttempts(int maxAppAttempts) {
|
||||
this.maxAppAttempts = maxAppAttempts;
|
||||
}
|
||||
|
||||
public void setKeepContainersOverRestarts(boolean keepContainersOverRestarts) {
|
||||
this.keepContainersOverRestarts = keepContainersOverRestarts;
|
||||
}
|
||||
|
||||
|
||||
public Resource getResource() {
|
||||
return resource;
|
||||
}
|
||||
|
||||
public void setMemory(int memory) {
|
||||
resource.setMemory(memory);
|
||||
}
|
||||
|
||||
public void setVirtualCores(int cores) {
|
||||
resource.setVirtualCores(cores);
|
||||
}
|
||||
|
||||
public ApplicationId getApplicationId() {
|
||||
return appId;
|
||||
}
|
||||
|
||||
public int getMaxAppAttempts() {
|
||||
return maxAppAttempts;
|
||||
}
|
||||
|
||||
public boolean isKeepContainersOverRestarts() {
|
||||
return keepContainersOverRestarts;
|
||||
}
|
||||
|
||||
public String getQueue() {
|
||||
return queue;
|
||||
}
|
||||
|
||||
public int getPriority() {
|
||||
return priority;
|
||||
}
|
||||
|
||||
public void setQueue(String queue) {
|
||||
this.queue = queue;
|
||||
}
|
||||
|
||||
public void setPriority(int priority) {
|
||||
this.priority = priority;
|
||||
}
|
||||
|
||||
/**
|
||||
* Complete the launch context (copy in env vars, etc).
|
||||
* @return the container to launch
|
||||
*/
|
||||
public ApplicationSubmissionContext completeAppMasterLaunch()
|
||||
throws IOException {
|
||||
|
||||
//queue priority
|
||||
Priority pri = Records.newRecord(Priority.class);
|
||||
pri.setPriority(priority);
|
||||
submissionContext.setPriority(pri);
|
||||
|
||||
// Set the queue to which this application is to be submitted in the RM
|
||||
// Queue for App master
|
||||
|
||||
submissionContext.setQueue(queue);
|
||||
|
||||
|
||||
//container requirements
|
||||
submissionContext.setResource(resource);
|
||||
submissionContext.setLogAggregationContext(logAggregationContext);
|
||||
|
||||
if (keepContainersOverRestarts) {
|
||||
log.debug("Requesting cluster stays running over AM failure");
|
||||
submissionContext.setKeepContainersAcrossApplicationAttempts(true);
|
||||
}
|
||||
|
||||
if (maxAppAttempts > 0) {
|
||||
log.debug("Setting max AM attempts to {}", maxAppAttempts);
|
||||
submissionContext.setMaxAppAttempts(maxAppAttempts);
|
||||
}
|
||||
|
||||
if (secureCluster) {
|
||||
//tokens
|
||||
log.debug("Credentials: {}",
|
||||
CredentialUtils.dumpTokens(getCredentials(), "\n"));
|
||||
|
||||
} else {
|
||||
propagateUsernameInInsecureCluster();
|
||||
}
|
||||
completeContainerLaunch();
|
||||
submissionContext.setAMContainerSpec(containerLaunchContext);
|
||||
return submissionContext;
|
||||
}
|
||||
|
||||
/**
|
||||
* Submit the application.
|
||||
* @return a launched application representing the submitted application
|
||||
* @throws IOException
|
||||
* @throws YarnException
|
||||
*/
|
||||
public LaunchedApplication submitApplication() throws IOException, YarnException {
|
||||
completeAppMasterLaunch();
|
||||
log.info("Submitting application to Resource Manager");
|
||||
ApplicationId applicationId =
|
||||
yarnClient.submitApplication(submissionContext);
|
||||
// implicit success; record the time
|
||||
submitTime = System.currentTimeMillis();
|
||||
return new LaunchedApplication(applicationId, yarnClient);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a serializable application report. This is a very minimal
|
||||
* report that contains the application Id, name and type —the information
|
||||
* available
|
||||
* @return a data structure which can be persisted
|
||||
*/
|
||||
public SerializedApplicationReport createSerializedApplicationReport() {
|
||||
SerializedApplicationReport sar = new SerializedApplicationReport();
|
||||
sar.applicationId = appId.toString();
|
||||
sar.name = name;
|
||||
sar.applicationType = type;
|
||||
sar.queue = queue;
|
||||
sar.submitTime = submitTime;
|
||||
return sar;
|
||||
}
|
||||
}
|
|
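Purely as a hedged illustration of how the AppMasterLauncher removed above was driven, a sketch that sizes the AM container, picks a queue and submits; the resource sizes, queue name and attempt count are example values.

import java.io.IOException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.slider.core.launch.AppMasterLauncher;
import org.apache.slider.core.launch.LaunchedApplication;

public class AmLaunchSketch {
  public static LaunchedApplication configureAndSubmit(AppMasterLauncher launcher)
      throws IOException, YarnException {
    launcher.setMemory(1024);          // AM container memory in MB
    launcher.setVirtualCores(1);
    launcher.setQueue("default");
    launcher.setMaxAppAttempts(2);
    launcher.setKeepContainersOverRestarts(true);
    // completes the launch context internally, then submits to the RM
    return launcher.submitApplication();
  }
}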
@ -1,108 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.core.launch;
|
||||
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationReport;
|
||||
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
import org.apache.slider.client.SliderYarnClientImpl;
|
||||
import org.apache.slider.common.tools.Duration;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Launched App with logic around it.
|
||||
*/
|
||||
public class LaunchedApplication {
|
||||
|
||||
protected final ApplicationId applicationId;
|
||||
protected final SliderYarnClientImpl yarnClient;
|
||||
|
||||
public LaunchedApplication(ApplicationId applicationId,
|
||||
SliderYarnClientImpl yarnClient) {
|
||||
assert applicationId != null;
|
||||
assert yarnClient != null;
|
||||
this.applicationId = applicationId;
|
||||
this.yarnClient = yarnClient;
|
||||
}
|
||||
|
||||
public LaunchedApplication(SliderYarnClientImpl yarnClient,
|
||||
ApplicationReport report) {
|
||||
this.yarnClient = yarnClient;
|
||||
this.applicationId = report.getApplicationId();
|
||||
}
|
||||
|
||||
public ApplicationId getApplicationId() {
|
||||
return applicationId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Monitor the submitted application for reaching the requested state.
|
||||
* Will also report if the app reaches a later state (failed, killed, etc)
|
||||
* Kill the application if duration != null and the time expires.
|
||||
* @param duration how long to wait -must be more than 0
|
||||
* @param desiredState desired state.
|
||||
* @return the application report -null on a timeout
|
||||
* @throws YarnException
|
||||
* @throws IOException
|
||||
*/
|
||||
public ApplicationReport monitorAppToState(YarnApplicationState desiredState, Duration duration)
|
||||
throws YarnException, IOException {
|
||||
return yarnClient.monitorAppToState(applicationId, desiredState, duration);
|
||||
}
|
||||
|
||||
/**
|
||||
* Kill the submitted application by sending a call to the ASM
|
||||
* @throws YarnException
|
||||
* @throws IOException
|
||||
*/
|
||||
public boolean forceKill(String reason)
|
||||
throws YarnException, IOException {
|
||||
if (applicationId != null) {
|
||||
yarnClient.killRunningApplication(applicationId, reason);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Kill the application
|
||||
* @return the response
|
||||
* @throws YarnException YARN problems
|
||||
* @throws IOException IO problems
|
||||
*/
|
||||
public KillApplicationResponse kill(String reason) throws
|
||||
YarnException,
|
||||
IOException {
|
||||
return yarnClient.killRunningApplication(applicationId, reason);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the application report of this application
|
||||
* @return an application report
|
||||
* @throws YarnException
|
||||
* @throws IOException
|
||||
*/
|
||||
public ApplicationReport getApplicationReport()
|
||||
throws YarnException, IOException {
|
||||
return yarnClient.getApplicationReport(applicationId);
|
||||
}
|
||||
}
|
|
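A hedged sketch of the monitor-then-kill pattern the class removed above supported; the timeout handling and reason string are illustrative only.

import java.io.IOException;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.slider.common.tools.Duration;
import org.apache.slider.core.launch.LaunchedApplication;

public class LaunchedAppSketch {
  public static ApplicationReport waitForRunningOrKill(LaunchedApplication app,
      Duration waitLimit) throws IOException, YarnException {
    ApplicationReport report =
        app.monitorAppToState(YarnApplicationState.RUNNING, waitLimit);
    if (report == null) {
      // a null report means the wait timed out; force-kill the submission
      app.forceKill("timed out waiting for the RUNNING state");
    }
    return report;
  }
}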
@ -1,76 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.core.launch;
|
||||
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationReport;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
import org.apache.slider.api.SliderClusterProtocol;
|
||||
import org.apache.slider.client.SliderYarnClientImpl;
|
||||
import org.apache.slider.common.SliderExitCodes;
|
||||
import org.apache.slider.core.exceptions.SliderException;
|
||||
import org.apache.slider.server.appmaster.rpc.RpcBinder;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.apache.slider.common.Constants.CONNECT_TIMEOUT;
|
||||
import static org.apache.slider.common.Constants.RPC_TIMEOUT;
|
||||
|
||||
/**
|
||||
* A running application built from an app report. This one
|
||||
* can be talked to
|
||||
*/
|
||||
public class RunningApplication extends LaunchedApplication {
|
||||
|
||||
private final ApplicationReport applicationReport;
|
||||
public RunningApplication(SliderYarnClientImpl yarnClient,
|
||||
ApplicationReport applicationReport) {
|
||||
super(yarnClient, applicationReport);
|
||||
this.applicationReport = applicationReport;
|
||||
}
|
||||
|
||||
public ApplicationReport getApplicationReport() {
|
||||
return applicationReport;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Connect to a Slider AM
|
||||
* @param app application report providing the details on the application
|
||||
* @return an instance
|
||||
* @throws YarnException
|
||||
* @throws IOException
|
||||
*/
|
||||
public SliderClusterProtocol connect(ApplicationReport app) throws
|
||||
YarnException,
|
||||
IOException {
|
||||
|
||||
try {
|
||||
return RpcBinder.getProxy(yarnClient.getConfig(),
|
||||
yarnClient.getRmClient(),
|
||||
app,
|
||||
CONNECT_TIMEOUT,
|
||||
RPC_TIMEOUT);
|
||||
} catch (InterruptedException e) {
|
||||
throw new SliderException(SliderExitCodes.EXIT_TIMED_OUT,
|
||||
e,
|
||||
"Interrupted waiting for communications with the Application Master");
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
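For illustration, a hedged sketch of using the removed RunningApplication wrapper to open an RPC channel to a Slider AM from a YARN application report.

import java.io.IOException;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.slider.api.SliderClusterProtocol;
import org.apache.slider.client.SliderYarnClientImpl;
import org.apache.slider.core.launch.RunningApplication;

public class RunningAppSketch {
  public static SliderClusterProtocol connectToAm(SliderYarnClientImpl yarnClient,
      ApplicationReport report) throws IOException, YarnException {
    RunningApplication running = new RunningApplication(yarnClient, report);
    // builds an RPC proxy to the AM described by the report, using the
    // connect/RPC timeouts from the Slider constants
    return running.connect(running.getApplicationReport());
  }
}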
@ -1,263 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.core.persist;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.io.Files;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.slider.common.SliderKeys;
|
||||
import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
|
||||
import org.apache.slider.common.params.Arguments;
|
||||
import org.apache.slider.common.tools.SliderFileSystem;
|
||||
import org.apache.slider.common.tools.SliderUtils;
|
||||
import org.apache.slider.core.conf.ConfTreeOperations;
|
||||
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
|
||||
import org.apache.slider.core.exceptions.BadConfigException;
|
||||
import org.apache.slider.providers.agent.AgentKeys;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.Charset;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Class to prepare and persist app and add-on definitions.
|
||||
*
|
||||
* In this case, the app definition and add-on definitions are auto-inferred from the user input rather than explicit
|
||||
* inclusion of the application package in the config.
|
||||
*
|
||||
* Processing an app definition involves one or more of the following: - modify appConfig - package definition into a
|
||||
* temporary folder - upload to HDFS
|
||||
*
|
||||
* This class keeps track of all the required operations and allows them to be invoked by build operation
|
||||
*/
|
||||
public class AppDefinitionPersister {
|
||||
private static final Logger log =
|
||||
LoggerFactory.getLogger(AppDefinitionPersister.class);
|
||||
|
||||
private final SliderFileSystem sliderFileSystem;
|
||||
private List<AppDefinition> appDefinitions;
|
||||
|
||||
public AppDefinitionPersister(SliderFileSystem sliderFileSystem) {
|
||||
this.sliderFileSystem = sliderFileSystem;
|
||||
appDefinitions = new ArrayList<>();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Process the application package or folder by copying it to the cluster path
|
||||
*
|
||||
* @param appDefinition details of application package
|
||||
*
|
||||
* @throws BadConfigException
|
||||
* @throws IOException
|
||||
*/
|
||||
private void persistDefinitionPackageOrFolder(AppDefinition appDefinition)
|
||||
throws BadConfigException, IOException {
|
||||
if (!appDefinition.appDefPkgOrFolder.canRead()) {
|
||||
throw new BadConfigException("Pkg/Folder cannot be accessed - "
|
||||
+ appDefinition.appDefPkgOrFolder.getAbsolutePath());
|
||||
}
|
||||
|
||||
File src = appDefinition.appDefPkgOrFolder;
|
||||
String targetName = appDefinition.pkgName;
|
||||
log.debug("Package name: " + targetName);
|
||||
if (appDefinition.appDefPkgOrFolder.isDirectory()) {
|
||||
log.info("Processing app package/folder {} for {}",
|
||||
appDefinition.appDefPkgOrFolder.getAbsolutePath(),
|
||||
appDefinition.pkgName);
|
||||
File tmpDir = Files.createTempDir();
|
||||
File zipFile = new File(tmpDir.getCanonicalPath(), File.separator + appDefinition.pkgName);
|
||||
SliderUtils.zipFolder(appDefinition.appDefPkgOrFolder, zipFile);
|
||||
src = zipFile;
|
||||
}
|
||||
|
||||
sliderFileSystem.getFileSystem().copyFromLocalFile(
|
||||
false,
|
||||
false,
|
||||
new Path(src.toURI()),
|
||||
new Path(appDefinition.targetFolderInFs, targetName));
|
||||
}
|
||||
|
||||
public void persistPackages() throws BadConfigException, IOException {
|
||||
for (AppDefinition appDefinition : appDefinitions) {
|
||||
persistDefinitionPackageOrFolder(appDefinition);
|
||||
}
|
||||
}
|
||||
|
||||
public void processSuppliedDefinitions(String clustername,
|
||||
AbstractClusterBuildingActionArgs buildInfo,
|
||||
ConfTreeOperations appConf)
|
||||
throws BadConfigException, IOException, BadCommandArgumentsException {
|
||||
// if metainfo is provided add to the app instance
|
||||
if (buildInfo.appMetaInfo != null || buildInfo.appMetaInfoJson != null) {
|
||||
if (buildInfo.appMetaInfo != null && buildInfo.appMetaInfoJson != null) {
|
||||
throw new BadConfigException("Both %s and %s cannot be specified",
|
||||
Arguments.ARG_METAINFO, Arguments.ARG_METAINFO_JSON);
|
||||
}
|
||||
|
||||
// Now we know that only one of either file or JSON is used
|
||||
boolean isFileUsed = buildInfo.appMetaInfo != null ? true : false;
|
||||
String argUsed = isFileUsed ? Arguments.ARG_METAINFO
|
||||
: Arguments.ARG_METAINFO_JSON;
|
||||
|
||||
if (buildInfo.appDef != null) {
|
||||
throw new BadConfigException("Both %s and %s cannot be specified",
|
||||
argUsed, Arguments.ARG_APPDEF);
|
||||
}
|
||||
if (SliderUtils.isSet(appConf.getGlobalOptions().get(AgentKeys.APP_DEF))) {
|
||||
throw new BadConfigException(
|
||||
"%s cannot not be set if %s is specified in the cmd line ",
|
||||
AgentKeys.APP_DEF, argUsed);
|
||||
}
|
||||
|
||||
if (isFileUsed) {
|
||||
if (!buildInfo.appMetaInfo.canRead() || !buildInfo.appMetaInfo.isFile()) {
|
||||
throw new BadConfigException(
|
||||
"Path specified with %s either cannot be read or is not a file",
|
||||
Arguments.ARG_METAINFO);
|
||||
}
|
||||
} else {
|
||||
if (StringUtils.isEmpty(buildInfo.appMetaInfoJson.trim())) {
|
||||
throw new BadConfigException("Empty string specified with %s",
|
||||
Arguments.ARG_METAINFO_JSON);
|
||||
}
|
||||
}
|
||||
|
||||
File tempDir = Files.createTempDir();
|
||||
File pkgSrcDir = new File(tempDir, "default");
|
||||
if (!pkgSrcDir.exists() && !pkgSrcDir.mkdirs()) {
|
||||
throw new IOException("Failed to create directory: " + pkgSrcDir);
|
||||
}
|
||||
File destMetaInfo = new File(pkgSrcDir, "metainfo.json");
|
||||
if (isFileUsed) {
|
||||
if (buildInfo.appMetaInfo.getName().endsWith(".xml")) {
|
||||
Files.copy(buildInfo.appMetaInfo, new File(pkgSrcDir, "metainfo.xml"));
|
||||
} else {
|
||||
Files.copy(buildInfo.appMetaInfo, destMetaInfo);
|
||||
}
|
||||
} else {
|
||||
Files.write(
|
||||
buildInfo.appMetaInfoJson.getBytes(Charset.forName("UTF-8")),
|
||||
destMetaInfo);
|
||||
}
|
||||
|
||||
Path appDirPath = sliderFileSystem.buildAppDefDirPath(clustername);
|
||||
log.info("Using default app def path {}", appDirPath.toString());
|
||||
|
||||
appDefinitions.add(new AppDefinition(appDirPath, pkgSrcDir,
|
||||
SliderKeys.DEFAULT_APP_PKG));
|
||||
Path appDefPath = new Path(appDirPath, SliderKeys.DEFAULT_APP_PKG);
|
||||
appConf.getGlobalOptions().set(AgentKeys.APP_DEF, appDefPath);
|
||||
log.info("Setting app package to {}.", appDefPath);
|
||||
}
|
||||
|
||||
if (buildInfo.appDef != null) {
|
||||
if (SliderUtils.isSet(appConf.getGlobalOptions().get(AgentKeys.APP_DEF))) {
|
||||
throw new BadConfigException("application.def must not be set if --appdef is provided.");
|
||||
}
|
||||
|
||||
if (!buildInfo.appDef.exists()) {
|
||||
throw new BadConfigException("--appdef is not a valid path.");
|
||||
}
|
||||
|
||||
Path appDirPath = sliderFileSystem.buildAppDefDirPath(clustername);
|
||||
appDefinitions.add(new AppDefinition(appDirPath, buildInfo.appDef, SliderKeys.DEFAULT_APP_PKG));
|
||||
Path appDefPath = new Path(appDirPath, SliderKeys.DEFAULT_APP_PKG);
|
||||
appConf.getGlobalOptions().set(AgentKeys.APP_DEF, appDefPath);
|
||||
log.info("Setting app package to {}.", appDefPath);
|
||||
}
|
||||
|
||||
if (buildInfo.addonDelegate.getAddonMap().size() > 0) {
|
||||
if (SliderUtils.isUnset(appConf.getGlobalOptions().get(AgentKeys.APP_DEF))) {
|
||||
throw new BadConfigException("addon package can only be specified if main app package is specified.");
|
||||
}
|
||||
|
||||
List<String> addons = new ArrayList<String>();
|
||||
Map<String, String> addonMap = buildInfo.addonDelegate.getAddonMap();
|
||||
for (Map.Entry<String, String > entry : addonMap.entrySet()) {
|
||||
String key = entry.getKey();
|
||||
String value = entry.getValue();
|
||||
if (SliderUtils.isUnset(value)) {
|
||||
throw new BadConfigException("Invalid path for addon package " + key);
|
||||
}
|
||||
File defPath = new File(value);
|
||||
if (!defPath.exists()) {
|
||||
throw new BadConfigException("addon folder or package path is not valid.");
|
||||
}
|
||||
|
||||
Path addonPath = sliderFileSystem.buildAddonDirPath(clustername, key);
|
||||
String addonPkgName = "addon_" + key + ".zip";
|
||||
|
||||
log.debug(
|
||||
"addonMap.get(key): {} addonPath: {} defPath: {} addonPkgName: {}",
|
||||
addonMap.get(key), addonPath, defPath, addonPkgName);
|
||||
|
||||
appDefinitions.add(new AppDefinition(addonPath, defPath, addonPkgName));
|
||||
String addOnKey = AgentKeys.ADDON_PREFIX + key;
|
||||
Path addonPkgPath = new Path(addonPath, addonPkgName);
|
||||
log.info("Setting addon package {} to {}.", addOnKey, addonPkgPath);
|
||||
appConf.getGlobalOptions().set(addOnKey, addonPkgPath);
|
||||
addons.add(addOnKey);
|
||||
}
|
||||
|
||||
String existingList = appConf.getGlobalOptions().get(AgentKeys.ADDONS);
|
||||
if (SliderUtils.isUnset(existingList)) {
|
||||
existingList = "";
|
||||
}
|
||||
appConf.getGlobalOptions().set(AgentKeys.ADDONS, existingList + StringUtils.join(addons, ","));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@VisibleForTesting
|
||||
public List<AppDefinitionPersister.AppDefinition> getAppDefinitions() {
|
||||
return appDefinitions;
|
||||
}
|
||||
|
||||
// Helper class to hold details for the app and addon packages
|
||||
static class AppDefinition {
|
||||
// The target folder where the package will be stored
|
||||
public Path targetFolderInFs;
|
||||
// The on disk location of the app def package or folder
|
||||
public File appDefPkgOrFolder;
|
||||
// Package name
|
||||
public String pkgName;
|
||||
|
||||
public AppDefinition(Path targetFolderInFs, File appDefPkgOrFolder, String pkgName) {
|
||||
this.targetFolderInFs = targetFolderInFs;
|
||||
this.appDefPkgOrFolder = appDefPkgOrFolder;
|
||||
this.pkgName = pkgName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return new StringBuilder().append("targetFolderInFs").append(" : ").append(targetFolderInFs.toString())
|
||||
.append(", ")
|
||||
.append("appDefPkgOrFolder").append(" : ").append(appDefPkgOrFolder.toString())
|
||||
.append(", ")
|
||||
.append("pkgName").append(" : ").append(pkgName).toString();
|
||||
}
|
||||
}
|
||||
}
|
|
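A hedged end-to-end sketch of how the removed AppDefinitionPersister was exercised; the cluster name is an example value.

import java.io.IOException;
import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
import org.apache.slider.common.tools.SliderFileSystem;
import org.apache.slider.core.conf.ConfTreeOperations;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.core.persist.AppDefinitionPersister;

public class AppDefPersistSketch {
  public static void stageAppPackages(SliderFileSystem fs,
      AbstractClusterBuildingActionArgs buildInfo, ConfTreeOperations appConf)
      throws BadConfigException, BadCommandArgumentsException, IOException {
    AppDefinitionPersister persister = new AppDefinitionPersister(fs);
    // resolve --metainfo/--appdef/addon arguments into app definitions...
    persister.processSuppliedDefinitions("myapp", buildInfo, appConf);
    // ...then copy the resulting packages into the cluster paths on HDFS
    persister.persistPackages();
  }
}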
@ -29,6 +29,7 @@ import org.codehaus.jackson.JsonParseException;
|
|||
import org.codehaus.jackson.map.DeserializationConfig;
|
||||
import org.codehaus.jackson.map.JsonMappingException;
|
||||
import org.codehaus.jackson.map.ObjectMapper;
|
||||
import org.codehaus.jackson.map.PropertyNamingStrategy;
|
||||
import org.codehaus.jackson.map.SerializationConfig;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
@ -65,6 +66,11 @@ public class JsonSerDeser<T> {
|
|||
mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
|
||||
}
|
||||
|
||||
public JsonSerDeser(Class<T> classType, PropertyNamingStrategy namingStrategy) {
|
||||
this(classType);
|
||||
mapper.setPropertyNamingStrategy(namingStrategy);
|
||||
}
|
||||
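A hedged sketch of the new naming-strategy constructor above; the Application payload type and the snake_case strategy are example choices, and the JsonSerDeser package is assumed to be org.apache.slider.core.persist.

import org.apache.slider.api.resource.Application;
import org.apache.slider.core.persist.JsonSerDeser;
import org.codehaus.jackson.map.PropertyNamingStrategy;

public class SerDeserSketch {
  public static JsonSerDeser<Application> snakeCaseSerDeser() {
    // (de)serialize Application objects using snake_case JSON property names
    return new JsonSerDeser<>(Application.class,
        PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
  }
}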
|
||||
/**
|
||||
* Convert from JSON
|
||||
* @param json input
|
||||
|
|
|
@ -102,19 +102,6 @@ public abstract class AbstractClientProvider extends Configured {
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
* Any provider-side alteration of a configuration can take place here.
|
||||
* @param aggregateConf config to patch
|
||||
* @throws IOException IO problems
|
||||
* @throws SliderException Slider-specific issues
|
||||
*/
|
||||
public void prepareInstanceConfiguration(AggregateConf aggregateConf) throws
|
||||
SliderException,
|
||||
IOException {
|
||||
//default: do nothing
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Prepare the AM settings for launch
|
||||
* @param fileSystem filesystem
|
||||
|
@ -234,7 +221,7 @@ public abstract class AbstractClientProvider extends Configured {
|
|||
* @param appDescription brief description of the application
|
||||
* @return
|
||||
*/
|
||||
public final Set<String> createApplicationTags(String appName,
|
||||
public static final Set<String> createApplicationTags(String appName,
|
||||
String appVersion, String appDescription) {
|
||||
Set<String> tags = new HashSet<>();
|
||||
tags.add(SliderUtils.createNameTag(appName));
|
||||
|
|
|
@ -1,438 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.providers;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
|
||||
import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
|
||||
import org.apache.hadoop.registry.client.types.AddressTypes;
|
||||
import org.apache.hadoop.registry.client.types.Endpoint;
|
||||
import org.apache.hadoop.registry.client.types.ServiceRecord;
|
||||
import org.apache.hadoop.service.Service;
|
||||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerStatus;
|
||||
import org.apache.hadoop.yarn.api.records.Priority;
|
||||
import org.apache.hadoop.yarn.client.api.AMRMClient;
|
||||
import org.apache.slider.api.ClusterDescription;
|
||||
import org.apache.slider.common.SliderKeys;
|
||||
import org.apache.slider.common.tools.ConfigHelper;
|
||||
import org.apache.slider.common.tools.SliderFileSystem;
|
||||
import org.apache.slider.common.tools.SliderUtils;
|
||||
import org.apache.slider.core.conf.AggregateConf;
|
||||
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
|
||||
import org.apache.slider.core.exceptions.SliderException;
|
||||
import org.apache.slider.core.main.ExitCodeProvider;
|
||||
import org.apache.slider.server.appmaster.actions.QueueAccess;
|
||||
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
|
||||
import org.apache.slider.server.appmaster.state.ContainerReleaseSelector;
|
||||
import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
|
||||
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
|
||||
import org.apache.slider.server.services.workflow.ForkedProcessService;
|
||||
import org.apache.slider.server.services.workflow.ServiceParent;
|
||||
import org.apache.slider.server.services.workflow.WorkflowSequenceService;
|
||||
import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* The base class for provider services. It lets the implementations
|
||||
* add sequences of operations, and propagates service failures
|
||||
* upstream
|
||||
*/
|
||||
public abstract class AbstractProviderService
|
||||
extends WorkflowSequenceService
|
||||
implements
|
||||
ProviderCore,
|
||||
SliderKeys,
|
||||
ProviderService {
|
||||
private static final Logger log =
|
||||
LoggerFactory.getLogger(AbstractProviderService.class);
|
||||
protected StateAccessForProviders amState;
|
||||
protected URL amWebAPI;
|
||||
protected YarnRegistryViewForProviders yarnRegistry;
|
||||
protected QueueAccess queueAccess;
|
||||
|
||||
protected AbstractProviderService(String name) {
|
||||
super(name);
|
||||
setStopIfNoChildServicesAtStartup(false);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Configuration getConf() {
|
||||
return getConfig();
|
||||
}
|
||||
|
||||
public StateAccessForProviders getAmState() {
|
||||
return amState;
|
||||
}
|
||||
|
||||
public QueueAccess getQueueAccess() {
|
||||
return queueAccess;
|
||||
}
|
||||
|
||||
public void setAmState(StateAccessForProviders amState) {
|
||||
this.amState = amState;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getHumanName() {
|
||||
return getName().toLowerCase(Locale.ENGLISH);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void bind(StateAccessForProviders stateAccessor,
|
||||
QueueAccess queueAccess,
|
||||
List<Container> liveContainers) {
|
||||
this.amState = stateAccessor;
|
||||
this.queueAccess = queueAccess;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry) {
|
||||
this.yarnRegistry = yarnRegistry;
|
||||
}
|
||||
|
||||
public YarnRegistryViewForProviders getYarnRegistry() {
|
||||
return yarnRegistry;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void notifyContainerCompleted(ContainerId containerId) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Load default Configuration
|
||||
* @param confDir configuration directory
|
||||
* @return configuration
|
||||
* @throws BadCommandArgumentsException
|
||||
* @throws IOException
|
||||
*/
|
||||
@Override
|
||||
public Configuration loadProviderConfigurationInformation(File confDir)
|
||||
throws BadCommandArgumentsException, IOException {
|
||||
return new Configuration(false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Load a specific XML configuration file for the provider config
|
||||
* @param confDir configuration directory
|
||||
* @param siteXMLFilename provider-specific filename
|
||||
* @return a configuration to be included in status
|
||||
* @throws BadCommandArgumentsException argument problems
|
||||
* @throws IOException IO problems
|
||||
*/
|
||||
protected Configuration loadProviderConfigurationInformation(File confDir,
|
||||
String siteXMLFilename)
|
||||
throws BadCommandArgumentsException, IOException {
|
||||
Configuration siteConf;
|
||||
File siteXML = new File(confDir, siteXMLFilename);
|
||||
if (!siteXML.exists()) {
|
||||
throw new BadCommandArgumentsException(
|
||||
"Configuration directory %s doesn't contain %s - listing is %s",
|
||||
confDir, siteXMLFilename, SliderUtils.listDir(confDir));
|
||||
}
|
||||
|
||||
//now read it in
|
||||
siteConf = ConfigHelper.loadConfFromFile(siteXML);
|
||||
log.info("{} file is at {}", siteXMLFilename, siteXML);
|
||||
log.info(ConfigHelper.dumpConfigToString(siteConf));
|
||||
return siteConf;
|
||||
}
|
||||
|
||||
/**
|
||||
* No-op implementation of this method.
|
||||
*/
|
||||
@Override
|
||||
public void initializeApplicationConfiguration(
|
||||
AggregateConf instanceDefinition, SliderFileSystem fileSystem,
|
||||
String roleGroup)
|
||||
throws IOException, SliderException {
|
||||
}
|
||||
|
||||
/**
|
||||
* No-op implementation of this method.
|
||||
*
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public void validateApplicationConfiguration(AggregateConf instance,
|
||||
File confDir,
|
||||
boolean secure)
|
||||
throws IOException, SliderException {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Scan through the roles and see if it is supported.
|
||||
* @param role role to look for
|
||||
* @return true if the role is known about -and therefore
|
||||
* that a launcher thread can be deployed to launch it
|
||||
*/
|
||||
@Override
|
||||
public boolean isSupportedRole(String role) {
|
||||
Collection<ProviderRole> roles = getRoles();
|
||||
for (ProviderRole providedRole : roles) {
|
||||
if (providedRole.name.equals(role)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* override point to allow a process to start executing in this container
|
||||
* @param instanceDefinition cluster description
|
||||
* @param confDir configuration directory
|
||||
* @param env environment
|
||||
* @param execInProgress the callback for the exec events
|
||||
* @return false
|
||||
* @throws IOException
|
||||
* @throws SliderException
|
||||
*/
|
||||
@Override
|
||||
public boolean exec(AggregateConf instanceDefinition,
|
||||
File confDir,
|
||||
Map<String, String> env,
|
||||
ProviderCompleted execInProgress) throws IOException, SliderException {
|
||||
return false;
|
||||
}
|
||||
|
||||
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
|
||||
@Override // ExitCodeProvider
|
||||
public int getExitCode() {
|
||||
Throwable cause = getFailureCause();
|
||||
if (cause != null) {
|
||||
//failed for some reason
|
||||
if (cause instanceof ExitCodeProvider) {
|
||||
return ((ExitCodeProvider) cause).getExitCode();
|
||||
}
|
||||
}
|
||||
ForkedProcessService lastProc = latestProcess();
|
||||
if (lastProc == null || !lastProc.isProcessTerminated()) {
|
||||
return 0;
|
||||
} else {
|
||||
return lastProc.getExitCode();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the latest forked process service that ran
|
||||
* @return the forked process service
|
||||
*/
|
||||
protected ForkedProcessService latestProcess() {
|
||||
Service current = getActiveService();
|
||||
Service prev = getPreviousService();
|
||||
|
||||
Service latest = current != null ? current : prev;
|
||||
if (latest instanceof ForkedProcessService) {
|
||||
return (ForkedProcessService) latest;
|
||||
} else {
|
||||
//it's a composite object, so look inside it for a process
|
||||
if (latest instanceof ServiceParent) {
|
||||
return getFPSFromParentService((ServiceParent) latest);
|
||||
} else {
|
||||
//no match
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Given a parent service, find the one that is a forked process
|
||||
* @param serviceParent parent
|
||||
* @return the forked process service or null if there is none
|
||||
*/
|
||||
protected ForkedProcessService getFPSFromParentService(ServiceParent serviceParent) {
|
||||
List<Service> services = serviceParent.getServices();
|
||||
for (Service s : services) {
|
||||
if (s instanceof ForkedProcessService) {
|
||||
return (ForkedProcessService) s;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* if we are already running, start this service
|
||||
*/
|
||||
protected void maybeStartCommandSequence() {
|
||||
if (isInState(STATE.STARTED)) {
|
||||
startNextService();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new forked process service with the given
|
||||
* name, environment and command list -then add it as a child
|
||||
* for execution in the sequence.
|
||||
*
|
||||
* @param name command name
|
||||
* @param env environment
|
||||
* @param commands command line
|
||||
* @throws IOException
|
||||
* @throws SliderException
|
||||
*/
|
||||
protected ForkedProcessService queueCommand(String name,
|
||||
Map<String, String> env,
|
||||
List<String> commands) throws
|
||||
IOException,
|
||||
SliderException {
|
||||
ForkedProcessService process = buildProcess(name, env, commands);
|
||||
//register the service for lifecycle management; when this service
|
||||
//is terminated, so is the master process
|
||||
addService(process);
|
||||
return process;
|
||||
}
|
||||
|
||||
public ForkedProcessService buildProcess(String name,
|
||||
Map<String, String> env,
|
||||
List<String> commands) throws
|
||||
IOException,
|
||||
SliderException {
|
||||
ForkedProcessService process;
|
||||
process = new ForkedProcessService(name);
|
||||
process.init(getConfig());
|
||||
process.build(env, commands);
|
||||
return process;
|
||||
}
|
||||
|
||||
/*
|
||||
* Build the provider status, can be empty
|
||||
* @return the provider status - map of entries to add to the info section
|
||||
*/
|
||||
@Override
|
||||
public Map<String, String> buildProviderStatus() {
|
||||
return new HashMap<String, String>();
|
||||
}
|
||||
|
||||
/*
|
||||
Build the monitor details. The base implementation includes all the external URL endpoints
|
||||
in the external view
|
||||
*/
|
||||
@Override
|
||||
public Map<String, MonitorDetail> buildMonitorDetails(ClusterDescription clusterDesc) {
|
||||
Map<String, MonitorDetail> details = new LinkedHashMap<String, MonitorDetail>();
|
||||
|
||||
// add in all the endpoints
|
||||
buildEndpointDetails(details);
|
||||
|
||||
return details;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void buildEndpointDetails(Map<String, MonitorDetail> details) {
|
||||
ServiceRecord self = yarnRegistry.getSelfRegistration();
|
||||
|
||||
List<Endpoint> externals = self.external;
|
||||
for (Endpoint endpoint : externals) {
|
||||
String addressType = endpoint.addressType;
|
||||
if (AddressTypes.ADDRESS_URI.equals(addressType)) {
|
||||
try {
|
||||
List<URL> urls = RegistryTypeUtils.retrieveAddressURLs(endpoint);
|
||||
if (!urls.isEmpty()) {
|
||||
details.put(endpoint.api, new MonitorDetail(urls.get(0).toString(), true));
|
||||
}
|
||||
} catch (InvalidRecordException | MalformedURLException ignored) {
|
||||
// Ignored
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void applyInitialRegistryDefinitions(URL amWebURI,
|
||||
ServiceRecord serviceRecord)
|
||||
throws IOException {
|
||||
this.amWebAPI = amWebURI;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
*
|
||||
* @return The base implementation returns the most recent containers first.
|
||||
*/
|
||||
@Override
|
||||
public ContainerReleaseSelector createContainerReleaseSelector() {
|
||||
return new MostRecentContainerReleaseSelector();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void releaseAssignedContainer(ContainerId containerId) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addContainerRequest(AMRMClient.ContainerRequest req) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cancelSingleRequest(AMRMClient.ContainerRequest request) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public int cancelContainerRequests(Priority priority1,
|
||||
Priority priority2,
|
||||
int count) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void updateBlacklist(List<String> blacklistAdditions,
|
||||
List<String> blacklistRemovals) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
@Override
|
||||
public void execute(List<AbstractRMOperation> operations) {
|
||||
for (AbstractRMOperation operation : operations) {
|
||||
operation.execute(this);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* No-op implementation of this method.
|
||||
*/
|
||||
@Override
|
||||
public void rebuildContainerDetails(List<Container> liveContainers,
|
||||
String applicationId, Map<Integer, ProviderRole> providerRoles) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean processContainerStatus(ContainerId containerId,
|
||||
ContainerStatus status) {
|
||||
return false;
|
||||
}
|
||||
}
|
|
@ -19,9 +19,6 @@
|
|||
package org.apache.slider.providers;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.slider.core.conf.AggregateConf;
|
||||
import org.apache.slider.core.conf.ConfTree;
|
||||
import org.apache.slider.core.exceptions.SliderException;
|
||||
|
||||
import java.util.List;
|
||||
public interface ProviderCore {
|
||||
|
@ -31,13 +28,4 @@ public interface ProviderCore {
|
|||
List<ProviderRole> getRoles();
|
||||
|
||||
Configuration getConf();
|
||||
|
||||
/**
|
||||
* Verify that an instance definition is considered valid by the provider
|
||||
* @param instanceDefinition instance definition
|
||||
* @throws SliderException if the configuration is not valid
|
||||
*/
|
||||
void validateInstanceDefinition(AggregateConf instanceDefinition) throws
|
||||
SliderException;
|
||||
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
|
||||
package org.apache.slider.providers;
|
||||
|
||||
import org.apache.slider.api.ResourceKeys;
|
||||
import org.apache.slider.api.resource.Component;
|
||||
|
||||
/**
|
||||
* Provider role and key for use in app requests.
|
||||
|
@ -34,16 +34,8 @@ public final class ProviderRole {
|
|||
public int nodeFailureThreshold;
|
||||
public final long placementTimeoutSeconds;
|
||||
public final String labelExpression;
|
||||
public final Component component;
|
||||
|
||||
public ProviderRole(String name, int id) {
|
||||
this(name,
|
||||
name,
|
||||
id,
|
||||
PlacementPolicy.DEFAULT,
|
||||
ResourceKeys.DEFAULT_NODE_FAILURE_THRESHOLD,
|
||||
ResourceKeys.DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS,
|
||||
ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a provider role
|
||||
|
@ -67,7 +59,7 @@ public final class ProviderRole {
|
|||
policy,
|
||||
nodeFailureThreshold,
|
||||
placementTimeoutSeconds,
|
||||
labelExpression);
|
||||
labelExpression, null);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -87,7 +79,8 @@ public final class ProviderRole {
|
|||
int policy,
|
||||
int nodeFailureThreshold,
|
||||
long placementTimeoutSeconds,
|
||||
String labelExpression) {
|
||||
String labelExpression,
|
||||
Component component) {
|
||||
this.name = name;
|
||||
if (group == null) {
|
||||
this.group = name;
|
||||
|
@ -99,6 +92,8 @@ public final class ProviderRole {
|
|||
this.nodeFailureThreshold = nodeFailureThreshold;
|
||||
this.placementTimeoutSeconds = placementTimeoutSeconds;
|
||||
this.labelExpression = labelExpression;
|
||||
this.component = component;
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -18,18 +18,15 @@

package org.apache.slider.providers;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.slider.api.ClusterDescription;
import org.apache.slider.api.resource.Application;
import org.apache.slider.common.tools.SliderFileSystem;
import org.apache.slider.core.conf.AggregateConf;
import org.apache.slider.core.conf.MapOperations;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.core.launch.ContainerLauncher;
import org.apache.slider.core.main.ExitCodeProvider;
@@ -45,128 +42,17 @@ import java.net.URL;
import java.util.List;
import java.util.Map;

public interface ProviderService extends ProviderCore,
Service,
RMOperationHandlerActions,
ExitCodeProvider {
public interface ProviderService extends Service {

/**
* Set up the entire container launch context
* @param containerLauncher
* @param instanceDefinition
* @param container
* @param providerRole
* @param sliderFileSystem
* @param generatedConfPath
* @param appComponent
* @param containerTmpDirPath
*/
void buildContainerLaunchContext(ContainerLauncher containerLauncher,
AggregateConf instanceDefinition,
Container container,
ProviderRole providerRole,
SliderFileSystem sliderFileSystem,
Path generatedConfPath,
MapOperations resourceComponent,
MapOperations appComponent,
Path containerTmpDirPath) throws
IOException,
SliderException;
Application application, Container container, ProviderRole providerRole,
SliderFileSystem sliderFileSystem) throws IOException, SliderException;

/**
* Notify the providers of container completion
* @param containerId container that has completed
*/
void notifyContainerCompleted(ContainerId containerId);

/**
* Execute a process in the AM
* @param instanceDefinition cluster description
* @param confDir configuration directory
* @param env environment
* @param execInProgress the callback for the exec events
* @return true if a process was actually started
* @throws IOException
* @throws SliderException
*/
boolean exec(AggregateConf instanceDefinition,
File confDir,
Map<String, String> env,
ProviderCompleted execInProgress) throws IOException,
SliderException;

/**
* Scan through the roles and see if it is supported.
* @param role role to look for
* @return true if the role is known about -and therefore
* that a launcher thread can be deployed to launch it
*/
boolean isSupportedRole(String role);

/**
* Load a specific XML configuration file for the provider config
* @param confDir configuration directory
* @return a configuration to be included in status
* @throws BadCommandArgumentsException
* @throws IOException
*/
Configuration loadProviderConfigurationInformation(File confDir)
throws BadCommandArgumentsException, IOException;

/**
* The application configuration should be initialized here
*
* @param instanceDefinition
* @param fileSystem
* @param roleGroup
* @throws IOException
* @throws SliderException
*/
void initializeApplicationConfiguration(AggregateConf instanceDefinition,
SliderFileSystem fileSystem, String roleGroup) throws IOException,
SliderException;

/**
* This is a validation of the application configuration on the AM.
* Here is where things like the existence of keytabs and other
* not-seen-client-side properties can be tested, before
* the actual process is spawned.
* @param instanceDefinition clusterSpecification
* @param confDir configuration directory
* @param secure flag to indicate that secure mode checks must exist
* @throws IOException IO problemsn
* @throws SliderException any failure
*/
void validateApplicationConfiguration(AggregateConf instanceDefinition,
File confDir,
boolean secure
) throws IOException, SliderException;

/*
* Build the provider status, can be empty
* @return the provider status - map of entries to add to the info section
*/
Map<String, String> buildProviderStatus();

/**
* Build a map of data intended for the AM webapp that is specific
* about this provider. The key is some text to be displayed, and the
* value can be a URL that will create an anchor over the key text.
*
* If no anchor is needed/desired, insert the key with a null value.
* @return the details
*/
Map<String, MonitorDetail> buildMonitorDetails(ClusterDescription clusterSpec);

/**
* Get a human friendly name for web UIs and messages
* @return a name string. Default is simply the service instance name.
*/
String getHumanName();

public void bind(StateAccessForProviders stateAccessor,
QueueAccess queueAccess,
List<Container> liveContainers);
void setAMState(StateAccessForProviders stateAccessForProviders);

/**
* Bind to the YARN registry
@@ -174,39 +60,6 @@ public interface ProviderService extends ProviderCore,
*/
void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry);

/**
* Build up the endpoint details for this service
* @param details
*/
void buildEndpointDetails(Map<String, MonitorDetail> details);

/**
* Prior to going live -register the initial service registry data
* @param amWebURI URL to the AM. This may be proxied, so use relative paths
* @param serviceRecord service record to build up
*/
void applyInitialRegistryDefinitions(URL amWebURI,
ServiceRecord serviceRecord)
throws IOException;

/**
* Create the container release selector for this provider...any policy
* can be implemented
* @return the selector to use for choosing containers.
*/
ContainerReleaseSelector createContainerReleaseSelector();

/**
* On AM restart (for whatever reason) this API is required to rebuild the AM
* internal state with the containers which were already assigned and running
*
* @param liveContainers
* @param applicationId
* @param providerRoles
*/
void rebuildContainerDetails(List<Container> liveContainers,
String applicationId, Map<Integer, ProviderRole> providerRoles);

/**
* Process container status
* @return true if status needs to be requested again, false otherwise
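// Editor's note, not part of the diff: the interface above shrinks to a plain
// YARN Service and the launch hook now receives the Application model directly.
// A call site (names assumed for illustration) changes roughly as follows:
//   before: provider.buildContainerLaunchContext(launcher, instanceDefinition,
//               container, role, fs, generatedConfPath, resourceComponent,
//               appComponent, containerTmpDirPath);
//   after:  provider.buildContainerLaunchContext(launcher, application,
//               container, role, fs);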
File diff suppressed because it is too large

@@ -22,7 +22,6 @@ import org.apache.slider.common.SliderKeys;
import org.apache.slider.common.tools.SliderFileSystem;
import org.apache.slider.core.conf.AggregateConf;
import org.apache.slider.core.conf.ConfTreeOperations;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.providers.AbstractClientProvider;
import org.apache.slider.providers.ProviderRole;
@@ -30,14 +29,10 @@ import org.apache.slider.providers.ProviderUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static org.apache.slider.providers.docker.DockerKeys.DOCKER_IMAGE;

public class DockerClientProvider extends AbstractClientProvider
implements SliderKeys {

@@ -64,35 +59,7 @@ public class DockerClientProvider extends AbstractClientProvider
public void validateInstanceDefinition(AggregateConf instanceDefinition,
SliderFileSystem fs) throws SliderException {
super.validateInstanceDefinition(instanceDefinition, fs);

ConfTreeOperations appConf = instanceDefinition.getAppConfOperations();
ConfTreeOperations resources = instanceDefinition.getResourceOperations();

for (String roleGroup : resources.getComponentNames()) {
if (roleGroup.equals(COMPONENT_AM)) {
continue;
}
if (appConf.getComponentOpt(roleGroup, DOCKER_IMAGE, null) == null &&
appConf.getGlobalOptions().get(DOCKER_IMAGE) == null) {
throw new BadConfigException("Property " + DOCKER_IMAGE + " not " +
"specified for " + roleGroup);
}

providerUtils.getPackages(roleGroup, appConf);

if (appConf.getComponentOptBool(roleGroup, AM_CONFIG_GENERATION, false)) {
// build and localize configuration files
Map<String, Map<String, String>> configurations =
providerUtils.buildConfigurations(appConf, appConf, null,
null, roleGroup, roleGroup, null);
try {
providerUtils.localizeConfigFiles(null, roleGroup, roleGroup, appConf,
configurations, null, fs, null);
} catch (IOException e) {
throw new BadConfigException(e.toString());
}
}
}
//TODO validate Application payload, part of that is already done in ApplicationApiService, need to do more
}

@Override
@@ -17,286 +17,129 @@
*/
package org.apache.slider.providers.docker;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.slider.api.ClusterDescription;
import org.apache.slider.api.ClusterNode;
import org.apache.slider.api.OptionKeys;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.api.resource.ContainerState;
import org.apache.slider.common.SliderKeys;
import org.apache.slider.common.tools.SliderFileSystem;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.core.conf.AggregateConf;
import org.apache.slider.core.conf.ConfTreeOperations;
import org.apache.slider.core.conf.MapOperations;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.core.launch.CommandLineBuilder;
import org.apache.slider.core.launch.ContainerLauncher;
import org.apache.slider.core.registry.docstore.ConfigFormat;
import org.apache.slider.core.registry.docstore.ConfigUtils;
import org.apache.slider.core.registry.docstore.ExportEntry;
import org.apache.slider.providers.AbstractProviderService;
import org.apache.slider.providers.MonitorDetail;
import org.apache.slider.providers.ProviderCore;
import org.apache.slider.core.registry.docstore.PublishedConfiguration;
import org.apache.slider.providers.ProviderRole;
import org.apache.slider.providers.ProviderService;
import org.apache.slider.providers.ProviderUtils;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Scanner;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Pattern;

public class DockerProviderService extends AbstractProviderService implements
ProviderCore,
DockerKeys,
SliderKeys {
public class DockerProviderService extends AbstractService
implements ProviderService, DockerKeys, SliderKeys {

protected static final Logger log =
LoggerFactory.getLogger(DockerProviderService.class);
private static final ProviderUtils providerUtils = new ProviderUtils(log);
private static final String EXPORT_GROUP = "quicklinks";
private static final String APPLICATION_TAG = "application";
private static final String HOST_KEY_FORMAT = "${%s_HOST}";
private static final String IP_KEY_FORMAT = "${%s_IP}";
private static final String VARIABLE_INDICATOR = "${";

private String clusterName = null;
private SliderFileSystem fileSystem = null;

private final Map<String, Set<ExportEntry>> exportMap =
new ConcurrentHashMap<>();
private static final String QUICK_LINKS = "quicklinks";
protected StateAccessForProviders amState;
protected YarnRegistryViewForProviders yarnRegistry;

protected DockerProviderService() {
super("DockerProviderService");
}

@Override
public List<ProviderRole> getRoles() {
return Collections.emptyList();
public void setAMState(StateAccessForProviders stateAccessor) {
this.amState = stateAccessor;
}

@Override
public boolean isSupportedRole(String role) {
return true;
public void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry) {
this.yarnRegistry = yarnRegistry;
}

@Override
public void validateInstanceDefinition(AggregateConf instanceDefinition)
throws SliderException {
}

private String getClusterName() {
if (SliderUtils.isUnset(clusterName)) {
clusterName = getAmState().getInternalsSnapshot().get(OptionKeys.APPLICATION_NAME);
}
return clusterName;
}

@Override
public void buildContainerLaunchContext(ContainerLauncher launcher,
AggregateConf instanceDefinition, Container container,
ProviderRole providerRole, SliderFileSystem fileSystem,
Path generatedConfPath, MapOperations resourceComponent,
MapOperations appComponent, Path containerTmpDirPath)
Application application, Container container, ProviderRole providerRole,
SliderFileSystem fileSystem)
throws IOException, SliderException {

String roleName = providerRole.name;
String roleGroup = providerRole.group;

log.info("Build launch context for Docker");
log.debug(instanceDefinition.toString());

ConfTreeOperations appConf = instanceDefinition.getAppConfOperations();
Component component = providerRole.component;
launcher.setYarnDockerMode(true);
launcher.setDockerImage(appConf.getComponentOpt(roleGroup, DOCKER_IMAGE,
null));
launcher.setDockerNetwork(appConf.getComponentOpt(roleGroup, DOCKER_NETWORK,
DEFAULT_DOCKER_NETWORK));
launcher.setRunPrivilegedContainer(appConf.getComponentOptBool(roleGroup,
DOCKER_USE_PRIVILEGED, DEFAULT_DOCKER_USE_PRIVILEGED));
launcher.setDockerImage(component.getArtifact().getId());
launcher.setDockerNetwork(component.getConfiguration()
.getProperty(DOCKER_NETWORK, DEFAULT_DOCKER_NETWORK));
launcher.setRunPrivilegedContainer(component.getRunPrivilegedContainer());

// Set the environment
Map<String, String> standardTokens = providerUtils.getStandardTokenMap(
getAmState().getAppConfSnapshot(), getAmState().getInternalsSnapshot(),
roleName, roleGroup, container.getId().toString(), getClusterName());
Map<String, String> replaceTokens = providerUtils.filterSiteOptions(
appConf.getComponent(roleGroup).options, standardTokens);
replaceTokens.putAll(standardTokens);
launcher.putEnv(SliderUtils.buildEnvMap(appComponent, replaceTokens));
// Generate tokens (key-value pair) for config substitution.
Map<String, String> standardTokens = providerUtils
.getStandardTokenMap(application.getConfiguration(),
component.getConfiguration(), roleName, roleGroup,
container.getId().toString(), application.getName());
Map<String, String> tokensForSubstitution = providerUtils.substituteConfigs(
component.getConfiguration().getProperties(), standardTokens);

String workDir = ApplicationConstants.Environment.PWD.$();
launcher.setEnv("WORK_DIR", workDir);
log.info("WORK_DIR set to {}", workDir);
String logDir = ApplicationConstants.LOG_DIR_EXPANSION_VAR;
launcher.setEnv("LOG_DIR", logDir);
log.info("LOG_DIR set to {}", logDir);
tokensForSubstitution.putAll(standardTokens);

// Set the environment variables
launcher.putEnv(SliderUtils
.buildEnvMap(component.getConfiguration(), tokensForSubstitution));
launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$());
launcher.setEnv("LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
if (System.getenv(HADOOP_USER_NAME) != null) {
launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME));
}
//add english env
launcher.setEnv("LANG", "en_US.UTF-8");
launcher.setEnv("LC_ALL", "en_US.UTF-8");
launcher.setEnv("LANGUAGE", "en_US.UTF-8");

//local resources
providerUtils.localizePackages(launcher, fileSystem, appConf, roleGroup,
getClusterName());
for (Entry<String, String> entry : launcher.getEnv().entrySet()) {
tokensForSubstitution.put("${" + entry.getKey() + "}", entry.getValue());
}

providerUtils.addRoleHostTokens(tokensForSubstitution, amState);

log.info("Token for substitution: " + tokensForSubstitution);

if (SliderUtils.isHadoopClusterSecure(getConfig())) {
providerUtils.localizeServiceKeytabs(launcher, instanceDefinition,
fileSystem, getClusterName());
//TODO localize key tabs, WHY is this code needed ? WHY DOES CONTAINER REQUIRE AM KEYTAB??
providerUtils.localizeServiceKeytabs(launcher, fileSystem, application);
}

if (appComponent.getOptionBool(AM_CONFIG_GENERATION, false)) {
// build and localize configuration files
Map<String, Map<String, String>> configurations =
providerUtils.buildConfigurations(
instanceDefinition.getAppConfOperations(),
instanceDefinition.getInternalOperations(),
container.getId().toString(), getClusterName(),
roleName, roleGroup, getAmState());
providerUtils.localizeConfigFiles(launcher, roleName, roleGroup,
appConf, configurations, launcher.getEnv(), fileSystem,
getClusterName());
}

//add the configuration resources
launcher.addLocalResources(fileSystem.submitDirectory(
generatedConfPath,
PROPAGATED_CONF_DIR_NAME));
// create config file on hdfs and add local resource
providerUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
component, tokensForSubstitution, amState);

CommandLineBuilder operation = new CommandLineBuilder();
operation.add(appConf.getComponentOpt(roleGroup, DOCKER_START_COMMAND,
"/bin/bash"));

operation.add(component.getLaunchCommand());
operation.add("> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/"
+ OUT_FILE + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/"
+ ERR_FILE);

launcher.addCommand(operation.build());

// Additional files to localize
String appResourcesString = instanceDefinition.getAppConfOperations()
.getGlobalOptions().getOption(APP_RESOURCES, null);
log.info("Configuration value for extra resources to localize: {}", appResourcesString);
if (null != appResourcesString) {
try (Scanner scanner = new Scanner(appResourcesString).useDelimiter(",")) {
while (scanner.hasNext()) {
String resource = scanner.next();
Path resourcePath = new Path(resource);
LocalResource extraResource = fileSystem.createAmResource(
fileSystem.getFileSystem().resolvePath(resourcePath),
LocalResourceType.FILE);
String destination = APP_RESOURCES_DIR + "/" + resourcePath.getName();
log.info("Localizing {} to {}", resourcePath, destination);
// TODO Can we try harder to avoid collisions?
launcher.addLocalResource(destination, extraResource);
}
}
}
// publish exports
// TODO move this to app level, no need to do this for every container launch
providerUtils
.substituteConfigs(application.getQuicklinks(), tokensForSubstitution);
PublishedConfiguration pubconf = new PublishedConfiguration(QUICK_LINKS,
application.getQuicklinks().entrySet());
amState.getPublishedSliderConfigurations().put(QUICK_LINKS, pubconf);
}

@Override
public void initializeApplicationConfiguration(
AggregateConf instanceDefinition, SliderFileSystem fileSystem,
String roleGroup)
throws IOException, SliderException {
this.fileSystem = fileSystem;
}

@Override
public void applyInitialRegistryDefinitions(URL amWebURI,
ServiceRecord serviceRecord)
throws IOException {
super.applyInitialRegistryDefinitions(amWebURI, serviceRecord);

// identify client component
String clientName = null;
ConfTreeOperations appConf = getAmState().getAppConfSnapshot();
for (String component : appConf.getComponentNames()) {
if (COMPONENT_TYPE_CLIENT.equals(appConf.getComponentOpt(component,
COMPONENT_TYPE_KEY, null))) {
clientName = component;
break;
}
}
if (clientName == null) {
log.info("No client component specified, not publishing client configs");
return;
}

// register AM-generated client configs
// appConf should already be resolved!
MapOperations clientOperations = appConf.getComponent(clientName);
if (!clientOperations.getOptionBool(AM_CONFIG_GENERATION, false)) {
log.info("AM config generation is false, not publishing client configs");
return;
}

// build and localize configuration files
Map<String, Map<String, String>> configurations =
providerUtils.buildConfigurations(appConf, getAmState()
.getInternalsSnapshot(), null, getClusterName(), clientName,
clientName, getAmState());

for (Map.Entry<String, Map<String, String>> entry : configurations.entrySet()) {
String configFileDN = entry.getKey();
String configFileName = appConf.getComponentOpt(clientName,
OptionKeys.CONF_FILE_PREFIX + configFileDN + OptionKeys
.NAME_SUFFIX, null);
String configFileType = appConf.getComponentOpt(clientName,
OptionKeys.CONF_FILE_PREFIX + configFileDN + OptionKeys
.TYPE_SUFFIX, null);
if (configFileName == null || configFileType == null) {
continue;
}
ConfigFormat configFormat = ConfigFormat.resolve(configFileType);

Map<String, String> config = entry.getValue();
ConfigUtils.prepConfigForTemplateOutputter(configFormat, config,
fileSystem, getClusterName(),
new File(configFileName).getName());
providerUtils.publishApplicationInstanceData(configFileDN, configFileDN,
config.entrySet(), getAmState());
}
}

@Override
public void notifyContainerCompleted(ContainerId containerId) {
if (containerId != null) {
String containerIdStr = containerId.toString();
log.info("Removing container exports for {}", containerIdStr);
for (Set<ExportEntry> exportEntries : exportMap.values()) {
for (Iterator<ExportEntry> iter = exportEntries.iterator();
iter.hasNext();) {
ExportEntry entry = iter.next();
if (containerIdStr.equals(entry.getContainerId())) {
iter.remove();
}
}
}
}
}

@Override
public boolean processContainerStatus(ContainerId containerId,
ContainerStatus status) {
log.debug("Handling container status: {}", status);
@@ -304,144 +147,24 @@ public class DockerProviderService extends AbstractProviderService implements
SliderUtils.isUnset(status.getHost())) {
return true;
}
RoleInstance instance = getAmState().getOwnedContainer(containerId);
RoleInstance instance = amState.getOwnedContainer(containerId);
if (instance == null) {
// container is completed?
return false;
}

String roleName = instance.role;
String roleGroup = instance.group;
String containerIdStr = containerId.toString();

providerUtils.updateServiceRecord(getAmState(), yarnRegistry,
containerIdStr, roleName, status.getIPs(), status.getHost());

publishExportGroups(containerIdStr, roleName, roleGroup,
status.getHost(), status.getIPs());
providerUtils.updateServiceRecord(amState, yarnRegistry,
containerId.toString(), instance.role, status.getIPs(), status.getHost());
// TODO publish ip and host
org.apache.slider.api.resource.Container container =
instance.providerRole.component.getContainer(containerId.toString());
if (container != null) {
container.setIp(StringUtils.join(",", status.getIPs()));
container.setHostname(status.getHost());
container.setState(ContainerState.READY);
} else {
log.warn(containerId + " not found in Application!");
}
return false;
}

/**
* This method looks for configuration properties of the form
* export.key,value and publishes the key,value pair. Standard tokens are
* substituted into the value, and COMPONENTNAME_HOST and THIS_HOST tokens
* are substituted with the actual hostnames of the containers.
*/
protected void publishExportGroups(String containerId,
String roleName, String roleGroup, String thisHost, List<String> ips) {
ConfTreeOperations appConf = getAmState().getAppConfSnapshot();
ConfTreeOperations internalsConf = getAmState().getInternalsSnapshot();

Map<String, String> exports = providerUtils.getExports(
getAmState().getAppConfSnapshot(), roleGroup);

// publish export groups if any
Map<String, String> standardTokens = providerUtils.getStandardTokenMap(
appConf, internalsConf, roleName, roleGroup, containerId,
getClusterName());
Map<String, String> replaceTokens = providerUtils.filterSiteOptions(
appConf.getComponent(roleGroup).options, standardTokens);
replaceTokens.putAll(standardTokens);

String roleNameKey = providerUtils.getNameKey(roleName, roleGroup,
appConf);
String roleNameIPKey = null;
if (roleNameKey != null) {
replaceTokens.put(String.format(HOST_KEY_FORMAT, roleNameKey), thisHost);
roleNameIPKey = Pattern.quote(String.format(IP_KEY_FORMAT, roleNameKey));
} else {
// should not happen, but log if it does
log.info("Not replacing HOST or IP tokens because key was null for {}",
roleName);
}
String roleGroupKey = providerUtils.getGroupKey(roleGroup, appConf);
String roleGroupIPKey = null;
if (roleGroupKey != null) {
if (roleNameKey == null || !roleGroupKey.equals(roleNameKey)) {
replaceTokens.put(String.format(HOST_KEY_FORMAT, roleGroupKey),
thisHost);
roleGroupIPKey = Pattern.quote(String.format(IP_KEY_FORMAT,
roleGroupKey));
}
} else {
// should not happen, but log if it does
log.info("Not replacing HOST or IP tokens because key was null for {}",
roleGroup);
}
replaceTokens.put("${THIS_HOST}", thisHost);

for (Entry<String, String> export : exports.entrySet()) {
String value = export.getValue();
// replace host names and site properties
for (Map.Entry<String, String> entry : replaceTokens.entrySet()) {
String token = entry.getKey();
if (value.contains(token)) {
value = value.replaceAll(Pattern.quote(token), entry.getValue());
}
}
Set<String> values = new HashSet<>();
for (String ip : ips) {
values.add(substituteIP(roleNameIPKey, roleGroupIPKey, ip, value));
}
for (String exportValue : values) {
if (exportValue.contains(VARIABLE_INDICATOR)) {
// not all variables have been substituted, so do not export
continue;
}
ExportEntry entry = new ExportEntry();
entry.setContainerId(containerId);
entry.setLevel(APPLICATION_TAG);
entry.setValue(exportValue);
entry.setUpdatedTime(new Date().toString());
Set<ExportEntry> exportEntries = getExportEntries(export.getKey());
exportEntries.add(entry);
log.info("Preparing to publish for {}. Key {} and Value {}",
roleName, export.getKey(), entry);
}
}
if (!exportMap.isEmpty()) {
providerUtils.publishExportGroup(exportMap, getAmState(), EXPORT_GROUP);
}
}

protected String substituteIP(String roleNameIPKey, String roleGroupIPKey,
String ip, String value) {
if (roleNameIPKey != null) {
value = value.replaceAll(roleNameIPKey, ip);
}
if (roleGroupIPKey != null) {
value = value.replaceAll(roleGroupIPKey, ip);
}
return value;
}

protected Set<ExportEntry> getExportEntries(String key) {
if (!this.exportMap.containsKey(key)) {
synchronized (this.exportMap) {
if (!this.exportMap.containsKey(key)) {
this.exportMap.put(key, Collections.newSetFromMap(
new ConcurrentHashMap<>()));
}
}
}

return this.exportMap.get(key);
}

@Override
public Map<String, MonitorDetail> buildMonitorDetails(ClusterDescription clusterDesc) {
Map<String, MonitorDetail> details = super.buildMonitorDetails(clusterDesc);
buildRoleHostDetails(details);
return details;
}

private void buildRoleHostDetails(Map<String, MonitorDetail> details) {
for (Map.Entry<String, Map<String, ClusterNode>> entry :
getAmState().getRoleClusterNodeMapping().entrySet()) {
details.put(entry.getKey() + " Host(s)/Container(s)",
new MonitorDetail(providerUtils.getHostsList(
entry.getValue().values(), false).toString(), false));
}
}
}
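// Editor's illustration, not Slider source: a minimal, self-contained sketch of
// the token substitution described in publishExportGroups() above, where
// ${COMPONENT_HOST}/${THIS_HOST} style placeholders are replaced before an
// export value is published and values with unresolved "${" are skipped.
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;

public class ExportTokenSubstitutionDemo {
  public static void main(String[] args) {
    Map<String, String> replaceTokens = new HashMap<>();
    replaceTokens.put("${HBASE_MASTER_HOST}", "node-1.example.com");
    replaceTokens.put("${THIS_HOST}", "node-1.example.com");

    String value = "http://${HBASE_MASTER_HOST}:16010";
    for (Map.Entry<String, String> token : replaceTokens.entrySet()) {
      if (value.contains(token.getKey())) {
        value = value.replaceAll(Pattern.quote(token.getKey()), token.getValue());
      }
    }
    if (!value.contains("${")) {   // unresolved variables would not be exported
      System.out.println(value);   // http://node-1.example.com:16010
    }
  }
}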
@@ -1,305 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.slider.providers.slideram;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.slider.api.InternalKeys;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.api.RoleKeys;
import org.apache.slider.common.SliderKeys;
import org.apache.slider.common.SliderXmlConfKeys;
import org.apache.slider.common.tools.SliderFileSystem;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.core.conf.AggregateConf;
import org.apache.slider.core.conf.MapOperations;
import org.apache.slider.core.exceptions.BadClusterStateException;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.core.launch.AbstractLauncher;
import org.apache.slider.core.launch.JavaCommandLineBuilder;
import org.apache.slider.providers.AbstractClientProvider;
import org.apache.slider.providers.PlacementPolicy;
import org.apache.slider.providers.ProviderRole;
import org.apache.slider.providers.ProviderUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.apache.slider.api.ResourceKeys.COMPONENT_INSTANCES;

/**
* handles the setup of the Slider AM.
* This keeps aspects of role, cluster validation and Clusterspec setup
* out of the core slider client
*/
public class SliderAMClientProvider extends AbstractClientProvider
implements SliderKeys {


protected static final Logger log =
LoggerFactory.getLogger(SliderAMClientProvider.class);
protected static final String NAME = "SliderAM";
public static final String INSTANCE_RESOURCE_BASE = PROVIDER_RESOURCE_BASE_ROOT +
"slideram/instance/";
public static final String INTERNAL_JSON =
INSTANCE_RESOURCE_BASE + "internal.json";
public static final String APPCONF_JSON =
INSTANCE_RESOURCE_BASE + "appconf.json";
public static final String RESOURCES_JSON =
INSTANCE_RESOURCE_BASE + "resources.json";

public SliderAMClientProvider(Configuration conf) {
super(conf);
}

/**
* List of roles
*/
public static final List<ProviderRole> ROLES =
new ArrayList<ProviderRole>();

public static final int KEY_AM = ROLE_AM_PRIORITY_INDEX;

public static final ProviderRole APPMASTER =
new ProviderRole(COMPONENT_AM, KEY_AM,
PlacementPolicy.EXCLUDE_FROM_FLEXING,
ResourceKeys.DEFAULT_NODE_FAILURE_THRESHOLD,
0, "");

/**
* Initialize role list
*/
static {
ROLES.add(APPMASTER);
}

@Override
public String getName() {
return NAME;
}

@Override
public List<ProviderRole> getRoles() {
return ROLES;
}


@Override //Client
public void preflightValidateClusterConfiguration(SliderFileSystem sliderFileSystem,
String clustername,
Configuration configuration,
AggregateConf instanceDefinition,
Path clusterDirPath,
Path generatedConfDirPath,
boolean secure)
throws SliderException, IOException {

super.preflightValidateClusterConfiguration(sliderFileSystem, clustername, configuration, instanceDefinition, clusterDirPath, generatedConfDirPath, secure);
//add a check for the directory being writeable by the current user
String
dataPath = instanceDefinition.getInternalOperations()
.getGlobalOptions()
.getMandatoryOption(
InternalKeys.INTERNAL_DATA_DIR_PATH);

Path path = new Path(dataPath);
sliderFileSystem.verifyDirectoryWriteAccess(path);
Path historyPath = new Path(clusterDirPath, SliderKeys.HISTORY_DIR_NAME);
sliderFileSystem.verifyDirectoryWriteAccess(historyPath);
}

/**
* Verify that an instance definition is considered valid by the provider
* @param instanceDefinition instance definition
* @throws SliderException if the configuration is not valid
*/
public void validateInstanceDefinition(AggregateConf instanceDefinition, SliderFileSystem fs) throws
SliderException {

super.validateInstanceDefinition(instanceDefinition, fs);

// make sure there is no negative entry in the instance count
Map<String, Map<String, String>> instanceMap =
instanceDefinition.getResources().components;
for (Map.Entry<String, Map<String, String>> entry : instanceMap.entrySet()) {
MapOperations mapOperations = new MapOperations(entry);
int instances = mapOperations.getOptionInt(COMPONENT_INSTANCES, 0);
if (instances < 0) {
throw new BadClusterStateException(
"Component %s has negative instance count: %d",
mapOperations.name,
instances);
}
}
}

/**
* The Slider AM sets up all the dependency JARs above slider.jar itself
* {@inheritDoc}
*/
public void prepareAMAndConfigForLaunch(SliderFileSystem fileSystem,
Configuration serviceConf,
AbstractLauncher launcher,
AggregateConf instanceDescription,
Path snapshotConfDirPath,
Path generatedConfDirPath,
Configuration clientConfExtras,
String libdir,
Path tempPath, boolean miniClusterTestRun)
throws IOException, SliderException {

Map<String, LocalResource> providerResources = new HashMap<>();

ProviderUtils.addProviderJar(providerResources,
this,
SLIDER_JAR,
fileSystem,
tempPath,
libdir,
miniClusterTestRun);

log.info("Loading all dependencies for AM.");
// If slider.tar.gz is available in hdfs use it, else upload all jars
Path dependencyLibTarGzip = fileSystem.getDependencyTarGzip();
if (fileSystem.isFile(dependencyLibTarGzip)) {
SliderUtils.putAmTarGzipAndUpdate(providerResources, fileSystem);
} else {
for (String libDirProp : SliderUtils.getLibDirs()) {
ProviderUtils.addAllDependencyJars(providerResources,
fileSystem,
tempPath,
libdir,
libDirProp);

}
}
addKeytabResourceIfNecessary(fileSystem,
instanceDescription,
providerResources);

launcher.addLocalResources(providerResources);

//also pick up all env variables from a map
launcher.copyEnvVars(
instanceDescription.getInternalOperations().getOrAddComponent(
SliderKeys.COMPONENT_AM));
}

/**
* If the cluster is secure, and an HDFS installed keytab is available for AM
* authentication, add this keytab as a local resource for the AM launch.
*
* @param fileSystem
* @param instanceDescription
* @param providerResources
* @throws IOException
* @throws BadConfigException if there's no keytab and it is explicitly required.
*/
protected void addKeytabResourceIfNecessary(SliderFileSystem fileSystem,
AggregateConf instanceDescription,
Map<String, LocalResource> providerResources)
throws IOException, BadConfigException {
if (UserGroupInformation.isSecurityEnabled()) {
String keytabPathOnHost = instanceDescription.getAppConfOperations()
.getComponent(SliderKeys.COMPONENT_AM).get(
SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
if (SliderUtils.isUnset(keytabPathOnHost)) {
String amKeytabName = instanceDescription.getAppConfOperations()
.getComponent(SliderKeys.COMPONENT_AM).get(
SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
String keytabDir = instanceDescription.getAppConfOperations()
.getComponent(SliderKeys.COMPONENT_AM).get(
SliderXmlConfKeys.KEY_HDFS_KEYTAB_DIR);
Path keytabPath = fileSystem.buildKeytabPath(keytabDir, amKeytabName,
instanceDescription.getName());
if (fileSystem.getFileSystem().exists(keytabPath)) {
LocalResource keytabRes = fileSystem.createAmResource(keytabPath,
LocalResourceType.FILE);

providerResources.put(SliderKeys.KEYTAB_DIR + "/" +
amKeytabName, keytabRes);
} else {
log.warn("No keytab file was found at {}.", keytabPath);
if (getConf().getBoolean(KEY_AM_LOGIN_KEYTAB_REQUIRED, false)) {
throw new BadConfigException("No keytab file was found at %s.", keytabPath);

} else {
log.warn("The AM will be "
+ "started without a kerberos authenticated identity. "
+ "The application is therefore not guaranteed to remain "
+ "operational beyond 24 hours.");
}
}
}
}
}

/**
* Update the AM resource with any local needs
* @param capability capability to update
*/
public void prepareAMResourceRequirements(MapOperations sliderAM,
Resource capability) {
capability.setMemory(sliderAM.getOptionInt(
ResourceKeys.YARN_MEMORY,
capability.getMemory()));
capability.setVirtualCores(
sliderAM.getOptionInt(ResourceKeys.YARN_CORES, capability.getVirtualCores()));
}

/**
* Extract any JVM options from the cluster specification and
* add them to the command line
*/
public void addJVMOptions(AggregateConf aggregateConf,
JavaCommandLineBuilder cmdLine)
throws BadConfigException {

MapOperations sliderAM =
aggregateConf.getAppConfOperations().getMandatoryComponent(
SliderKeys.COMPONENT_AM);
cmdLine.forceIPv4().headless();
String heap = sliderAM.getOption(RoleKeys.JVM_HEAP,
DEFAULT_JVM_HEAP);
cmdLine.setJVMHeap(heap);
String jvmopts = sliderAM.getOption(RoleKeys.JVM_OPTS, "");
if (SliderUtils.isSet(jvmopts)) {
cmdLine.add(jvmopts);
}
}


@Override
public void prepareInstanceConfiguration(AggregateConf aggregateConf)
throws SliderException, IOException {
mergeTemplates(aggregateConf,
INTERNAL_JSON, RESOURCES_JSON, APPCONF_JSON
);
}
}
@@ -1,185 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.slider.providers.slideram;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.slider.common.SliderKeys;
import org.apache.slider.common.tools.ConfigHelper;
import org.apache.slider.common.tools.SliderFileSystem;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.core.conf.AggregateConf;
import org.apache.slider.core.conf.MapOperations;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.core.launch.ContainerLauncher;
import org.apache.slider.core.registry.docstore.PublishedConfiguration;
import org.apache.slider.core.registry.info.CustomRegistryConstants;
import org.apache.slider.providers.AbstractProviderService;
import org.apache.slider.providers.ProviderCore;
import org.apache.slider.providers.ProviderRole;
import org.apache.slider.providers.agent.AgentKeys;
import org.apache.slider.server.appmaster.PublishedArtifacts;
import org.apache.slider.server.appmaster.web.rest.RestPaths;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;

import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;

/**
* Exists just to move some functionality out of AppMaster into a peer class
* of the actual service provider doing the real work
*/
public class SliderAMProviderService extends AbstractProviderService implements
ProviderCore,
AgentKeys,
SliderKeys {

public SliderAMProviderService() {
super("SliderAMProviderService");
}

@Override
public String getHumanName() {
return "Slider Application";
}

@Override
public Configuration loadProviderConfigurationInformation(File confDir) throws
BadCommandArgumentsException,
IOException {
return null;
}

@Override
public void buildContainerLaunchContext(ContainerLauncher containerLauncher,
AggregateConf instanceDefinition,
Container container,
ProviderRole role,
SliderFileSystem sliderFileSystem,
Path generatedConfPath,
MapOperations resourceComponent,
MapOperations appComponent,
Path containerTmpDirPath) throws IOException, SliderException {
}

@Override
public List<ProviderRole> getRoles() {
return new ArrayList<>(0);
}

@Override
public void validateInstanceDefinition(AggregateConf instanceDefinition) throws
SliderException {

}

@Override
public void applyInitialRegistryDefinitions(URL amWebURI,
ServiceRecord serviceRecord)
throws IOException {
super.applyInitialRegistryDefinitions(amWebURI,
serviceRecord);
// now publish site.xml files
YarnConfiguration defaultYarnConfig = new YarnConfiguration();
amState.getPublishedSliderConfigurations().put(
PublishedArtifacts.COMPLETE_CONFIG,
new PublishedConfiguration(
"Complete slider application settings",
getConfig(), getConfig()));
amState.getPublishedSliderConfigurations().put(
PublishedArtifacts.YARN_SITE_CONFIG,
new PublishedConfiguration(
"YARN site settings",
ConfigHelper.loadFromResource("yarn-site.xml"),
defaultYarnConfig) );

amState.getPublishedSliderConfigurations().put(
PublishedArtifacts.CORE_SITE_CONFIG,
new PublishedConfiguration(
"Core site settings",
ConfigHelper.loadFromResource("core-site.xml"),
defaultYarnConfig) );
amState.getPublishedSliderConfigurations().put(
PublishedArtifacts.HDFS_SITE_CONFIG,
new PublishedConfiguration(
"HDFS site settings",
ConfigHelper.loadFromResource("hdfs-site.xml"),
new HdfsConfiguration(true)) );


try {

URL managementAPI = new URL(amWebURI, RELATIVE_PATH_MANAGEMENT);
URL registryREST = new URL(amWebURI, RELATIVE_PATH_REGISTRY);

URL publisherURL = new URL(amWebURI, RELATIVE_PATH_PUBLISHER);

// Set the configurations URL.

String configurationsURL = SliderUtils.appendToURL(
publisherURL.toExternalForm(), RestPaths.SLIDER_CONFIGSET);
String exportsURL = SliderUtils.appendToURL(
publisherURL.toExternalForm(), RestPaths.SLIDER_EXPORTS);

serviceRecord.addExternalEndpoint(
RegistryTypeUtils.webEndpoint(
CustomRegistryConstants.WEB_UI, amWebURI.toURI()));

serviceRecord.addExternalEndpoint(
RegistryTypeUtils.webEndpoint(
CustomRegistryConstants.AM_REST_BASE, amWebURI.toURI()));

serviceRecord.addExternalEndpoint(
RegistryTypeUtils.restEndpoint(
CustomRegistryConstants.MANAGEMENT_REST_API,
managementAPI.toURI()));
serviceRecord.addExternalEndpoint(
RegistryTypeUtils.restEndpoint(
CustomRegistryConstants.PUBLISHER_REST_API,
publisherURL.toURI()));
serviceRecord.addExternalEndpoint(
RegistryTypeUtils.restEndpoint(
CustomRegistryConstants.REGISTRY_REST_API,
registryREST.toURI()));
serviceRecord.addExternalEndpoint(
RegistryTypeUtils.restEndpoint(
CustomRegistryConstants.PUBLISHER_CONFIGURATIONS_API,
new URI(configurationsURL)));
serviceRecord.addExternalEndpoint(
RegistryTypeUtils.restEndpoint(
CustomRegistryConstants.PUBLISHER_EXPORTS_API,
new URI(exportsURL)));

} catch (URISyntaxException e) {
throw new IOException(e);
}
}
}
@ -18,16 +18,12 @@
|
|||
|
||||
package org.apache.slider.server.appmaster;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.security.Credentials;
|
||||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.slider.common.SliderKeys;
|
||||
import org.apache.slider.api.resource.Application;
|
||||
import org.apache.slider.common.tools.SliderFileSystem;
|
||||
import org.apache.slider.core.conf.AggregateConf;
|
||||
import org.apache.slider.core.conf.MapOperations;
|
||||
import org.apache.slider.core.launch.ContainerLauncher;
|
||||
import org.apache.slider.providers.ProviderRole;
|
||||
import org.apache.slider.providers.ProviderService;
|
||||
|
@ -37,8 +33,8 @@ import org.apache.slider.server.appmaster.actions.QueueAccess;
|
|||
import org.apache.slider.server.appmaster.state.ContainerAssignment;
|
||||
import org.apache.slider.server.appmaster.state.RoleInstance;
|
||||
import org.apache.slider.server.appmaster.state.RoleStatus;
|
||||
import org.apache.slider.server.services.workflow.WorkflowExecutorService;
|
||||
import org.apache.slider.server.services.workflow.ServiceThreadFactory;
|
||||
import org.apache.slider.server.services.workflow.WorkflowExecutorService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
@ -73,16 +69,6 @@ public class RoleLaunchService
|
|||
*/
|
||||
private final SliderFileSystem fs;
|
||||
|
||||
/**
|
||||
* Path in the launch filesystem that refers to a configuration directory
|
||||
* -the interpretation of it is left to the Provider
|
||||
*/
|
||||
private final Path generatedConfDirPath;
|
||||
/**
|
||||
* Path in the launch filesystem that refers to a temp directory
|
||||
* which will be cleaned up at (some) time in the future
|
||||
*/
|
||||
private final Path launcherTmpDirPath;
|
||||
|
||||
private Map<String, String> envVars;
|
||||
|
||||
|
@ -91,21 +77,13 @@ public class RoleLaunchService
|
|||
* @param queueAccess
|
||||
* @param provider the provider
|
||||
* @param fs filesystem
|
||||
* @param generatedConfDirPath path in the FS for the generated dir
|
||||
* @param envVars environment variables
|
||||
* @param launcherTmpDirPath path for a temporary data in the launch process
|
||||
*/
|
||||
public RoleLaunchService(QueueAccess queueAccess,
|
||||
ProviderService provider,
|
||||
SliderFileSystem fs,
|
||||
Path generatedConfDirPath,
|
||||
Map<String, String> envVars,
|
||||
Path launcherTmpDirPath) {
|
||||
public RoleLaunchService(QueueAccess queueAccess, ProviderService provider,
|
||||
SliderFileSystem fs, Map<String, String> envVars) {
|
||||
super(ROLE_LAUNCH_SERVICE);
|
||||
this.actionQueue = queueAccess;
|
||||
this.fs = fs;
|
||||
this.generatedConfDirPath = generatedConfDirPath;
|
||||
this.launcherTmpDirPath = launcherTmpDirPath;
|
||||
this.provider = provider;
|
||||
this.envVars = envVars;
|
||||
}
|
||||
|
@ -120,22 +98,12 @@ public class RoleLaunchService
|
|||
/**
|
||||
* Start an asychronous launch operation
|
||||
* @param assignment container assignment
|
||||
* @param clusterSpec cluster spec to use for template
|
||||
* @param credentials credentials to use
|
||||
*/
|
||||
public void launchRole(ContainerAssignment assignment,
|
||||
AggregateConf clusterSpec,
|
||||
Credentials credentials) {
|
||||
RoleStatus role = assignment.role;
|
||||
String roleName = role.getName();
|
||||
String roleGroup = role.getGroup();
|
||||
// prelaunch safety check
|
||||
Preconditions.checkArgument(provider.isSupportedRole(roleName));
|
||||
Application application, Credentials credentials) {
|
||||
RoleLaunchService.RoleLauncher launcher =
|
||||
new RoleLaunchService.RoleLauncher(assignment,
|
||||
clusterSpec,
|
||||
clusterSpec.getResourceOperations().getOrAddComponent(roleGroup),
|
||||
clusterSpec.getAppConfOperations().getOrAddComponent(roleGroup),
|
||||
new RoleLaunchService.RoleLauncher(assignment, application,
|
||||
credentials);
|
||||
execute(launcher);
|
||||
}
|
||||
|
@ -148,35 +116,21 @@ public class RoleLaunchService
|
|||
private final ContainerAssignment assignment;
|
||||
// Allocated container
|
||||
public final Container container;
|
||||
private final MapOperations resourceComponent;
|
||||
private final MapOperations appComponent;
|
||||
private final AggregateConf instanceDefinition;
|
||||
public final Application application;
|
||||
public final ProviderRole role;
|
||||
private final Credentials credentials;
|
||||
private Exception raisedException;
|
||||
|
||||
public RoleLauncher(ContainerAssignment assignment,
|
||||
AggregateConf instanceDefinition,
|
||||
MapOperations resourceComponent,
|
||||
MapOperations appComponent,
|
||||
Application application,
|
||||
Credentials credentials) {
|
||||
this.assignment = assignment;
|
||||
this.credentials = credentials;
|
||||
this.container = assignment.container;
|
||||
RoleStatus roleStatus = assignment.role;
|
||||
|
||||
assert resourceComponent != null;
|
||||
assert appComponent != null;
|
||||
ProviderRole providerRole = roleStatus.getProviderRole();
|
||||
assert providerRole != null;
|
||||
this.role = providerRole;
|
||||
this.resourceComponent = resourceComponent;
|
||||
this.appComponent = appComponent;
|
||||
this.instanceDefinition = instanceDefinition;
|
||||
}
|
||||
this.application = application;
|
||||
|
||||
public Exception getRaisedException() {
|
||||
return raisedException;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -196,41 +150,28 @@ public class RoleLaunchService
|
|||
containerLauncher.setupUGI();
|
||||
containerLauncher.putEnv(envVars);
|
||||
|
||||
log.debug("Launching container {} into role {}",
|
||||
container.getId(),
|
||||
role.name);
|
||||
log.info("Launching container {} into RoleName = {}, RoleGroup = {}",
|
||||
container.getId(), role.name, role.group);
|
||||
|
||||
//now build up the configuration data
|
||||
Path containerTmpDirPath =
|
||||
new Path(launcherTmpDirPath, container.getId().toString());
|
||||
provider.buildContainerLaunchContext(containerLauncher,
|
||||
instanceDefinition,
|
||||
container,
|
||||
role,
|
||||
fs,
|
||||
generatedConfDirPath,
|
||||
resourceComponent,
|
||||
appComponent,
|
||||
containerTmpDirPath);
|
||||
provider.buildContainerLaunchContext(containerLauncher, application,
|
||||
container, role, fs);
|
||||
|
||||
RoleInstance instance = new RoleInstance(container);
|
||||
String[] envDescription = containerLauncher.dumpEnvToString();
|
||||
|
||||
String commandsAsString = containerLauncher.getCommandsAsString();
|
||||
log.info("Starting container with command: {}",
|
||||
commandsAsString);
|
||||
log.info("Starting container with command: {}", commandsAsString);
|
||||
|
||||
instance.providerRole = role;
|
||||
instance.command = commandsAsString;
|
||||
instance.role = role.name;
|
||||
instance.group = role.group;
|
||||
instance.roleId = role.id;
|
||||
instance.appVersion = instanceDefinition.getAppConfOperations()
|
||||
.getGlobalOptions().get(SliderKeys.APP_VERSION);
|
||||
instance.environment = envDescription;
|
||||
int delay = appComponent.getOptionInt(
|
||||
AgentKeys.KEY_CONTAINER_LAUNCH_DELAY, 0);
|
||||
int maxDelay =
|
||||
getConfig().getInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
|
||||
long delay = role.component.getConfiguration()
|
||||
.getPropertyLong(AgentKeys.KEY_CONTAINER_LAUNCH_DELAY, 0);
|
||||
long maxDelay = getConfig()
|
||||
.getLong(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
|
||||
YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS);
|
||||
if (delay > maxDelay/1000) {
|
||||
log.warn("Container launch delay of {} exceeds the maximum allowed of"
|
||||
|
@@ -238,20 +179,17 @@ public class RoleLaunchService
|
|||
delay, maxDelay/1000);
|
||||
delay = 0;
|
||||
}
|
||||
log.info("Container launch delay for {} set to {} seconds",
|
||||
role.name, delay);
|
||||
actionQueue.schedule(new ActionStartContainer("starting " + role.name,
|
||||
container,
|
||||
containerLauncher.completeContainerLaunch(),
|
||||
instance,
|
||||
delay,
|
||||
log.info("Container launch delay for {} set to {} seconds", role.name,
|
||||
delay);
|
||||
actionQueue.schedule(
|
||||
new ActionStartContainer("starting " + role.name, container,
|
||||
containerLauncher.completeContainerLaunch(), instance, delay,
|
||||
TimeUnit.SECONDS));
|
||||
} catch (Exception e) {
|
||||
log.error("Exception thrown while trying to start {}: {}",
|
||||
role.name, e, e);
|
||||
raisedException = e;
|
||||
log.error("Exception thrown while trying to start " + role.name
|
||||
+ " container = " + container.getId() + " on host " + container
|
||||
.getNodeId(), e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
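For reference, the delay handling above caps any configured container launch delay at the RM container-allocation expiry interval; the sketch below restates that rule as a standalone helper (the helper name and plain long parameters are illustrative only — the patch reads these values from the component configuration and from YarnConfiguration).

    // Minimal standalone sketch of the launch-delay clamp (seconds vs. milliseconds).
    static long clampLaunchDelay(long requestedDelaySecs, long allocExpiryMs) {
      long maxDelaySecs = allocExpiryMs / 1000;
      // a delay longer than the RM's allocation expiry would forfeit the container,
      // so the launcher falls back to an immediate start instead
      return requestedDelaySecs > maxDelaySecs ? 0 : requestedDelaySecs;
    }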
|
File diff suppressed because it is too large
|
@@ -18,6 +18,7 @@
|
|||
|
||||
package org.apache.slider.server.appmaster.actions;
|
||||
|
||||
import org.apache.slider.api.proto.Messages;
|
||||
import org.apache.slider.core.conf.ConfTree;
|
||||
import org.apache.slider.server.appmaster.SliderAppMaster;
|
||||
import org.apache.slider.server.appmaster.state.AppState;
|
||||
|
@@ -26,19 +27,16 @@ import java.util.concurrent.TimeUnit;
|
|||
|
||||
public class ActionFlexCluster extends AsyncAction {
|
||||
|
||||
public final ConfTree resources;
|
||||
|
||||
public ActionFlexCluster(String name,
|
||||
long delay,
|
||||
TimeUnit timeUnit, ConfTree resources) {
|
||||
final Messages.FlexComponentRequestProto requestProto;
|
||||
public ActionFlexCluster(String name, long delay, TimeUnit timeUnit,
|
||||
Messages.FlexComponentRequestProto requestProto) {
|
||||
super(name, delay, timeUnit, ATTR_CHANGES_APP_SIZE);
|
||||
this.resources = resources;
|
||||
this.requestProto = requestProto;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void execute(SliderAppMaster appMaster,
|
||||
QueueAccess queueService,
|
||||
AppState appState) throws Exception {
|
||||
appMaster.flexCluster(resources);
|
||||
appMaster.flexCluster(requestProto);
|
||||
}
|
||||
}
|
||||
|
|
|
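A minimal sketch of how the reworked action might be queued, mirroring the scheduling call SliderIPCService makes later in this patch; the queue handle is assumed to expose the same schedule(AsyncAction) method used elsewhere in the AM, and the request is left at its protobuf defaults because its fields are not shown in this excerpt.

    // Hypothetical caller-side wiring; real callers populate the request via its builder.
    Messages.FlexComponentRequestProto request =
        Messages.FlexComponentRequestProto.getDefaultInstance();
    queue.schedule(
        new ActionFlexCluster("flex", 1, TimeUnit.MILLISECONDS, request));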
@@ -19,6 +19,7 @@
|
|||
package org.apache.slider.server.appmaster.actions;
|
||||
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.slider.api.resource.Application;
|
||||
import org.apache.slider.server.appmaster.SliderAppMaster;
|
||||
import org.apache.slider.server.appmaster.state.AppState;
|
||||
|
||||
|
@@ -31,21 +32,13 @@ public class ActionRegisterServiceInstance extends AsyncAction {
|
|||
|
||||
private final String instanceName;
|
||||
private final ApplicationId appId;
|
||||
|
||||
private final Application application;
|
||||
public ActionRegisterServiceInstance(String instanceName,
|
||||
ApplicationId appId) {
|
||||
ApplicationId appId, Application application) {
|
||||
super("ActionRegisterServiceInstance");
|
||||
this.instanceName = instanceName;
|
||||
this.appId = appId;
|
||||
}
|
||||
|
||||
public ActionRegisterServiceInstance(String instanceName,
|
||||
ApplicationId appId,
|
||||
long delay,
|
||||
TimeUnit timeUnit) {
|
||||
super("ActionRegisterServiceInstance", delay, timeUnit);
|
||||
this.instanceName = instanceName;
|
||||
this.appId = appId;
|
||||
this.application = application;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -54,6 +47,6 @@ public class ActionRegisterServiceInstance extends AsyncAction {
|
|||
AppState appState) throws Exception {
|
||||
|
||||
// YARN Registry do the registration
|
||||
appMaster.registerServiceInstance(instanceName, appId);
|
||||
appMaster.registerServiceInstance(instanceName, appId, application);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,80 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.server.appmaster.metrics;
|
||||
|
||||
import com.codahale.metrics.Counter;
|
||||
import org.apache.hadoop.metrics2.MetricsCollector;
|
||||
import org.apache.hadoop.metrics2.MetricsInfo;
|
||||
import org.apache.hadoop.metrics2.MetricsSource;
|
||||
import org.apache.hadoop.metrics2.MetricsSystem;
|
||||
import org.apache.hadoop.metrics2.annotation.Metric;
|
||||
import org.apache.hadoop.metrics2.annotation.Metrics;
|
||||
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
|
||||
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
|
||||
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.hadoop.metrics2.lib.Interns.info;
|
||||
|
||||
@Metrics(context = "yarn-native-service")
|
||||
public class SliderMetrics implements MetricsSource {
|
||||
|
||||
@Metric("containers pending")
|
||||
public MutableGaugeInt containersPending;
|
||||
@Metric("anti-affinity containers pending")
|
||||
public MutableGaugeInt pendingAAContainers;
|
||||
@Metric("containers pending")
|
||||
public MutableGaugeInt containersRunning;
|
||||
@Metric("containers requested")
|
||||
public MutableGaugeInt containersDesired;
|
||||
@Metric("containers completed")
|
||||
public MutableGaugeInt containersCompleted;
|
||||
@Metric("containers failed")
|
||||
public MutableGaugeInt containersFailed;
|
||||
@Metric("containers failed since last threshold")
|
||||
public MutableGaugeInt failedSinceLastThreshold;
|
||||
@Metric("containers preempted")
|
||||
public MutableGaugeInt containersPreempted;
|
||||
@Metric("containers surplus")
|
||||
public MutableGaugeInt surplusContainers;
|
||||
|
||||
protected final MetricsRegistry registry;
|
||||
|
||||
public SliderMetrics(MetricsInfo metricsInfo) {
|
||||
registry = new MetricsRegistry(metricsInfo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void getMetrics(MetricsCollector collector, boolean all) {
|
||||
registry.snapshot(collector.addRecord(registry.info()), all);
|
||||
}
|
||||
|
||||
public static SliderMetrics register(String name, String description) {
|
||||
SliderMetrics metrics = new SliderMetrics(info(name, description));
|
||||
DefaultMetricsSystem.instance().register(name, description, metrics);
|
||||
return metrics;
|
||||
}
|
||||
|
||||
public void tag(String name, String description, String value) {
|
||||
registry.tag(name, description, value);
|
||||
}
|
||||
}
|
||||
|
|
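A short, hedged usage sketch of the new SliderMetrics source, along the lines of how RoleStatus wires it up later in this patch; the component name and the gauge updates below are made up for illustration.

    // Register a per-component metrics source, tag it, then drive the gauges.
    SliderMetrics metrics =
        SliderMetrics.register("worker", "Metrics for component worker");
    metrics.tag("type", "Metrics type [component or service]", "component");

    metrics.containersDesired.set(3);   // target container count
    metrics.containersPending.incr();   // a request is outstanding
    metrics.containersPending.decr();   // ...the container was allocated
    metrics.containersRunning.incr();   // ...and is now running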
@@ -1,63 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.server.appmaster.operations;
|
||||
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.api.records.Priority;
|
||||
import org.apache.hadoop.yarn.client.api.AMRMClient;
|
||||
import org.apache.slider.providers.ProviderService;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public class ProviderNotifyingOperationHandler extends RMOperationHandler {
|
||||
|
||||
private final ProviderService providerService;
|
||||
|
||||
public ProviderNotifyingOperationHandler(ProviderService providerService) {
|
||||
this.providerService = providerService;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void releaseAssignedContainer(ContainerId containerId) {
|
||||
providerService.releaseAssignedContainer(containerId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addContainerRequest(AMRMClient.ContainerRequest req) {
|
||||
providerService.addContainerRequest(req);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int cancelContainerRequests(Priority priority1,
|
||||
Priority priority2,
|
||||
int count) {
|
||||
return providerService.cancelContainerRequests(priority1, priority2, count);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cancelSingleRequest(AMRMClient.ContainerRequest request) {
|
||||
providerService.cancelSingleRequest(request);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void updateBlacklist(List<String> blacklistAdditions,
|
||||
List<String> blacklistRemovals) {
|
||||
providerService.updateBlacklist(blacklistAdditions, blacklistRemovals);
|
||||
}
|
||||
}
|
|
@@ -70,11 +70,12 @@ public class SliderClusterProtocolPBImpl implements SliderClusterProtocolPB {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Messages.FlexClusterResponseProto flexCluster(RpcController controller,
|
||||
Messages.FlexClusterRequestProto request) throws ServiceException {
|
||||
public Messages.FlexComponentResponseProto flexComponent(
|
||||
RpcController controller, Messages.FlexComponentRequestProto request)
|
||||
throws ServiceException {
|
||||
try {
|
||||
return real.flexCluster(request);
|
||||
} catch (Exception e) {
|
||||
return real.flexComponent(request);
|
||||
} catch (IOException e) {
|
||||
throw wrap(e);
|
||||
}
|
||||
}
|
||||
|
@@ -90,19 +91,6 @@ public class SliderClusterProtocolPBImpl implements SliderClusterProtocolPB {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
|
||||
RpcController controller,
|
||||
Messages.GetInstanceDefinitionRequestProto request)
|
||||
throws ServiceException {
|
||||
try {
|
||||
return real.getInstanceDefinition(request);
|
||||
} catch (Exception e) {
|
||||
throw wrap(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Messages.ListNodeUUIDsByRoleResponseProto listNodeUUIDsByRole(
|
||||
RpcController controller,
|
||||
|
|
|
@@ -110,10 +110,10 @@ public class SliderClusterProtocolProxy implements SliderClusterProtocol {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Messages.FlexClusterResponseProto flexCluster(Messages.FlexClusterRequestProto request)
|
||||
throws IOException {
|
||||
public Messages.FlexComponentResponseProto flexComponent(
|
||||
Messages.FlexComponentRequestProto request) throws IOException {
|
||||
try {
|
||||
return endpoint.flexCluster(NULL_CONTROLLER, request);
|
||||
return endpoint.flexComponent(NULL_CONTROLLER, request);
|
||||
} catch (ServiceException e) {
|
||||
throw convert(e);
|
||||
}
|
||||
|
@@ -131,19 +131,6 @@ public class SliderClusterProtocolProxy implements SliderClusterProtocol {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
|
||||
Messages.GetInstanceDefinitionRequestProto request) throws
|
||||
IOException,
|
||||
YarnException {
|
||||
try {
|
||||
return endpoint.getInstanceDefinition(NULL_CONTROLLER, request);
|
||||
} catch (ServiceException e) {
|
||||
throw convert(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Messages.ListNodeUUIDsByRoleResponseProto listNodeUUIDsByRole(Messages.ListNodeUUIDsByRoleRequestProto request) throws
|
||||
IOException,
|
||||
|
|
|
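Client-side, the new RPC can be issued through the proxy shown above; a hedged sketch follows (the request is left at its protobuf defaults because the message fields are not part of this diff, and checked IOExceptions are omitted).

    // "proxy" is an already-constructed SliderClusterProtocolProxy.
    Messages.FlexComponentRequestProto request =
        Messages.FlexComponentRequestProto.getDefaultInstance();
    Messages.FlexComponentResponseProto response = proxy.flexComponent(request);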
@@ -24,9 +24,9 @@ import org.apache.hadoop.service.AbstractService;
|
|||
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
|
||||
import org.apache.slider.api.ClusterDescription;
|
||||
import org.apache.slider.api.SliderClusterProtocol;
|
||||
import org.apache.slider.api.proto.Messages;
|
||||
import org.apache.slider.api.resource.Application;
|
||||
import org.apache.slider.api.types.ApplicationLivenessInformation;
|
||||
import org.apache.slider.api.types.ComponentInformation;
|
||||
import org.apache.slider.api.types.ContainerInformation;
|
||||
|
@@ -38,6 +38,7 @@ import org.apache.slider.core.exceptions.ServiceNotReadyException;
|
|||
import org.apache.slider.core.main.LauncherExitCodes;
|
||||
import org.apache.slider.core.persist.AggregateConfSerDeser;
|
||||
import org.apache.slider.core.persist.ConfTreeSerDeser;
|
||||
import org.apache.slider.core.persist.JsonSerDeser;
|
||||
import org.apache.slider.server.appmaster.AppMasterActionOperations;
|
||||
import org.apache.slider.server.appmaster.actions.ActionFlexCluster;
|
||||
import org.apache.slider.server.appmaster.actions.ActionHalt;
|
||||
|
@@ -78,6 +79,9 @@ public class SliderIPCService extends AbstractService
|
|||
private final MetricsAndMonitoring metricsAndMonitoring;
|
||||
private final AppMasterActionOperations amOperations;
|
||||
private final ContentCache cache;
|
||||
private static final JsonSerDeser<Application> jsonSerDeser =
|
||||
new JsonSerDeser<Application>(Application.class);
|
||||
|
||||
|
||||
/**
|
||||
* This is the prefix used for metrics
|
||||
|
@@ -195,17 +199,12 @@ public class SliderIPCService extends AbstractService
|
|||
return Messages.UpgradeContainersResponseProto.getDefaultInstance();
|
||||
}
|
||||
|
||||
@Override //SliderClusterProtocol
|
||||
public Messages.FlexClusterResponseProto flexCluster(Messages.FlexClusterRequestProto request)
|
||||
throws IOException {
|
||||
@Override
|
||||
public Messages.FlexComponentResponseProto flexComponent(
|
||||
Messages.FlexComponentRequestProto request) throws IOException {
|
||||
onRpcCall("flex");
|
||||
String payload = request.getClusterSpec();
|
||||
ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser();
|
||||
ConfTree updatedResources = confTreeSerDeser.fromJson(payload);
|
||||
schedule(new ActionFlexCluster("flex", 1, TimeUnit.MILLISECONDS,
|
||||
updatedResources));
|
||||
return Messages.FlexClusterResponseProto.newBuilder().setResponse(
|
||||
true).build();
|
||||
schedule(new ActionFlexCluster("flex", 1, TimeUnit.MILLISECONDS, request));
|
||||
return Messages.FlexComponentResponseProto.newBuilder().build();
|
||||
}
|
||||
|
||||
@Override //SliderClusterProtocol
|
||||
|
@@ -216,38 +215,10 @@ public class SliderIPCService extends AbstractService
|
|||
String result;
|
||||
//quick update
|
||||
//query and json-ify
|
||||
ClusterDescription cd = state.refreshClusterStatus();
|
||||
result = cd.toJsonString();
|
||||
String stat = result;
|
||||
Application application = state.refreshClusterStatus();
|
||||
String stat = jsonSerDeser.toJson(application);
|
||||
return Messages.GetJSONClusterStatusResponseProto.newBuilder()
|
||||
.setClusterSpec(stat)
|
||||
.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
|
||||
Messages.GetInstanceDefinitionRequestProto request)
|
||||
throws IOException, YarnException {
|
||||
|
||||
onRpcCall("getinstancedefinition");
|
||||
String internal;
|
||||
String resources;
|
||||
String app;
|
||||
AggregateConf instanceDefinition =
|
||||
state.getInstanceDefinitionSnapshot();
|
||||
internal = instanceDefinition.getInternal().toJson();
|
||||
resources = instanceDefinition.getResources().toJson();
|
||||
app = instanceDefinition.getAppConf().toJson();
|
||||
assert internal != null;
|
||||
assert resources != null;
|
||||
assert app != null;
|
||||
log.debug("Generating getInstanceDefinition Response");
|
||||
Messages.GetInstanceDefinitionResponseProto.Builder builder =
|
||||
Messages.GetInstanceDefinitionResponseProto.newBuilder();
|
||||
builder.setInternal(internal);
|
||||
builder.setResources(resources);
|
||||
builder.setApplication(app);
|
||||
return builder.build();
|
||||
.setClusterSpec(stat).build();
|
||||
}
|
||||
|
||||
@Override //SliderClusterProtocol
|
||||
|
|
|
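The status path above now renders the Application model straight to JSON; a minimal sketch of that serialization step (the application value is illustrative and checked exceptions are omitted).

    JsonSerDeser<Application> serDeser = new JsonSerDeser<>(Application.class);
    Application application = new Application();
    application.setName("sleeper-service");        // illustrative name
    String json = serDeser.toJson(application);    // what getJSONClusterStatus() returns as clusterSpec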
@@ -138,6 +138,7 @@ public class SecurityConfiguration {
|
|||
|
||||
public File getKeytabFile(AggregateConf instanceDefinition)
|
||||
throws SliderException, IOException {
|
||||
//TODO implement this for dash semantic
|
||||
String keytabFullPath = instanceDefinition.getAppConfOperations()
|
||||
.getComponent(SliderKeys.COMPONENT_AM)
|
||||
.get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
|
||||
|
|
File diff suppressed because it is too large
|
@@ -24,7 +24,7 @@ import org.apache.hadoop.fs.FileSystem;
|
|||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.api.records.NodeReport;
|
||||
import org.apache.slider.core.conf.AggregateConf;
|
||||
import org.apache.slider.api.resource.Application;
|
||||
import org.apache.slider.providers.ProviderRole;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
@@ -38,26 +38,24 @@ import java.util.Map;
|
|||
* are added.
|
||||
*/
|
||||
public class AppStateBindingInfo {
|
||||
public AggregateConf instanceDefinition;
|
||||
public Configuration serviceConfig = new Configuration();
|
||||
public Configuration publishedProviderConf = new Configuration(false);
|
||||
public Application application = null;
|
||||
public List<ProviderRole> roles = new ArrayList<>();
|
||||
public FileSystem fs;
|
||||
public Path historyPath;
|
||||
public List<Container> liveContainers = new ArrayList<>(0);
|
||||
public Map<String, String> applicationInfo = new HashMap<>();
|
||||
public ContainerReleaseSelector releaseSelector = new SimpleReleaseSelector();
|
||||
/** node reports off the RM. */
|
||||
public List<NodeReport> nodeReports = new ArrayList<>(0);
|
||||
|
||||
public void validate() throws IllegalArgumentException {
|
||||
Preconditions.checkArgument(instanceDefinition != null, "null instanceDefinition");
|
||||
Preconditions.checkArgument(serviceConfig != null, "null appmasterConfig");
|
||||
Preconditions.checkArgument(publishedProviderConf != null, "null publishedProviderConf");
|
||||
Preconditions.checkArgument(releaseSelector != null, "null releaseSelector");
|
||||
Preconditions.checkArgument(roles != null, "null providerRoles");
|
||||
Preconditions.checkArgument(fs != null, "null fs");
|
||||
Preconditions.checkArgument(historyPath != null, "null historyDir");
|
||||
Preconditions.checkArgument(nodeReports != null, "null nodeReports");
|
||||
Preconditions.checkArgument(application != null, "null application");
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
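A hedged sketch of populating the slimmed-down binding info with the new required application field and letting validate() fail fast on anything missing; the values below are placeholders and checked exceptions are omitted.

    AppStateBindingInfo binding = new AppStateBindingInfo();
    binding.application = application;                 // the new mandatory field
    binding.serviceConfig = new Configuration();
    binding.fs = FileSystem.get(binding.serviceConfig);
    binding.historyPath = new Path("/tmp/history");    // placeholder history dir
    binding.validate();                                // throws IllegalArgumentException on nulls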
@@ -21,14 +21,12 @@ package org.apache.slider.server.appmaster.state;
|
|||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
|
||||
import org.apache.slider.api.ClusterDescription;
|
||||
import org.apache.slider.api.ClusterNode;
|
||||
import org.apache.slider.api.resource.Application;
|
||||
import org.apache.slider.api.types.ApplicationLivenessInformation;
|
||||
import org.apache.slider.api.types.ComponentInformation;
|
||||
import org.apache.slider.api.types.NodeInformation;
|
||||
import org.apache.slider.api.types.RoleStatistics;
|
||||
import org.apache.slider.core.conf.AggregateConf;
|
||||
import org.apache.slider.core.conf.ConfTreeOperations;
|
||||
import org.apache.slider.core.exceptions.NoSuchNodeException;
|
||||
import org.apache.slider.core.registry.docstore.PublishedConfigSet;
|
||||
import org.apache.slider.core.registry.docstore.PublishedExportsSet;
|
||||
|
@@ -130,45 +128,15 @@ public class ProviderAppState implements StateAccessForProviders {
|
|||
}
|
||||
|
||||
@Override
|
||||
public ClusterDescription getClusterStatus() {
|
||||
public Application getApplication() {
|
||||
return appState.getClusterStatus();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfTreeOperations getResourcesSnapshot() {
|
||||
return appState.getResourcesSnapshot();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfTreeOperations getAppConfSnapshot() {
|
||||
return appState.getAppConfSnapshot();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfTreeOperations getInternalsSnapshot() {
|
||||
return appState.getInternalsSnapshot();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isApplicationLive() {
|
||||
return appState.isApplicationLive();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getSnapshotTime() {
|
||||
return appState.getSnapshotTime();
|
||||
}
|
||||
|
||||
@Override
|
||||
public AggregateConf getInstanceDefinitionSnapshot() {
|
||||
return appState.getInstanceDefinitionSnapshot();
|
||||
}
|
||||
|
||||
@Override
|
||||
public AggregateConf getUnresolvedInstanceDefinition() {
|
||||
return appState.getUnresolvedInstanceDefinition();
|
||||
}
|
||||
|
||||
@Override
|
||||
public RoleStatus lookupRoleStatus(int key) {
|
||||
return appState.lookupRoleStatus(key);
|
||||
|
@@ -221,25 +189,15 @@ public class ProviderAppState implements StateAccessForProviders {
|
|||
}
|
||||
|
||||
@Override
|
||||
public ClusterDescription refreshClusterStatus() {
|
||||
public Application refreshClusterStatus() {
|
||||
return appState.refreshClusterStatus();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RoleStatus> cloneRoleStatusList() {
|
||||
return appState.cloneRoleStatusList();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ApplicationLivenessInformation getApplicationLivenessInformation() {
|
||||
return appState.getApplicationLivenessInformation();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, Integer> getLiveStatistics() {
|
||||
return appState.getLiveStatistics();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, ComponentInformation> getComponentInfoSnapshot() {
|
||||
return appState.getComponentInfoSnapshot();
|
||||
|
|
|
@@ -135,17 +135,6 @@ public class RoleHistory {
|
|||
outstandingRequests = new OutstandingRequestTracker();
|
||||
}
|
||||
|
||||
/**
|
||||
* Register all metrics with the metrics infra
|
||||
* @param metrics metrics
|
||||
*/
|
||||
public void register(MetricsAndMonitoring metrics) {
|
||||
metrics.register(RoleHistory.class, dirty, "dirty");
|
||||
metrics.register(RoleHistory.class, nodesUpdatedTime, "nodes-updated.time");
|
||||
metrics.register(RoleHistory.class, nodeUpdateReceived, "nodes-updated.flag");
|
||||
metrics.register(RoleHistory.class, thawedDataTime, "thawed.time");
|
||||
metrics.register(RoleHistory.class, saveTime, "saved.time");
|
||||
}
|
||||
|
||||
/**
|
||||
* safety check: make sure the role is unique amongst
|
||||
|
@@ -1102,13 +1091,13 @@ public class RoleHistory {
|
|||
int roleId = role.getKey();
|
||||
List<OutstandingRequest> requests = new ArrayList<>(toCancel);
|
||||
// there may be pending requests which can be cancelled here
|
||||
long pending = role.getPendingAntiAffineRequests();
|
||||
long pending = role.getAAPending();
|
||||
if (pending > 0) {
|
||||
// there are some pending ones which can be cancelled first
|
||||
long pendingToCancel = Math.min(pending, toCancel);
|
||||
log.info("Cancelling {} pending AA allocations, leaving {}", toCancel,
|
||||
pendingToCancel);
|
||||
role.setPendingAntiAffineRequests(pending - pendingToCancel);
|
||||
role.setAAPending(pending - pendingToCancel);
|
||||
toCancel -= pendingToCancel;
|
||||
}
|
||||
if (toCancel > 0 && role.isAARequestOutstanding()) {
|
||||
|
|
|
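Restating the cancellation bookkeeping above with the renamed AA-pending accessors (numbers are illustrative; setAAPending is package-private, so this only works from code co-located with RoleStatus, as RoleHistory is).

    long pending = role.getAAPending();             // e.g. 4 queued AA requests
    long toCancel = 2;
    long pendingToCancel = Math.min(pending, toCancel);
    role.setAAPending(pending - pendingToCancel);   // 4 - 2 = 2 still queued
    toCancel -= pendingToCancel;                    // nothing further to cancel in this example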
@@ -29,6 +29,7 @@ import org.apache.slider.api.ClusterNode;
|
|||
import org.apache.slider.api.proto.Messages;
|
||||
import org.apache.slider.api.types.ContainerInformation;
|
||||
import org.apache.slider.common.tools.SliderUtils;
|
||||
import org.apache.slider.providers.ProviderRole;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
|
@@ -40,6 +41,7 @@ import java.util.List;
|
|||
public final class RoleInstance implements Cloneable {
|
||||
|
||||
public Container container;
|
||||
public ProviderRole providerRole;
|
||||
/**
|
||||
* Container ID
|
||||
*/
|
||||
|
|
|
@@ -21,19 +21,22 @@ package org.apache.slider.server.appmaster.state;
|
|||
import com.codahale.metrics.Metric;
|
||||
import com.codahale.metrics.MetricSet;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
|
||||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.slider.api.types.ComponentInformation;
|
||||
import org.apache.slider.api.types.RoleStatistics;
|
||||
import org.apache.slider.providers.PlacementPolicy;
|
||||
import org.apache.slider.providers.ProviderRole;
|
||||
import org.apache.slider.server.appmaster.management.BoolMetricPredicate;
|
||||
import org.apache.slider.server.appmaster.management.LongGauge;
|
||||
import org.apache.slider.server.appmaster.metrics.SliderMetrics;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.hadoop.metrics2.lib.Interns.info;
|
||||
|
||||
/**
|
||||
* Models the ongoing status of all nodes in an application.
|
||||
*
|
||||
|
@@ -42,7 +45,7 @@ import java.util.Map;
|
|||
* requires synchronization. Where synchronized access is good is that it allows for
|
||||
* the whole instance to be locked, for updating multiple entries.
|
||||
*/
|
||||
public final class RoleStatus implements Cloneable, MetricSet {
|
||||
public final class RoleStatus implements MetricSet {
|
||||
|
||||
private final String name;
|
||||
private final String group;
|
||||
|
@@ -53,25 +56,9 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
private final int key;
|
||||
private final ProviderRole providerRole;
|
||||
|
||||
private final LongGauge actual = new LongGauge();
|
||||
private final LongGauge completed = new LongGauge();
|
||||
private final LongGauge desired = new LongGauge();
|
||||
private final LongGauge failed = new LongGauge();
|
||||
private final LongGauge failedRecently = new LongGauge(0);
|
||||
private final LongGauge limitsExceeded = new LongGauge(0);
|
||||
private final LongGauge nodeFailed = new LongGauge(0);
|
||||
/** Number of AA requests queued. */
|
||||
private final LongGauge pendingAntiAffineRequests = new LongGauge(0);
|
||||
private final LongGauge preempted = new LongGauge(0);
|
||||
private final LongGauge releasing = new LongGauge();
|
||||
private final LongGauge requested = new LongGauge();
|
||||
private final LongGauge started = new LongGauge();
|
||||
private final LongGauge startFailed = new LongGauge();
|
||||
private final LongGauge totalRequested = new LongGauge();
|
||||
|
||||
/** resource requirements */
|
||||
private Resource resourceRequirements;
|
||||
|
||||
private SliderMetrics componentMetrics;
|
||||
|
||||
/** any pending AA request */
|
||||
private volatile OutstandingRequest outstandingAArequest = null;
|
||||
|
@@ -84,28 +71,19 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
this.name = providerRole.name;
|
||||
this.group = providerRole.group;
|
||||
this.key = providerRole.id;
|
||||
componentMetrics =
|
||||
SliderMetrics.register(this.name, "Metrics for component " + this.name);
|
||||
componentMetrics
|
||||
.tag("type", "Metrics type [component or service]", "component");
|
||||
}
|
||||
|
||||
public SliderMetrics getComponentMetrics() {
|
||||
return this.componentMetrics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, Metric> getMetrics() {
|
||||
Map<String, Metric> metrics = new HashMap<>(15);
|
||||
metrics.put("actual", actual);
|
||||
metrics.put("completed", completed );
|
||||
metrics.put("desired", desired);
|
||||
metrics.put("failed", failed);
|
||||
metrics.put("limitsExceeded", limitsExceeded);
|
||||
metrics.put("nodeFailed", nodeFailed);
|
||||
metrics.put("preempted", preempted);
|
||||
metrics.put("pendingAntiAffineRequests", pendingAntiAffineRequests);
|
||||
metrics.put("releasing", releasing);
|
||||
metrics.put("requested", requested);
|
||||
metrics.put("preempted", preempted);
|
||||
metrics.put("releasing", releasing );
|
||||
metrics.put("requested", requested);
|
||||
metrics.put("started", started);
|
||||
metrics.put("startFailed", startFailed);
|
||||
metrics.put("totalRequested", totalRequested);
|
||||
|
||||
metrics.put("outstandingAArequest",
|
||||
new BoolMetricPredicate(new BoolMetricPredicate.Eval() {
|
||||
@Override
|
||||
|
@@ -174,83 +152,6 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
return !hasPlacementPolicy(PlacementPolicy.ANYWHERE);
|
||||
}
|
||||
|
||||
public long getDesired() {
|
||||
return desired.get();
|
||||
}
|
||||
|
||||
public void setDesired(long desired) {
|
||||
this.desired.set(desired);
|
||||
}
|
||||
|
||||
public long getActual() {
|
||||
return actual.get();
|
||||
}
|
||||
|
||||
public long incActual() {
|
||||
return actual.incrementAndGet();
|
||||
}
|
||||
|
||||
public long decActual() {
|
||||
return actual.decToFloor(1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the request count.
|
||||
* @return a count of requested containers
|
||||
*/
|
||||
public long getRequested() {
|
||||
return requested.get();
|
||||
}
|
||||
|
||||
public long incRequested() {
|
||||
totalRequested.incrementAndGet();
|
||||
return requested.incrementAndGet();
|
||||
}
|
||||
|
||||
public void cancel(long count) {
|
||||
requested.decToFloor(count);
|
||||
}
|
||||
|
||||
public void decRequested() {
|
||||
cancel(1);
|
||||
}
|
||||
|
||||
public long getReleasing() {
|
||||
return releasing.get();
|
||||
}
|
||||
|
||||
public long incReleasing() {
|
||||
return releasing.incrementAndGet();
|
||||
}
|
||||
|
||||
public long decReleasing() {
|
||||
return releasing.decToFloor(1);
|
||||
}
|
||||
|
||||
public long getFailed() {
|
||||
return failed.get();
|
||||
}
|
||||
|
||||
public long getFailedRecently() {
|
||||
return failedRecently.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the recent failure
|
||||
* @return the number of failures in the "recent" window
|
||||
*/
|
||||
public long resetFailedRecently() {
|
||||
return failedRecently.getAndSet(0);
|
||||
}
|
||||
|
||||
public long getLimitsExceeded() {
|
||||
return limitsExceeded.get();
|
||||
}
|
||||
|
||||
public long incPendingAntiAffineRequests(long v) {
|
||||
return pendingAntiAffineRequests.addAndGet(v);
|
||||
}
|
||||
|
||||
/**
|
||||
* Probe for an outstanding AA request being true
|
||||
* @return true if there is an outstanding AA Request
|
||||
|
@@ -271,94 +172,14 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
* Note that a role failed, text will
|
||||
* be used in any diagnostics if an exception
|
||||
* is later raised.
|
||||
* @param startupFailure flag to indicate this was a startup event
|
||||
* @param text text about the failure
|
||||
* @param outcome outcome of the container
|
||||
*/
|
||||
public synchronized void noteFailed(boolean startupFailure, String text,
|
||||
ContainerOutcome outcome) {
|
||||
public synchronized void noteFailed(String text) {
|
||||
if (text != null) {
|
||||
failureMessage = text;
|
||||
}
|
||||
switch (outcome) {
|
||||
case Preempted:
|
||||
preempted.incrementAndGet();
|
||||
break;
|
||||
|
||||
case Node_failure:
|
||||
nodeFailed.incrementAndGet();
|
||||
failed.incrementAndGet();
|
||||
break;
|
||||
|
||||
case Failed_limits_exceeded: // exceeded memory or CPU; app/configuration related
|
||||
limitsExceeded.incrementAndGet();
|
||||
// fall through
|
||||
case Failed: // application failure, possibly node related, possibly not
|
||||
default: // anything else (future-proofing)
|
||||
failed.incrementAndGet();
|
||||
failedRecently.incrementAndGet();
|
||||
//have a look to see if it short lived
|
||||
if (startupFailure) {
|
||||
incStartFailed();
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
public long getStartFailed() {
|
||||
return startFailed.get();
|
||||
}
|
||||
|
||||
public synchronized void incStartFailed() {
|
||||
startFailed.getAndIncrement();
|
||||
}
|
||||
|
||||
public synchronized String getFailureMessage() {
|
||||
return failureMessage;
|
||||
}
|
||||
|
||||
public long getCompleted() {
|
||||
return completed.get();
|
||||
}
|
||||
|
||||
public long incCompleted() {
|
||||
return completed.incrementAndGet();
|
||||
}
|
||||
public long getStarted() {
|
||||
return started.get();
|
||||
}
|
||||
|
||||
public synchronized void incStarted() {
|
||||
started.incrementAndGet();
|
||||
}
|
||||
|
||||
public long getTotalRequested() {
|
||||
return totalRequested.get();
|
||||
}
|
||||
|
||||
public long getPreempted() {
|
||||
return preempted.get();
|
||||
}
|
||||
|
||||
public long getNodeFailed() {
|
||||
return nodeFailed.get();
|
||||
}
|
||||
|
||||
public long getPendingAntiAffineRequests() {
|
||||
return pendingAntiAffineRequests.get();
|
||||
}
|
||||
|
||||
public void setPendingAntiAffineRequests(long pendingAntiAffineRequests) {
|
||||
this.pendingAntiAffineRequests.set(pendingAntiAffineRequests);
|
||||
}
|
||||
|
||||
public long decPendingAntiAffineRequests() {
|
||||
return pendingAntiAffineRequests.decToFloor(1);
|
||||
}
|
||||
|
||||
public OutstandingRequest getOutstandingAArequest() {
|
||||
return outstandingAArequest;
|
||||
}
|
||||
|
||||
public void setOutstandingAArequest(OutstandingRequest outstandingAArequest) {
|
||||
this.outstandingAArequest = outstandingAArequest;
|
||||
|
@@ -379,11 +200,50 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
public void cancelOutstandingAARequest() {
|
||||
if (outstandingAArequest != null) {
|
||||
setOutstandingAArequest(null);
|
||||
setPendingAntiAffineRequests(0);
|
||||
decRequested();
|
||||
}
|
||||
}
|
||||
|
||||
public long getDesired() {
|
||||
return componentMetrics.containersDesired.value();
|
||||
}
|
||||
|
||||
long getRunning() {
|
||||
return componentMetrics.containersRunning.value();
|
||||
}
|
||||
|
||||
public long getPending() {
|
||||
return componentMetrics.containersPending.value();
|
||||
}
|
||||
|
||||
public long getAAPending() {
|
||||
return componentMetrics.pendingAAContainers.value();
|
||||
}
|
||||
|
||||
void decAAPending() {
|
||||
componentMetrics.pendingAAContainers.decr();
|
||||
}
|
||||
void setAAPending(long n) {
|
||||
componentMetrics.pendingAAContainers.set((int)n);
|
||||
}
|
||||
|
||||
long getFailedRecently() {
|
||||
return componentMetrics.failedSinceLastThreshold.value();
|
||||
}
|
||||
|
||||
long resetFailedRecently() {
|
||||
long count =
|
||||
componentMetrics.failedSinceLastThreshold.value();
|
||||
componentMetrics.failedSinceLastThreshold.set(0);
|
||||
return count;
|
||||
}
|
||||
|
||||
long getFailed() {
|
||||
return componentMetrics.containersFailed.value();
|
||||
}
|
||||
|
||||
String getFailureMessage() {
|
||||
return this.failureMessage;
|
||||
}
|
||||
/**
|
||||
* Get the number of roles we are short of.
|
||||
* nodes released are ignored.
|
||||
|
@@ -392,10 +252,9 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
*/
|
||||
public long getDelta() {
|
||||
long inuse = getActualAndRequested();
|
||||
long delta = desired.get() - inuse;
|
||||
long delta = getDesired() - inuse;
|
||||
if (delta < 0) {
|
||||
//if we are releasing, remove the number that are already released.
|
||||
delta += releasing.get();
|
||||
//but never switch to a positive
|
||||
delta = Math.min(delta, 0);
|
||||
}
|
||||
|
@@ -407,43 +266,7 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
* @return the size of the application when outstanding requests are included.
|
||||
*/
|
||||
public long getActualAndRequested() {
|
||||
return actual.get() + requested.get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder("RoleStatus{");
|
||||
sb.append("name='").append(name).append('\'');
|
||||
sb.append(", group=").append(group);
|
||||
sb.append(", key=").append(key);
|
||||
sb.append(", desired=").append(desired);
|
||||
sb.append(", actual=").append(actual);
|
||||
sb.append(", requested=").append(requested);
|
||||
sb.append(", releasing=").append(releasing);
|
||||
sb.append(", failed=").append(failed);
|
||||
sb.append(", startFailed=").append(startFailed);
|
||||
sb.append(", started=").append(started);
|
||||
sb.append(", completed=").append(completed);
|
||||
sb.append(", totalRequested=").append(totalRequested);
|
||||
sb.append(", preempted=").append(preempted);
|
||||
sb.append(", nodeFailed=").append(nodeFailed);
|
||||
sb.append(", failedRecently=").append(failedRecently);
|
||||
sb.append(", limitsExceeded=").append(limitsExceeded);
|
||||
sb.append(", resourceRequirements=").append(resourceRequirements);
|
||||
sb.append(", isAntiAffinePlacement=").append(isAntiAffinePlacement());
|
||||
if (isAntiAffinePlacement()) {
|
||||
sb.append(", pendingAntiAffineRequests=").append(pendingAntiAffineRequests);
|
||||
sb.append(", outstandingAArequest=").append(outstandingAArequest);
|
||||
}
|
||||
sb.append(", failureMessage='").append(failureMessage).append('\'');
|
||||
sb.append(", providerRole=").append(providerRole);
|
||||
sb.append('}');
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized Object clone() throws CloneNotSupportedException {
|
||||
return super.clone();
|
||||
return getRunning() + getPending();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -454,15 +277,6 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
return providerRole;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the statistics map from the current data
|
||||
* @return a map for use in statistics reports
|
||||
*/
|
||||
public Map<String, Integer> buildStatistics() {
|
||||
ComponentInformation componentInformation = serialize();
|
||||
return componentInformation.buildStatistics();
|
||||
}
|
||||
|
||||
/**
|
||||
* Produced a serialized form which can be served up as JSON
|
||||
* @return a summary of the current role status.
|
||||
|
@@ -470,21 +284,6 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
public synchronized ComponentInformation serialize() {
|
||||
ComponentInformation info = new ComponentInformation();
|
||||
info.name = name;
|
||||
info.priority = getPriority();
|
||||
info.desired = desired.intValue();
|
||||
info.actual = actual.intValue();
|
||||
info.requested = requested.intValue();
|
||||
info.releasing = releasing.intValue();
|
||||
info.failed = failed.intValue();
|
||||
info.startFailed = startFailed.intValue();
|
||||
info.placementPolicy = getPlacementPolicy();
|
||||
info.failureMessage = failureMessage;
|
||||
info.totalRequested = totalRequested.intValue();
|
||||
info.failedRecently = failedRecently.intValue();
|
||||
info.nodeFailed = nodeFailed.intValue();
|
||||
info.preempted = preempted.intValue();
|
||||
info.pendingAntiAffineRequestCount = pendingAntiAffineRequests.intValue();
|
||||
info.isAARequestOutstanding = isAARequestOutstanding();
|
||||
return info;
|
||||
}
|
||||
|
||||
|
@@ -542,17 +341,6 @@ public final class RoleStatus implements Cloneable, MetricSet {
|
|||
public synchronized RoleStatistics getStatistics() {
|
||||
RoleStatistics stats = new RoleStatistics();
|
||||
stats.activeAA = getOutstandingAARequestCount();
|
||||
stats.actual = actual.get();
|
||||
stats.desired = desired.get();
|
||||
stats.failed = failed.get();
|
||||
stats.limitsExceeded = limitsExceeded.get();
|
||||
stats.nodeFailed = nodeFailed.get();
|
||||
stats.preempted = preempted.get();
|
||||
stats.releasing = releasing.get();
|
||||
stats.requested = requested.get();
|
||||
stats.started = started.get();
|
||||
stats.startFailed = startFailed.get();
|
||||
stats.totalRequested = totalRequested.get();
|
||||
return stats;
|
||||
}
|
||||
|
||||
|
|
|
@@ -21,15 +21,13 @@ package org.apache.slider.server.appmaster.state;
|
|||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
|
||||
import org.apache.slider.api.ClusterDescription;
|
||||
import org.apache.slider.api.ClusterNode;
|
||||
import org.apache.slider.api.StatusKeys;
|
||||
import org.apache.slider.api.resource.Application;
|
||||
import org.apache.slider.api.types.ApplicationLivenessInformation;
|
||||
import org.apache.slider.api.types.ComponentInformation;
|
||||
import org.apache.slider.api.types.NodeInformation;
|
||||
import org.apache.slider.api.types.RoleStatistics;
|
||||
import org.apache.slider.core.conf.AggregateConf;
|
||||
import org.apache.slider.core.conf.ConfTreeOperations;
|
||||
import org.apache.slider.core.exceptions.NoSuchNodeException;
|
||||
import org.apache.slider.core.registry.docstore.PublishedConfigSet;
|
||||
import org.apache.slider.core.registry.docstore.PublishedExportsSet;
|
||||
|
@@ -105,29 +103,7 @@ public interface StateAccessForProviders {
|
|||
* Get the current cluster description
|
||||
* @return the actual state of the cluster
|
||||
*/
|
||||
ClusterDescription getClusterStatus();
|
||||
|
||||
/**
|
||||
* Get at the snapshot of the resource config
|
||||
* Changes here do not affect the application state.
|
||||
* @return the most recent settings
|
||||
*/
|
||||
ConfTreeOperations getResourcesSnapshot();
|
||||
|
||||
/**
|
||||
* Get at the snapshot of the appconf config
|
||||
* Changes here do not affect the application state.
|
||||
* @return the most recent settings
|
||||
*/
|
||||
ConfTreeOperations getAppConfSnapshot();
|
||||
|
||||
/**
|
||||
* Get at the snapshot of the internals config.
|
||||
* Changes here do not affect the application state.
|
||||
* @return the internals settings
|
||||
*/
|
||||
|
||||
ConfTreeOperations getInternalsSnapshot();
|
||||
Application getApplication();
|
||||
|
||||
/**
|
||||
* Flag set to indicate the application is live -this only happens
|
||||
|
@@ -135,20 +111,6 @@ public interface StateAccessForProviders {
|
|||
*/
|
||||
boolean isApplicationLive();
|
||||
|
||||
long getSnapshotTime();
|
||||
|
||||
/**
|
||||
* Get a snapshot of the entire aggregate configuration
|
||||
* @return the aggregate configuration
|
||||
*/
|
||||
AggregateConf getInstanceDefinitionSnapshot();
|
||||
|
||||
/**
|
||||
* Get the desired/unresolved value
|
||||
* @return unresolved
|
||||
*/
|
||||
AggregateConf getUnresolvedInstanceDefinition();
|
||||
|
||||
/**
|
||||
* Look up a role from its key -or fail
|
||||
*
|
||||
|
@@ -232,14 +194,8 @@ public interface StateAccessForProviders {
|
|||
/**
|
||||
* Update the cluster description with anything interesting
|
||||
*/
|
||||
ClusterDescription refreshClusterStatus();
|
||||
Application refreshClusterStatus();
|
||||
|
||||
/**
|
||||
* Get a deep clone of the role status list. Concurrent events may mean this
|
||||
* list (or indeed, some of the role status entries) may be inconsistent
|
||||
* @return a snapshot of the role status entries
|
||||
*/
|
||||
List<RoleStatus> cloneRoleStatusList();
|
||||
|
||||
/**
|
||||
* get application liveness information
|
||||
|
@@ -247,13 +203,6 @@ public interface StateAccessForProviders {
|
|||
*/
|
||||
ApplicationLivenessInformation getApplicationLivenessInformation();
|
||||
|
||||
/**
|
||||
* Get the live statistics map
|
||||
* @return a map of statistics values, defined in the {@link StatusKeys}
|
||||
* keylist.
|
||||
*/
|
||||
Map<String, Integer> getLiveStatistics();
|
||||
|
||||
/**
|
||||
* Get a snapshot of component information.
|
||||
* <p>
|
||||
|
|
|
@@ -16,6 +16,8 @@
|
|||
*/
|
||||
package org.apache.slider.server.appmaster.web;
|
||||
|
||||
import com.codahale.metrics.MetricRegistry;
|
||||
import com.codahale.metrics.health.HealthCheckRegistry;
|
||||
import com.codahale.metrics.servlets.HealthCheckServlet;
|
||||
import com.codahale.metrics.servlets.MetricsServlet;
|
||||
import com.codahale.metrics.servlets.PingServlet;
|
||||
|
@@ -28,10 +30,8 @@ import com.sun.jersey.spi.container.servlet.ServletContainer;
|
|||
import org.apache.hadoop.yarn.webapp.Dispatcher;
|
||||
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
|
||||
import org.apache.hadoop.yarn.webapp.WebApp;
|
||||
import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
|
||||
import org.apache.slider.server.appmaster.web.rest.AMWadlGeneratorConfig;
|
||||
import org.apache.slider.server.appmaster.web.rest.AMWebServices;
|
||||
import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
|
||||
import org.apache.slider.server.appmaster.web.rest.SliderJacksonJaxbJsonProvider;
|
||||
|
||||
import java.util.HashMap;
|
||||
|
@@ -39,6 +39,8 @@ import java.util.Map;
|
|||
import java.util.logging.Level;
|
||||
import java.util.logging.Logger;
|
||||
|
||||
import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
|
@@ -65,6 +67,7 @@ public class SliderAMWebApp extends WebApp {
|
|||
bind(GenericExceptionHandler.class);
|
||||
// bind the REST interface
|
||||
bind(AMWebServices.class);
|
||||
|
||||
//bind(AMAgentWebServices.class);
|
||||
route("/", SliderAMController.class);
|
||||
route(CONTAINER_STATS, SliderAMController.class, "containerStats");
|
||||
|
@@ -81,11 +84,9 @@ public class SliderAMWebApp extends WebApp {
|
|||
serve(path).with(Dispatcher.class);
|
||||
}
|
||||
|
||||
// metrics
|
||||
MetricsAndMonitoring monitoring =
|
||||
webAppApi.getMetricsAndMonitoring();
|
||||
serve(SYSTEM_HEALTHCHECK).with(new HealthCheckServlet(monitoring.getHealth()));
|
||||
serve(SYSTEM_METRICS).with(new MetricsServlet(monitoring.getMetrics()));
|
||||
serve(SYSTEM_HEALTHCHECK)
|
||||
.with(new HealthCheckServlet(new HealthCheckRegistry()));
|
||||
serve(SYSTEM_METRICS).with(new MetricsServlet(new MetricRegistry()));
|
||||
serve(SYSTEM_PING).with(new PingServlet());
|
||||
serve(SYSTEM_THREADS).with(new ThreadDumpServlet());
|
||||
|
||||
|
|
|
@@ -22,7 +22,6 @@ import org.apache.slider.server.appmaster.actions.QueueAccess;
|
|||
import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
|
||||
import org.apache.slider.server.appmaster.state.AppState;
|
||||
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;
|
||||
|
||||
/**
|
||||
* Interface to pass information from the Slider AppMaster to the WebApp
|
||||
|
@@ -56,10 +55,4 @@ public interface WebAppApi {
|
|||
* @return the immediate and scheduled queues
|
||||
*/
|
||||
QueueAccess getQueues();
|
||||
|
||||
/**
|
||||
* Local cache of content
|
||||
* @return the cache
|
||||
*/
|
||||
ContentCache getContentCache();
|
||||
}
|
||||
|
|
|
@@ -18,11 +18,9 @@ package org.apache.slider.server.appmaster.web;
|
|||
|
||||
import org.apache.hadoop.registry.client.api.RegistryOperations;
|
||||
import org.apache.slider.providers.ProviderService;
|
||||
import org.apache.slider.server.appmaster.AppMasterActionOperations;
|
||||
import org.apache.slider.server.appmaster.actions.QueueAccess;
|
||||
import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
|
||||
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
@@ -39,15 +37,10 @@ public class WebAppApiImpl implements WebAppApi {
|
|||
private final RegistryOperations registryOperations;
|
||||
private final MetricsAndMonitoring metricsAndMonitoring;
|
||||
private final QueueAccess queues;
|
||||
private final AppMasterActionOperations appMasterOperations;
|
||||
private final ContentCache contentCache;
|
||||
|
||||
public WebAppApiImpl(StateAccessForProviders appState,
|
||||
ProviderService provider, RegistryOperations registryOperations,
|
||||
MetricsAndMonitoring metricsAndMonitoring, QueueAccess queues,
|
||||
AppMasterActionOperations appMasterOperations, ContentCache contentCache) {
|
||||
this.appMasterOperations = appMasterOperations;
|
||||
this.contentCache = contentCache;
|
||||
MetricsAndMonitoring metricsAndMonitoring, QueueAccess queues) {
|
||||
checkNotNull(appState);
|
||||
checkNotNull(provider);
|
||||
this.queues = queues;
|
||||
|
@@ -82,10 +75,4 @@ public class WebAppApiImpl implements WebAppApi {
|
|||
public QueueAccess getQueues() {
|
||||
return queues;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public ContentCache getContentCache() {
|
||||
return contentCache;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -18,13 +18,24 @@ package org.apache.slider.server.appmaster.web.rest;
|
|||
|
||||
import com.google.inject.Inject;
|
||||
import com.google.inject.Singleton;
|
||||
import org.apache.slider.api.resource.Application;
|
||||
import org.apache.slider.server.appmaster.web.WebAppApi;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.ApplicationResource;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.actions.RestActionStop;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.actions.StopResponse;
|
||||
import org.apache.slider.server.appmaster.web.rest.management.ManagementResource;
|
||||
import org.apache.slider.server.appmaster.web.rest.publisher.PublisherResource;
|
||||
import org.apache.slider.server.appmaster.web.rest.registry.RegistryResource;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.Produces;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.UriInfo;
|
||||
|
||||
import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
|
||||
import static org.apache.slider.server.appmaster.web.rest.RestPaths.ACTION_STOP;
|
||||
|
||||
/**
|
||||
* The available REST services exposed by a slider AM.
|
||||
|
@@ -38,7 +49,6 @@ public class AMWebServices {
|
|||
private final ManagementResource managementResource;
|
||||
private final PublisherResource publisherResource;
|
||||
private final RegistryResource registryResource;
|
||||
private final ApplicationResource applicationResource;
|
||||
|
||||
@Inject
|
||||
public AMWebServices(WebAppApi slider) {
|
||||
|
@@ -46,7 +56,6 @@ public class AMWebServices {
|
|||
managementResource = new ManagementResource(slider);
|
||||
publisherResource = new PublisherResource(slider);
|
||||
registryResource = new RegistryResource(slider);
|
||||
applicationResource = new ApplicationResource(slider);
|
||||
}
|
||||
|
||||
@Path(RestPaths.SLIDER_SUBPATH_MANAGEMENT)
|
||||
|
@@ -64,8 +73,20 @@ public class AMWebServices {
|
|||
return registryResource;
|
||||
}
|
||||
|
||||
|
||||
@GET
|
||||
@Path(RestPaths.SLIDER_SUBPATH_APPLICATION)
|
||||
public ApplicationResource getApplicationResource() {
|
||||
return applicationResource;
|
||||
@Produces({APPLICATION_JSON})
|
||||
public Application getApplicationResource() {
|
||||
return slider.getAppState().getApplication();
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path(ACTION_STOP)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public StopResponse actionStop(@Context HttpServletRequest request,
|
||||
@Context UriInfo uriInfo,
|
||||
String body) {
|
||||
return new RestActionStop(slider).stop(request, uriInfo, body);
|
||||
}
|
||||
}
|
||||
|
|
|
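Since the application resource is now served directly as JSON, a plain HTTP GET is enough to read it. The sketch below is a hypothetical client call: the exact path depends on RestPaths constants whose values are not shown here, so the URL is only a guess; java.net and java.nio.charset imports are assumed, checked exceptions are omitted, and readAllBytes needs Java 9+.

    String amWebAddress = "http://localhost:8088";            // placeholder AM web address
    URL url = new URL(amWebAddress + "/ws/v1/application");   // guessed path, see note above
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (InputStream in = conn.getInputStream()) {
      // the body is the serialized Application returned by getApplicationResource()
      String json = new String(in.readAllBytes(), StandardCharsets.UTF_8);
    }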
@@ -34,32 +34,14 @@ public class RestPaths {
|
|||
/**
|
||||
* agent content root: {@value}
|
||||
*/
|
||||
public static final String WS_AGENT_CONTEXT_ROOT = "/" + AGENT_WS_CONTEXT;
|
||||
public static final String V1_SLIDER = "/v1/slider";
|
||||
public static final String SLIDER_CONTEXT_ROOT = WS_CONTEXT_ROOT + V1_SLIDER;
|
||||
public static final String RELATIVE_API = WS_CONTEXT + V1_SLIDER;
|
||||
public static final String SLIDER_AGENT_CONTEXT_ROOT = WS_AGENT_CONTEXT_ROOT + V1_SLIDER;
|
||||
public static final String SLIDER_CONTEXT_ROOT = WS_CONTEXT_ROOT + "/v1";
|
||||
public static final String RELATIVE_API = WS_CONTEXT + "/v1";
|
||||
public static final String MANAGEMENT = "mgmt";
|
||||
public static final String SLIDER_SUBPATH_MANAGEMENT = "/" + MANAGEMENT;
|
||||
public static final String SLIDER_SUBPATH_AGENTS = "/agents";
|
||||
public static final String SLIDER_SUBPATH_PUBLISHER = "/publisher";
|
||||
|
||||
|
||||
/**
|
||||
* management path: {@value}
|
||||
*/
|
||||
public static final String SLIDER_PATH_MANAGEMENT = SLIDER_CONTEXT_ROOT
|
||||
+ SLIDER_SUBPATH_MANAGEMENT;
|
||||
|
||||
public static final String RELATIVE_PATH_MANAGEMENT = RELATIVE_API
|
||||
+ SLIDER_SUBPATH_MANAGEMENT;
|
||||
|
||||
/**
|
||||
* Agents: {@value}
|
||||
*/
|
||||
public static final String SLIDER_PATH_AGENTS = SLIDER_AGENT_CONTEXT_ROOT
|
||||
+ SLIDER_SUBPATH_AGENTS;
|
||||
|
||||
/**
|
||||
* Publisher: {@value}
|
||||
*/
|
||||
|
@@ -105,6 +87,7 @@ public class RestPaths {
|
|||
public static final String SYSTEM = "/system";
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Codahale Metrics - health: {@value}
|
||||
*/
|
||||
|
|
|
@@ -19,17 +19,7 @@
|
|||
package org.apache.slider.server.appmaster.web.rest.application;
|
||||
|
||||
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.AggregateModelRefresher;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.AppconfRefresher;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.CachedContent;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.LiveComponentsRefresher;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.LiveContainersRefresher;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.LiveNodesRefresher;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.LiveResourcesRefresher;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.LiveStatisticsRefresher;
|
||||
|
||||
import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
|
||||
|
||||
public class ApplicationResouceContentCacheFactory {
|
||||
public static final int LIFESPAN = 500;
|
||||
|
@@ -41,23 +31,6 @@ public class ApplicationResouceContentCacheFactory {
|
|||
public static ContentCache createContentCache(
|
||||
StateAccessForProviders state) {
|
||||
ContentCache cache = new ContentCache();
|
||||
cache.put(LIVE_RESOURCES, new CachedContent<>(LIFESPAN, new LiveResourcesRefresher(state)));
|
||||
cache.put(LIVE_CONTAINERS, new CachedContent<>(LIFESPAN, new LiveContainersRefresher(state)));
|
||||
cache.put(LIVE_COMPONENTS, new CachedContent<>(LIFESPAN, new LiveComponentsRefresher(state)));
|
||||
cache.put(LIVE_NODES, new CachedContent<>(LIFESPAN, new LiveNodesRefresher(state)));
|
||||
cache.put(MODEL_DESIRED,
|
||||
new CachedContent<>(LIFESPAN, new AggregateModelRefresher(state, false)));
|
||||
cache.put(MODEL_RESOLVED,
|
||||
new CachedContent<>(LIFESPAN, new AggregateModelRefresher(state, true)));
|
||||
cache.put(MODEL_RESOLVED_APPCONF,
|
||||
new CachedContent<>(LIFESPAN, new AppconfRefresher(state, false, false)));
|
||||
cache.put(MODEL_RESOLVED_RESOURCES,
|
||||
new CachedContent<>(LIFESPAN, new AppconfRefresher(state, false, true)));
|
||||
cache.put(MODEL_DESIRED_APPCONF,
|
||||
new CachedContent<>(LIFESPAN, new AppconfRefresher(state, true, false)));
|
||||
cache.put(MODEL_DESIRED_RESOURCES,
|
||||
new CachedContent<>(LIFESPAN, new AppconfRefresher(state, true, true)));
|
||||
cache.put(LIVE_STATISTICS, new CachedContent<>(LIFESPAN, new LiveStatisticsRefresher(state)));
|
||||
return cache;
|
||||
}
|
||||
}
|
||||
|
|
|
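Illustrative sketch (not part of this patch): how a caller consumes the cache built by the factory above. The lookup call, the cast, and the checked exception mirror the usage in the ApplicationResource class removed below; everything else is a minimal wrapper.

import org.apache.slider.api.types.ContainerInformation;
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
import org.apache.slider.server.appmaster.web.rest.application.ApplicationResouceContentCacheFactory;
import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;

import java.util.Map;

import static org.apache.slider.server.appmaster.web.rest.RestPaths.LIVE_CONTAINERS;

public class ContentCacheUsageSketch {
  @SuppressWarnings("unchecked")
  public static Map<String, ContainerInformation> liveContainers(
      StateAccessForProviders state) throws Exception {
    // Build the cache once; each key maps to a CachedContent built with the
    // LIFESPAN constant declared above.
    ContentCache cache =
        ApplicationResouceContentCacheFactory.createContentCache(state);
    // lookup() refreshes a stale entry, then returns the cached value.
    return (Map<String, ContainerInformation>) cache.lookup(LIVE_CONTAINERS);
  }
}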
@ -1,516 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.server.appmaster.web.rest.application;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
|
||||
import org.apache.hadoop.yarn.webapp.BadRequestException;
|
||||
import org.apache.hadoop.yarn.webapp.NotFoundException;
|
||||
import org.apache.slider.api.types.ApplicationLivenessInformation;
|
||||
import org.apache.slider.api.types.ComponentInformation;
|
||||
import org.apache.slider.api.types.ContainerInformation;
|
||||
import org.apache.slider.api.types.NodeInformation;
|
||||
import org.apache.slider.api.types.NodeInformationList;
|
||||
import org.apache.slider.core.conf.AggregateConf;
|
||||
import org.apache.slider.core.conf.ConfTree;
|
||||
import org.apache.slider.core.exceptions.NoSuchNodeException;
|
||||
import org.apache.slider.core.persist.ConfTreeSerDeser;
|
||||
import org.apache.slider.server.appmaster.actions.ActionFlexCluster;
|
||||
import org.apache.slider.server.appmaster.actions.AsyncAction;
|
||||
import org.apache.slider.server.appmaster.actions.QueueAccess;
|
||||
import org.apache.slider.server.appmaster.state.RoleInstance;
|
||||
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
|
||||
import org.apache.slider.server.appmaster.web.WebAppApi;
|
||||
import org.apache.slider.server.appmaster.web.rest.AbstractSliderResource;
|
||||
import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
|
||||
|
||||
import org.apache.slider.server.appmaster.web.rest.application.actions.RestActionStop;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.actions.StopResponse;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;
|
||||
import org.apache.slider.server.appmaster.web.rest.application.actions.RestActionPing;
|
||||
import org.apache.slider.api.types.PingInformation;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import javax.inject.Singleton;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.Consumes;
|
||||
import javax.ws.rs.DELETE;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.HEAD;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.PUT;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.PathParam;
|
||||
import javax.ws.rs.Produces;
|
||||
import javax.ws.rs.WebApplicationException;
|
||||
import javax.ws.rs.core.Context;
|
||||
|
||||
import static javax.ws.rs.core.MediaType.*;
|
||||
|
||||
import javax.ws.rs.core.Response;
|
||||
import javax.ws.rs.core.UriInfo;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
@Singleton
|
||||
@SuppressWarnings("unchecked")
|
||||
public class ApplicationResource extends AbstractSliderResource {
|
||||
private static final Logger log =
|
||||
LoggerFactory.getLogger(ApplicationResource.class);
|
||||
|
||||
public static final List<String> LIVE_ENTRIES = toJsonList("resources",
|
||||
"containers",
|
||||
"components",
|
||||
"nodes",
|
||||
"statistics",
|
||||
"internal");
|
||||
|
||||
public static final List<String> ROOT_ENTRIES =
|
||||
toJsonList("model", "live", "actions");
|
||||
|
||||
public static final List<String> MODEL_ENTRIES =
|
||||
toJsonList("desired", "resolved");
|
||||
|
||||
/**
|
||||
* This is the cache of all content ... each entry is
|
||||
* designed to be self-refreshing on get operations,
|
||||
* so it is never very out of date, yet frequent GETs do not
|
||||
* overload the rest of the system.
|
||||
*/
|
||||
private final ContentCache cache;
|
||||
private final StateAccessForProviders state;
|
||||
private final QueueAccess actionQueues;
|
||||
|
||||
public ApplicationResource(WebAppApi slider) {
|
||||
super(slider);
|
||||
state = slider.getAppState();
|
||||
cache = slider.getContentCache();
|
||||
actionQueues = slider.getQueues();
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a new JSON-marshallable list of string elements
|
||||
* @param elements elements
|
||||
* @return a list that can be marshalled to JSON
|
||||
*/
|
||||
private static List<String> toJsonList(String... elements) {
|
||||
return Lists.newArrayList(elements);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/")
|
||||
@Produces({APPLICATION_JSON})
|
||||
public List<String> getRoot() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION);
|
||||
return ROOT_ENTRIES;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enum model values: desired and resolved
|
||||
* @return the desired and resolved model
|
||||
*/
|
||||
@GET
|
||||
@Path(MODEL)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public List<String> getModel() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, MODEL);
|
||||
return MODEL_ENTRIES;
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(MODEL_DESIRED)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public AggregateConf getModelDesired() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, MODEL_DESIRED);
|
||||
return lookupAggregateConf(MODEL_DESIRED);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(MODEL_DESIRED_APPCONF)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ConfTree getModelDesiredAppconf() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, MODEL_DESIRED_APPCONF);
|
||||
return lookupConfTree(MODEL_DESIRED_APPCONF);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(MODEL_DESIRED_RESOURCES)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ConfTree getModelDesiredResources() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, MODEL_DESIRED_RESOURCES);
|
||||
return lookupConfTree(MODEL_DESIRED_RESOURCES);
|
||||
}
|
||||
|
||||
/*
|
||||
@PUT
|
||||
@Path(MODEL_DESIRED_RESOURCES)
|
||||
// @Consumes({APPLICATION_JSON, TEXT_PLAIN})
|
||||
@Consumes({TEXT_PLAIN})
|
||||
@Produces({APPLICATION_JSON})
|
||||
*/
|
||||
public ConfTree setModelDesiredResources(
|
||||
String json) {
|
||||
markPut(SLIDER_SUBPATH_APPLICATION, MODEL_DESIRED_RESOURCES);
|
||||
int size = json != null ? json.length() : 0;
|
||||
log.info("PUT {} {} bytes:\n{}", MODEL_DESIRED_RESOURCES,
|
||||
size,
|
||||
json);
|
||||
if (size == 0) {
|
||||
log.warn("No JSON in PUT request; rejecting");
|
||||
throw new BadRequestException("No JSON in PUT");
|
||||
}
|
||||
|
||||
try {
|
||||
ConfTreeSerDeser serDeser = new ConfTreeSerDeser();
|
||||
ConfTree updated = serDeser.fromJson(json);
|
||||
queue(new ActionFlexCluster("flex",
|
||||
1, TimeUnit.MILLISECONDS,
|
||||
updated));
|
||||
// return the updated value, even though it potentially hasn't yet
|
||||
// been executed
|
||||
return updated;
|
||||
} catch (Exception e) {
|
||||
throw buildException("PUT to "+ MODEL_DESIRED_RESOURCES , e);
|
||||
}
|
||||
}
|
||||
@PUT
|
||||
@Path(MODEL_DESIRED_RESOURCES)
|
||||
@Consumes({APPLICATION_JSON})
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ConfTree setModelDesiredResources(
|
||||
ConfTree updated) {
|
||||
try {
|
||||
queue(new ActionFlexCluster("flex",
|
||||
1, TimeUnit.MILLISECONDS,
|
||||
updated));
|
||||
// return the updated value, even though it potentially hasn't yet
|
||||
// been executed
|
||||
return updated;
|
||||
} catch (Exception e) {
|
||||
throw buildException("PUT to "+ MODEL_DESIRED_RESOURCES , e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@GET
|
||||
@Path(MODEL_RESOLVED)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public AggregateConf getModelResolved() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, MODEL_RESOLVED);
|
||||
return lookupAggregateConf(MODEL_RESOLVED);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(MODEL_RESOLVED_APPCONF)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ConfTree getModelResolvedAppconf() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, MODEL_RESOLVED_APPCONF);
|
||||
return lookupConfTree(MODEL_RESOLVED_APPCONF);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(MODEL_RESOLVED_RESOURCES)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ConfTree getModelResolvedResources() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, MODEL_RESOLVED_RESOURCES);
|
||||
return lookupConfTree(MODEL_RESOLVED_RESOURCES);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(LIVE)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public List<String> getLive() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE);
|
||||
return LIVE_ENTRIES;
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(LIVE_RESOURCES)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ConfTree getLiveResources() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_RESOURCES);
|
||||
return lookupConfTree(LIVE_RESOURCES);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(LIVE_CONTAINERS)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public Map<String, ContainerInformation> getLiveContainers() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_CONTAINERS);
|
||||
try {
|
||||
return (Map<String, ContainerInformation>)cache.lookup(
|
||||
LIVE_CONTAINERS);
|
||||
} catch (Exception e) {
|
||||
throw buildException(LIVE_CONTAINERS, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(LIVE_CONTAINERS + "/{containerId}")
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ContainerInformation getLiveContainer(
|
||||
@PathParam("containerId") String containerId) {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_CONTAINERS);
|
||||
try {
|
||||
RoleInstance id = state.getLiveInstanceByContainerID(containerId);
|
||||
return id.serialize();
|
||||
} catch (NoSuchNodeException e) {
|
||||
throw new NotFoundException("Unknown container: " + containerId);
|
||||
} catch (Exception e) {
|
||||
throw buildException(LIVE_CONTAINERS + "/"+ containerId, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(LIVE_COMPONENTS)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public Map<String, ComponentInformation> getLiveComponents() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
|
||||
try {
|
||||
return (Map<String, ComponentInformation>) cache.lookup(LIVE_COMPONENTS);
|
||||
} catch (Exception e) {
|
||||
throw buildException(LIVE_COMPONENTS, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(LIVE_COMPONENTS + "/{component}")
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ComponentInformation getLiveComponent(
|
||||
@PathParam("component") String component) {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
|
||||
try {
|
||||
return state.getComponentInformation(component);
|
||||
} catch (YarnRuntimeException e) {
|
||||
throw new NotFoundException("Unknown component: " + component);
|
||||
} catch (Exception e) {
|
||||
throw buildException(LIVE_CONTAINERS +"/" + component, e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Liveness information for the application as a whole
|
||||
* @return snapshot of liveness
|
||||
*/
|
||||
@GET
|
||||
@Path(LIVE_LIVENESS)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ApplicationLivenessInformation getLivenessInformation() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_LIVENESS);
|
||||
try {
|
||||
return state.getApplicationLivenessInformation();
|
||||
} catch (Exception e) {
|
||||
throw buildException(LIVE_CONTAINERS, e);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
TODO: decide what structure to return here, then implement
|
||||
|
||||
@GET
|
||||
@Path(LIVE_LIVENESS + "/{component}")
|
||||
@Produces({APPLICATION_JSON})
|
||||
public ApplicationLivenessInformation getLivenessForComponent(
|
||||
@PathParam("component") String component) {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
|
||||
try {
|
||||
RoleStatus roleStatus = state.lookupRoleStatus(component);
|
||||
ApplicationLivenessInformation info = new ApplicationLivenessInformation();
|
||||
info.requested = roleStatus.getRequested();
|
||||
info.allRequestsSatisfied = info.requested == 0;
|
||||
return info;
|
||||
} catch (YarnRuntimeException e) {
|
||||
throw new NotFoundException("Unknown component: " + component);
|
||||
} catch (Exception e) {
|
||||
throw buildException(LIVE_LIVENESS + "/" + component, e);
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
|
||||
@GET
|
||||
@Path(LIVE_NODES)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public NodeInformationList getLiveNodes() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
|
||||
try {
|
||||
return (NodeInformationList) cache.lookup(LIVE_NODES);
|
||||
} catch (Exception e) {
|
||||
throw buildException(LIVE_COMPONENTS, e);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path(LIVE_NODES + "/{hostname}")
|
||||
@Produces({APPLICATION_JSON})
|
||||
public NodeInformation getLiveNode(@PathParam("hostname") String hostname) {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
|
||||
try {
|
||||
NodeInformation ni = state.getNodeInformation(hostname);
|
||||
if (ni != null) {
|
||||
return ni;
|
||||
} else {
|
||||
throw new NotFoundException("Unknown node: " + hostname);
|
||||
}
|
||||
} catch (NotFoundException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw buildException(LIVE_COMPONENTS + "/" + hostname, e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Statistics of the application
|
||||
* @return snapshot statistics
|
||||
*/
|
||||
@GET
|
||||
@Path(LIVE_STATISTICS)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public Map<String, Integer> getLiveStatistics() {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, LIVE_LIVENESS);
|
||||
try {
|
||||
return (Map<String, Integer>) cache.lookup(LIVE_STATISTICS);
|
||||
} catch (Exception e) {
|
||||
throw buildException(LIVE_STATISTICS, e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper method; look up an aggregate configuration in the cache from
|
||||
* a key, or raise an exception
|
||||
* @param key key to resolve
|
||||
* @return the configuration
|
||||
* @throws WebApplicationException on a failure
|
||||
*/
|
||||
protected AggregateConf lookupAggregateConf(String key) {
|
||||
try {
|
||||
return (AggregateConf) cache.lookup(key);
|
||||
} catch (Exception e) {
|
||||
throw buildException(key, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Helper method; look up a conf tree in the cache from
|
||||
* a key, or raise an exception
|
||||
* @param key key to resolve
|
||||
* @return the configuration
|
||||
* @throws WebApplicationException on a failure
|
||||
*/
|
||||
protected ConfTree lookupConfTree(String key) {
|
||||
try {
|
||||
return (ConfTree) cache.lookup(key);
|
||||
} catch (Exception e) {
|
||||
throw buildException(key, e);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************
|
||||
|
||||
ACTION PING
|
||||
|
||||
**************************************************************************/
|
||||
|
||||
@GET
|
||||
@Path(ACTION_PING)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public PingInformation actionPingGet(@Context HttpServletRequest request,
|
||||
@Context UriInfo uriInfo) {
|
||||
markGet(SLIDER_SUBPATH_APPLICATION, ACTION_PING);
|
||||
return new RestActionPing().ping(request, uriInfo, "");
|
||||
}
|
||||
|
||||
@POST
|
||||
@Path(ACTION_PING)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public PingInformation actionPingPost(@Context HttpServletRequest request,
|
||||
@Context UriInfo uriInfo,
|
||||
String body) {
|
||||
markPost(SLIDER_SUBPATH_APPLICATION, ACTION_PING);
|
||||
return new RestActionPing().ping(request, uriInfo, body);
|
||||
}
|
||||
|
||||
@PUT
|
||||
@Path(ACTION_PING)
|
||||
@Consumes({TEXT_PLAIN})
|
||||
@Produces({APPLICATION_JSON})
|
||||
public PingInformation actionPingPut(@Context HttpServletRequest request,
|
||||
@Context UriInfo uriInfo,
|
||||
String body) {
|
||||
markPut(SLIDER_SUBPATH_APPLICATION, ACTION_PING);
|
||||
return new RestActionPing().ping(request, uriInfo, body);
|
||||
}
|
||||
|
||||
@DELETE
|
||||
@Path(ACTION_PING)
|
||||
@Consumes({APPLICATION_JSON})
|
||||
@Produces({APPLICATION_JSON})
|
||||
public PingInformation actionPingDelete(@Context HttpServletRequest request,
|
||||
@Context UriInfo uriInfo) {
|
||||
markDelete(SLIDER_SUBPATH_APPLICATION, ACTION_PING);
|
||||
return new RestActionPing().ping(request, uriInfo, "");
|
||||
}
|
||||
|
||||
@HEAD
|
||||
@Path(ACTION_PING)
|
||||
public Object actionPingHead(@Context HttpServletRequest request,
|
||||
@Context UriInfo uriInfo) {
|
||||
mark("HEAD", SLIDER_SUBPATH_APPLICATION, ACTION_PING);
|
||||
return new RestActionPing().ping(request, uriInfo, "");
|
||||
}
|
||||
|
||||
/* ************************************************************************
|
||||
|
||||
ACTION STOP
|
||||
|
||||
**************************************************************************/
|
||||
|
||||
|
||||
@POST
|
||||
@Path(ACTION_STOP)
|
||||
@Produces({APPLICATION_JSON})
|
||||
public StopResponse actionStop(@Context HttpServletRequest request,
|
||||
@Context UriInfo uriInfo,
|
||||
String body) {
|
||||
markPost(SLIDER_SUBPATH_APPLICATION, ACTION_STOP);
|
||||
return new RestActionStop(slider).stop(request, uriInfo, body);
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedule an action
|
||||
* @param action for delayed execution
|
||||
*/
|
||||
public void schedule(AsyncAction action) {
|
||||
actionQueues.schedule(action);
|
||||
}
|
||||
|
||||
/**
|
||||
* Put an action on the immediate queue, to be executed when the queue
|
||||
* reaches it.
|
||||
* @param action action to queue
|
||||
*/
|
||||
public void queue(AsyncAction action) {
|
||||
actionQueues.put(action);
|
||||
}
|
||||
}
|
|
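Illustrative sketch (not part of this patch): a hypothetical JAX-RS 2.x client call against the JSON endpoints the deleted ApplicationResource served, assuming the pre-refactor path layout of /ws/v1/slider/application/live/containers and a made-up AM address.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.MediaType;

public class LiveContainersClientSketch {
  public static void main(String[] args) {
    String amBase = "http://appmaster.example.com:1025"; // hypothetical AM address
    Client client = ClientBuilder.newClient();
    try {
      // GET <am>/ws/v1/slider/application/live/containers as raw JSON text
      String json = client.target(amBase)
          .path("ws/v1/slider/application/live/containers")
          .request(MediaType.APPLICATION_JSON)
          .get(String.class);
      System.out.println(json);
    } finally {
      client.close();
    }
  }
}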
@ -23,7 +23,6 @@ import org.apache.slider.server.appmaster.state.StateAccessForProviders;
|
|||
|
||||
/**
|
||||
* Refresh the aggregate desired model via
|
||||
* {@link StateAccessForProviders#getInstanceDefinitionSnapshot()}
|
||||
*/
|
||||
public class AggregateModelRefresher
|
||||
implements ResourceRefresher<AggregateConf> {
|
||||
|
@ -39,9 +38,6 @@ public class AggregateModelRefresher
|
|||
|
||||
@Override
|
||||
public AggregateConf refresh() throws Exception {
|
||||
return
|
||||
resolved ?
|
||||
state.getInstanceDefinitionSnapshot()
|
||||
: state.getUnresolvedInstanceDefinition();
|
||||
return new AggregateConf();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -44,10 +44,7 @@ public class AppconfRefresher
|
|||
|
||||
@Override
|
||||
public ConfTree refresh() throws Exception {
|
||||
AggregateConf aggregateConf =
|
||||
unresolved ?
|
||||
state.getUnresolvedInstanceDefinition():
|
||||
state.getInstanceDefinitionSnapshot();
|
||||
AggregateConf aggregateConf = new AggregateConf();
|
||||
ConfTree ct = resources ? aggregateConf.getResources()
|
||||
: aggregateConf.getAppConf();
|
||||
return new ConfTreeSerDeser().fromInstance(ct);
|
||||
|
|
|
@ -1,68 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.server.appmaster.web.rest.application.resources;
|
||||
|
||||
import org.apache.slider.api.StatusKeys;
|
||||
import org.apache.slider.core.conf.ConfTree;
|
||||
import org.apache.slider.core.conf.ConfTreeOperations;
|
||||
import org.apache.slider.server.appmaster.state.RoleStatus;
|
||||
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
public class LiveResourcesRefresher implements ResourceRefresher<ConfTree> {
|
||||
|
||||
private final StateAccessForProviders state;
|
||||
|
||||
public LiveResourcesRefresher(StateAccessForProviders state) {
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfTree refresh() throws Exception {
|
||||
|
||||
// snapshot resources
|
||||
ConfTreeOperations resources = state.getResourcesSnapshot();
|
||||
// then add actual values
|
||||
Map<Integer, RoleStatus> roleStatusMap = state.getRoleStatusMap();
|
||||
|
||||
for (RoleStatus status : roleStatusMap.values()) {
|
||||
String name = status.getName();
|
||||
resources.setComponentOpt(name,
|
||||
StatusKeys.COMPONENT_INSTANCES_REQUESTING,
|
||||
status.getRequested());
|
||||
resources.setComponentOpt(name,
|
||||
StatusKeys.COMPONENT_INSTANCES_ACTUAL,
|
||||
status.getActual());
|
||||
resources.setComponentOpt(name,
|
||||
StatusKeys.COMPONENT_INSTANCES_RELEASING,
|
||||
status.getReleasing());
|
||||
resources.setComponentOpt(name,
|
||||
StatusKeys.COMPONENT_INSTANCES_FAILED,
|
||||
status.getFailed());
|
||||
resources.setComponentOpt(name,
|
||||
StatusKeys.COMPONENT_INSTANCES_COMPLETED,
|
||||
status.getCompleted());
|
||||
resources.setComponentOpt(name,
|
||||
StatusKeys.COMPONENT_INSTANCES_STARTED,
|
||||
status.getStarted());
|
||||
}
|
||||
return resources.getConfTree();
|
||||
}
|
||||
}
|
|
@ -1,39 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.server.appmaster.web.rest.application.resources;
|
||||
|
||||
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
public class LiveStatisticsRefresher implements ResourceRefresher<Map<String,Integer>> {
|
||||
|
||||
private final StateAccessForProviders state;
|
||||
|
||||
public LiveStatisticsRefresher(StateAccessForProviders state) {
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, Integer> refresh() throws Exception {
|
||||
|
||||
// snapshot resources
|
||||
return state.getLiveStatistics();
|
||||
}
|
||||
}
|
|
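Illustrative sketch (not part of this patch): the refresher/cache pattern the deleted classes implemented, shown as an anonymous ResourceRefresher wrapped in a CachedContent with the same lifespan value the factory uses. Names mirror the code above; the wiring itself is only an example.

import org.apache.slider.server.appmaster.state.StateAccessForProviders;
import org.apache.slider.server.appmaster.web.rest.application.resources.CachedContent;
import org.apache.slider.server.appmaster.web.rest.application.resources.ResourceRefresher;

import java.util.Map;

public class LiveStatisticsCacheSketch {
  public static CachedContent<Map<String, Integer>> build(
      final StateAccessForProviders state) {
    ResourceRefresher<Map<String, Integer>> refresher =
        new ResourceRefresher<Map<String, Integer>>() {
          @Override
          public Map<String, Integer> refresh() throws Exception {
            // the same data the deleted LiveStatisticsRefresher returned
            return state.getLiveStatistics();
          }
        };
    // the factory above uses LIFESPAN = 500 for every entry; repeated lookups
    // within that window reuse the cached value instead of calling refresh()
    return new CachedContent<>(500, refresher);
  }
}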
@ -1,40 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.server.appmaster.web.rest.application.resources;
|
||||
|
||||
import org.apache.slider.core.conf.ConfTree;
|
||||
import org.apache.slider.core.conf.ConfTreeOperations;
|
||||
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
|
||||
|
||||
public class ResourceSnapshotRefresher implements ResourceRefresher<ConfTree> {
|
||||
|
||||
private final StateAccessForProviders state;
|
||||
|
||||
public ResourceSnapshotRefresher(StateAccessForProviders state) {
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfTree refresh() throws Exception {
|
||||
|
||||
// snapshot resources
|
||||
ConfTreeOperations resources = state.getResourcesSnapshot();
|
||||
return resources.getConfTree();
|
||||
}
|
||||
}
|
|
@ -88,6 +88,7 @@ public class ManagementResource extends AbstractSliderResource {
|
|||
}
|
||||
|
||||
protected AggregateConf getAggregateConf() {
|
||||
return slider.getAppState().getInstanceDefinitionSnapshot();
|
||||
//TODO
|
||||
return new AggregateConf();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -49,7 +49,7 @@ public class ClusterSpecificationBlock extends SliderHamletBlock {
|
|||
* @return
|
||||
*/
|
||||
private String getJson() {
|
||||
return appState.getClusterStatus().toString();
|
||||
return appState.getApplication().toString();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -26,8 +26,8 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
|
|||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
|
||||
import org.apache.slider.api.ClusterDescription;
|
||||
import org.apache.slider.api.ClusterNode;
|
||||
import org.apache.slider.api.resource.Application;
|
||||
import org.apache.slider.api.types.ComponentInformation;
|
||||
import org.apache.slider.server.appmaster.state.RoleInstance;
|
||||
import org.apache.slider.server.appmaster.web.WebAppApi;
|
||||
|
@ -141,17 +141,9 @@ public class ContainerStatsBlock extends SliderHamletBlock {
|
|||
|
||||
}));
|
||||
|
||||
ClusterDescription desc = appState.getClusterStatus();
|
||||
Map<String, String> options = desc.getRole(name);
|
||||
Application application = appState.getApplication();
|
||||
Iterable<Entry<TableContent, String>> tableContent;
|
||||
|
||||
// Generate the pairs of data in the expected form
|
||||
if (null != options) {
|
||||
tableContent = Iterables.transform(options.entrySet(), stringStringPairFunc);
|
||||
} else {
|
||||
// Or catch that we have no options and provide "empty"
|
||||
tableContent = Collections.emptySet();
|
||||
}
|
||||
|
||||
// Generate the options used by this role
|
||||
generateRoleDetails(div, "role-options-wrap", "Role Options", tableContent);
|
||||
|
|
|
@ -22,15 +22,12 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
|
|||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
|
||||
import org.apache.slider.api.ClusterDescription;
|
||||
import org.apache.slider.api.StatusKeys;
|
||||
import org.apache.slider.api.types.ApplicationLivenessInformation;
|
||||
import org.apache.slider.common.tools.SliderUtils;
|
||||
import org.apache.slider.core.registry.docstore.ExportEntry;
|
||||
import org.apache.slider.core.registry.docstore.PublishedExports;
|
||||
import org.apache.slider.core.registry.docstore.PublishedExportsSet;
|
||||
import org.apache.slider.providers.MonitorDetail;
|
||||
import org.apache.slider.providers.ProviderService;
|
||||
import org.apache.slider.server.appmaster.metrics.SliderMetrics;
|
||||
import org.apache.slider.server.appmaster.state.RoleStatus;
|
||||
import org.apache.slider.server.appmaster.web.WebAppApi;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -39,7 +36,6 @@ import org.slf4j.LoggerFactory;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Set;
|
||||
|
||||
|
@ -71,8 +67,7 @@ public class IndexBlock extends SliderHamletBlock {
|
|||
// An extra method to make testing easier since you can't make an instance of Block
|
||||
@VisibleForTesting
|
||||
protected void doIndex(Hamlet html, String providerName) {
|
||||
ClusterDescription clusterStatus = appState.getClusterStatus();
|
||||
String name = clusterStatus.name;
|
||||
String name = appState.getApplicationName();
|
||||
if (name != null && (name.startsWith(" ") || name.endsWith(" "))) {
|
||||
name = "'" + name + "'";
|
||||
}
|
||||
|
@ -96,23 +91,23 @@ public class IndexBlock extends SliderHamletBlock {
|
|||
._();
|
||||
table1.tr()
|
||||
.td("Create time: ")
|
||||
.td(getInfoAvoidingNulls(StatusKeys.INFO_CREATE_TIME_HUMAN))
|
||||
.td("N/A")
|
||||
._();
|
||||
table1.tr()
|
||||
.td("Running since: ")
|
||||
.td(getInfoAvoidingNulls(StatusKeys.INFO_LIVE_TIME_HUMAN))
|
||||
.td("N/A")
|
||||
._();
|
||||
table1.tr()
|
||||
.td("Time last flexed: ")
|
||||
.td(getInfoAvoidingNulls(StatusKeys.INFO_FLEX_TIME_HUMAN))
|
||||
.td("N/A")
|
||||
._();
|
||||
table1.tr()
|
||||
.td("Application storage path: ")
|
||||
.td(clusterStatus.dataPath)
|
||||
.td("N/A")
|
||||
._();
|
||||
table1.tr()
|
||||
.td("Application configuration path: ")
|
||||
.td(clusterStatus.originConfigurationPath)
|
||||
.td("N/A")
|
||||
._();
|
||||
table1._();
|
||||
div._();
|
||||
|
@ -136,7 +131,8 @@ public class IndexBlock extends SliderHamletBlock {
|
|||
trb(header, "Placement");
|
||||
header._()._(); // tr & thead
|
||||
|
||||
List<RoleStatus> roleStatuses = appState.cloneRoleStatusList();
|
||||
List<RoleStatus> roleStatuses =
|
||||
new ArrayList<>(appState.getRoleStatusMap().values());
|
||||
Collections.sort(roleStatuses, new RoleStatus.CompareByName());
|
||||
for (RoleStatus status : roleStatuses) {
|
||||
String roleName = status.getName();
|
||||
|
@ -144,7 +140,7 @@ public class IndexBlock extends SliderHamletBlock {
|
|||
String aatext;
|
||||
if (status.isAntiAffinePlacement()) {
|
||||
boolean aaRequestOutstanding = status.isAARequestOutstanding();
|
||||
int pending = (int)status.getPendingAntiAffineRequests();
|
||||
int pending = (int)status.getAAPending();
|
||||
aatext = buildAADetails(aaRequestOutstanding, pending);
|
||||
if (SliderUtils.isSet(status.getLabelExpression())) {
|
||||
aatext += " (label: " + status.getLabelExpression() + ")";
|
||||
|
@ -160,17 +156,17 @@ public class IndexBlock extends SliderHamletBlock {
|
|||
} else {
|
||||
aatext = "";
|
||||
}
|
||||
if (status.getRequested() > 0) {
|
||||
if (status.getPending() > 0) {
|
||||
roleWithOpenRequest ++;
|
||||
}
|
||||
}
|
||||
SliderMetrics metrics = status.getComponentMetrics();
|
||||
table.tr()
|
||||
.td().a(nameUrl, roleName)._()
|
||||
.td(String.format("%d", status.getDesired()))
|
||||
.td(String.format("%d", status.getActual()))
|
||||
.td(String.format("%d", status.getRequested()))
|
||||
.td(String.format("%d", status.getFailed()))
|
||||
.td(String.format("%d", status.getStartFailed()))
|
||||
.td(String.format("%d", metrics.containersDesired.value()))
|
||||
.td(String.format("%d", metrics.containersRunning.value()))
|
||||
.td(String.format("%d", metrics.containersPending.value()))
|
||||
.td(String.format("%d", metrics.containersFailed.value()))
|
||||
.td(aatext)
|
||||
._();
|
||||
}
|
||||
|
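Illustrative sketch (not part of this patch): the per-role counters the table above now reads from SliderMetrics, summarized as a plain string. The field and accessor names are taken from the rendering code; the helper class itself is an assumption.

import org.apache.slider.server.appmaster.metrics.SliderMetrics;
import org.apache.slider.server.appmaster.state.RoleStatus;

public class RoleMetricsSummarySketch {
  public static String summarize(RoleStatus status) {
    SliderMetrics metrics = status.getComponentMetrics();
    // the same counters the Hamlet table renders, one line per role
    return String.format("%s: desired=%d running=%d pending=%d failed=%d",
        status.getName(),
        metrics.containersDesired.value(),
        metrics.containersRunning.value(),
        metrics.containersPending.value(),
        metrics.containersFailed.value());
  }
}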
@ -218,7 +214,7 @@ public class IndexBlock extends SliderHamletBlock {
|
|||
DIV<Hamlet> provider_info = html.div("provider_info");
|
||||
provider_info.h3(providerName + " information");
|
||||
UL<Hamlet> ul = html.ul();
|
||||
addProviderServiceOptions(providerService, ul, clusterStatus);
|
||||
//TODO render app/cluster status
|
||||
ul._();
|
||||
provider_info._();
|
||||
|
||||
|
@ -250,40 +246,9 @@ public class IndexBlock extends SliderHamletBlock {
|
|||
}
|
||||
|
||||
private String getProviderName() {
|
||||
return providerService.getHumanName();
|
||||
return "docker";
|
||||
}
|
||||
|
||||
private String getInfoAvoidingNulls(String key) {
|
||||
String createTime = appState.getClusterStatus().getInfo(key);
|
||||
|
||||
return null == createTime ? "N/A" : createTime;
|
||||
}
|
||||
|
||||
protected void addProviderServiceOptions(ProviderService provider,
|
||||
UL ul, ClusterDescription clusterStatus) {
|
||||
Map<String, MonitorDetail> details = provider.buildMonitorDetails(
|
||||
clusterStatus);
|
||||
if (null == details) {
|
||||
return;
|
||||
}
|
||||
// Loop over each entry, placing the text in the UL, adding an anchor when the value is a non-empty URL
|
||||
for (Entry<String, MonitorDetail> entry : details.entrySet()) {
|
||||
MonitorDetail detail = entry.getValue();
|
||||
if (SliderUtils.isSet(detail.getValue()) ) {
|
||||
LI item = ul.li();
|
||||
item.span().$class("bold")._(entry.getKey())._();
|
||||
item._(" - ");
|
||||
if (detail.isUrl()) {
|
||||
// Render an anchor if the value is a URL
|
||||
item.a(detail.getValue(), detail.getValue())._();
|
||||
} else {
|
||||
item._(detail.getValue())._();
|
||||
}
|
||||
} else {
|
||||
ul.li(entry.getKey());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void enumeratePublishedExports(PublishedExportsSet exports, UL<Hamlet> ul) {
|
||||
for(String key : exports.keys()) {
|
||||
|
|
|
@ -1,86 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.server.servicemonitor;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationReport;
|
||||
import org.apache.slider.client.SliderYarnClientImpl;
|
||||
import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Probe for YARN application
|
||||
*/
|
||||
public class YarnApplicationProbe extends Probe {
|
||||
protected static final Logger log = LoggerFactory.getLogger(
|
||||
YarnApplicationProbe.class);
|
||||
|
||||
/**
|
||||
* Yarn client service
|
||||
*/
|
||||
private SliderYarnClientImpl yarnClient;
|
||||
private final String clustername;
|
||||
private final String username;
|
||||
|
||||
public YarnApplicationProbe(String clustername,
|
||||
SliderYarnClientImpl yarnClient,
|
||||
String name,
|
||||
Configuration conf, String username)
|
||||
throws IOException {
|
||||
super("Port probe " + name + " " + clustername,
|
||||
conf);
|
||||
this.clustername = clustername;
|
||||
this.yarnClient = yarnClient;
|
||||
this.username = username;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void init() throws IOException {
|
||||
|
||||
log.info("Checking " + clustername );
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to connect to the (host,port); a failure to connect within
|
||||
* the specified timeout is a failure
|
||||
* @param livePing is the ping live: true for live; false for boot time
|
||||
* @return the outcome
|
||||
*/
|
||||
@Override
|
||||
public ProbeStatus ping(boolean livePing) {
|
||||
ProbeStatus status = new ProbeStatus();
|
||||
try {
|
||||
List<ApplicationReport> instances = yarnClient
|
||||
.listDeployedInstances(username, null, clustername);
|
||||
ApplicationReport instance = yarnClient
|
||||
.findClusterInInstanceList(instances, clustername);
|
||||
if (null == instance) {
|
||||
throw UnknownApplicationInstanceException.unknownInstance(clustername);
|
||||
}
|
||||
status.succeed(this);
|
||||
} catch (Exception e) {
|
||||
status.fail(this, e);
|
||||
}
|
||||
return status;
|
||||
}
|
||||
}
|
|
@ -210,8 +210,6 @@ public class YarnRegistryViewForProviders {
|
|||
|
||||
/**
|
||||
* Add a service under a path for the current user
|
||||
* @param serviceClass service class to use under ~user
|
||||
* @param serviceName name of the service
|
||||
* @param record service record
|
||||
* @param deleteTreeFirst perform recursive delete of the path first
|
||||
* @return the path the service was created at
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.services.utils;
|
||||
package org.apache.slider.util;
|
||||
|
||||
public interface RestApiConstants {
|
||||
String CONTEXT_ROOT = "/services/v1";
|
|
@ -15,7 +15,7 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.services.utils;
|
||||
package org.apache.slider.util;
|
||||
|
||||
public interface RestApiErrorMessages {
|
||||
String ERROR_APPLICATION_NAME_INVALID =
|
|
@ -0,0 +1,203 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.util;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.slider.api.resource.Application;
|
||||
import org.apache.slider.api.resource.Artifact;
|
||||
import org.apache.slider.api.resource.Component;
|
||||
import org.apache.slider.api.resource.Configuration;
|
||||
import org.apache.slider.api.resource.Resource;
|
||||
import org.apache.slider.common.tools.SliderUtils;
|
||||
|
||||
public class ServiceApiUtil {
|
||||
|
||||
@VisibleForTesting
|
||||
public static void validateApplicationPostPayload(Application application) {
|
||||
if (StringUtils.isEmpty(application.getName())) {
|
||||
throw new IllegalArgumentException(
|
||||
RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID);
|
||||
}
|
||||
if (!SliderUtils.isClusternameValid(application.getName())) {
|
||||
throw new IllegalArgumentException(
|
||||
RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID_FORMAT);
|
||||
}
|
||||
|
||||
// If the application has no components do top-level checks
|
||||
if (!hasComponent(application)) {
|
||||
// artifact
|
||||
if (application.getArtifact() == null) {
|
||||
throw new IllegalArgumentException(
|
||||
RestApiErrorMessages.ERROR_ARTIFACT_INVALID);
|
||||
}
|
||||
if (StringUtils.isEmpty(application.getArtifact().getId())) {
|
||||
throw new IllegalArgumentException(
|
||||
RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
|
||||
}
|
||||
|
||||
// If artifact is of type APPLICATION, add a slider specific property
|
||||
if (application.getArtifact().getType()
|
||||
== Artifact.TypeEnum.APPLICATION) {
|
||||
if (application.getConfiguration() == null) {
|
||||
application.setConfiguration(new Configuration());
|
||||
}
|
||||
}
|
||||
// resource
|
||||
validateApplicationResource(application.getResource(), null,
|
||||
application.getArtifact().getType());
|
||||
|
||||
// container size
|
||||
if (application.getNumberOfContainers() == null) {
|
||||
throw new IllegalArgumentException(
|
||||
RestApiErrorMessages.ERROR_CONTAINERS_COUNT_INVALID);
|
||||
}
|
||||
|
||||
// Since it is a simple app with no components, create a default component
|
||||
application.getComponents().add(createDefaultComponent(application));
|
||||
} else {
|
||||
// If the application has components, then run checks for each component.
|
||||
// Let global values take effect if component level values are not
|
||||
// provided.
|
||||
Artifact globalArtifact = application.getArtifact();
|
||||
Resource globalResource = application.getResource();
|
||||
Long globalNumberOfContainers = application.getNumberOfContainers();
|
||||
for (Component comp : application.getComponents()) {
|
||||
// artifact
|
||||
if (comp.getArtifact() == null) {
|
||||
comp.setArtifact(globalArtifact);
|
||||
}
|
||||
// If still null raise validation exception
|
||||
if (comp.getArtifact() == null) {
|
||||
throw new IllegalArgumentException(String
|
||||
.format(RestApiErrorMessages.ERROR_ARTIFACT_FOR_COMP_INVALID,
|
||||
comp.getName()));
|
||||
}
|
||||
if (StringUtils.isEmpty(comp.getArtifact().getId())) {
|
||||
throw new IllegalArgumentException(String
|
||||
.format(RestApiErrorMessages.ERROR_ARTIFACT_ID_FOR_COMP_INVALID,
|
||||
comp.getName()));
|
||||
}
|
||||
|
||||
// If artifact is of type APPLICATION, add a slider specific property
|
||||
if (comp.getArtifact().getType() == Artifact.TypeEnum.APPLICATION) {
|
||||
if (comp.getConfiguration() == null) {
|
||||
comp.setConfiguration(new Configuration());
|
||||
}
|
||||
comp.setName(comp.getArtifact().getId());
|
||||
}
|
||||
|
||||
// resource
|
||||
if (comp.getResource() == null) {
|
||||
comp.setResource(globalResource);
|
||||
}
|
||||
validateApplicationResource(comp.getResource(), comp,
|
||||
comp.getArtifact().getType());
|
||||
|
||||
// container count
|
||||
if (comp.getNumberOfContainers() == null) {
|
||||
comp.setNumberOfContainers(globalNumberOfContainers);
|
||||
}
|
||||
if (comp.getNumberOfContainers() == null) {
|
||||
throw new IllegalArgumentException(String.format(
|
||||
RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID,
|
||||
comp.getName()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Application lifetime, if not specified, defaults to unlimited lifetime
|
||||
if (application.getLifetime() == null) {
|
||||
application.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME);
|
||||
}
|
||||
}
|
||||
|
||||
private static void validateApplicationResource(Resource resource,
|
||||
Component comp, Artifact.TypeEnum artifactType) {
|
||||
// Only apps/components of type APPLICATION can skip resource requirement
|
||||
if (resource == null && artifactType == Artifact.TypeEnum.APPLICATION) {
|
||||
return;
|
||||
}
|
||||
if (resource == null) {
|
||||
throw new IllegalArgumentException(
|
||||
comp == null ? RestApiErrorMessages.ERROR_RESOURCE_INVALID : String
|
||||
.format(RestApiErrorMessages.ERROR_RESOURCE_FOR_COMP_INVALID,
|
||||
comp.getName()));
|
||||
}
|
||||
// One and only one of profile OR cpus & memory can be specified. Specifying
|
||||
// both raises validation error.
|
||||
if (StringUtils.isNotEmpty(resource.getProfile()) && (
|
||||
resource.getCpus() != null || StringUtils
|
||||
.isNotEmpty(resource.getMemory()))) {
|
||||
throw new IllegalArgumentException(comp == null ?
|
||||
RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED :
|
||||
String.format(
|
||||
RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED,
|
||||
comp.getName()));
|
||||
}
|
||||
// Resource profiles are not supported yet, so we will raise a
|
||||
// validation error if only resource profile is specified
|
||||
if (StringUtils.isNotEmpty(resource.getProfile())) {
|
||||
throw new IllegalArgumentException(
|
||||
RestApiErrorMessages.ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET);
|
||||
}
|
||||
|
||||
String memory = resource.getMemory();
|
||||
Integer cpus = resource.getCpus();
|
||||
if (StringUtils.isEmpty(memory)) {
|
||||
throw new IllegalArgumentException(
|
||||
comp == null ? RestApiErrorMessages.ERROR_RESOURCE_MEMORY_INVALID :
|
||||
String.format(
|
||||
RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID,
|
||||
comp.getName()));
|
||||
}
|
||||
if (cpus == null) {
|
||||
throw new IllegalArgumentException(
|
||||
comp == null ? RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID :
|
||||
String.format(
|
||||
RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID,
|
||||
comp.getName()));
|
||||
}
|
||||
if (cpus <= 0) {
|
||||
throw new IllegalArgumentException(comp == null ?
|
||||
RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID_RANGE : String
|
||||
.format(
|
||||
RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE,
|
||||
comp.getName()));
|
||||
}
|
||||
}
|
||||
|
||||
public static boolean hasComponent(Application application) {
|
||||
if (application.getComponents() == null || application.getComponents()
|
||||
.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
public static Component createDefaultComponent(Application app) {
|
||||
Component comp = new Component();
|
||||
comp.setName(RestApiConstants.DEFAULT_COMPONENT_NAME);
|
||||
comp.setArtifact(app.getArtifact());
|
||||
comp.setResource(app.getResource());
|
||||
comp.setNumberOfContainers(app.getNumberOfContainers());
|
||||
comp.setLaunchCommand(app.getLaunchCommand());
|
||||
return comp;
|
||||
}
|
||||
}
|
|
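Illustrative sketch (not part of this patch): a minimal caller of the new validator. The setter names are assumed to mirror the getters the validator reads (setName, setArtifact, setResource, setNumberOfContainers); the artifact id and sizes are made up.

import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Artifact;
import org.apache.slider.api.resource.Resource;
import org.apache.slider.util.ServiceApiUtil;

public class ValidatePayloadSketch {
  public static void main(String[] args) {
    Application app = new Application();
    app.setName("finance_home");             // must pass SliderUtils.isClusternameValid

    Artifact artifact = new Artifact();
    artifact.setId("example/httpd:latest");  // hypothetical artifact id
    app.setArtifact(artifact);

    Resource resource = new Resource();
    resource.setMemory("512");               // memory is a string in this API
    resource.setCpus(1);
    app.setResource(resource);

    app.setNumberOfContainers(2L);

    // Throws IllegalArgumentException carrying a RestApiErrorMessages constant on
    // bad input; on success it also fills in defaults (default component,
    // unlimited lifetime).
    ServiceApiUtil.validateApplicationPostPayload(app);
    System.out.println("valid application payload: " + app.getName());
  }
}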
@ -80,22 +80,14 @@ message UpgradeContainersRequestProto {
|
|||
message UpgradeContainersResponseProto {
|
||||
}
|
||||
|
||||
/**
|
||||
* flex the cluster
|
||||
*/
|
||||
message FlexClusterRequestProto {
|
||||
required string clusterSpec = 1;
|
||||
message FlexComponentRequestProto {
|
||||
optional string name = 1;
|
||||
optional int32 numberOfContainers = 2;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* flex the cluster
|
||||
*/
|
||||
message FlexClusterResponseProto {
|
||||
required bool response = 1;
|
||||
message FlexComponentResponseProto {
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* void request
|
||||
*/
|
||||
|
|
|
@ -61,11 +61,7 @@ service SliderClusterProtocolPB {
|
|||
rpc upgradeContainers(UpgradeContainersRequestProto)
|
||||
returns(UpgradeContainersResponseProto);
|
||||
|
||||
/**
|
||||
* Flex the cluster.
|
||||
*/
|
||||
rpc flexCluster(FlexClusterRequestProto)
|
||||
returns(FlexClusterResponseProto);
|
||||
rpc flexComponent(FlexComponentRequestProto) returns (FlexComponentResponseProto);
|
||||
|
||||
/**
|
||||
* Get the current cluster status
|
||||
|
@ -73,12 +69,6 @@ service SliderClusterProtocolPB {
|
|||
rpc getJSONClusterStatus(GetJSONClusterStatusRequestProto)
|
||||
returns(GetJSONClusterStatusResponseProto);
|
||||
|
||||
/**
|
||||
* Get the instance definition
|
||||
*/
|
||||
rpc getInstanceDefinition(GetInstanceDefinitionRequestProto)
|
||||
returns(GetInstanceDefinitionResponseProto);
|
||||
|
||||
/**
|
||||
* List all running nodes in a role
|
||||
*/
|
||||
|
|
|
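Illustrative sketch (not part of this patch): building the new flexComponent request with the standard protoc-generated builder. The enclosing Messages class name is an assumption about where the generated code for these .proto files lives.

import org.apache.slider.api.proto.Messages;

public class FlexComponentRequestSketch {
  public static Messages.FlexComponentRequestProto flexTo(String component, int count) {
    // both fields are optional in the .proto, so only set what is needed
    return Messages.FlexComponentRequestProto.newBuilder()
        .setName(component)
        .setNumberOfContainers(count)
        .build();
  }

  public static void main(String[] args) {
    System.out.println(flexTo("worker", 5)); // protobuf text form of the request
  }
}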
@ -23,10 +23,6 @@
|
|||
<name>slider.config.loaded</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>slider.provider.agent</name>
|
||||
<value>org.apache.slider.providers.agent.AgentProviderFactory</value>
|
||||
</property>
|
||||
<property>
|
||||
<name>slider.provider.docker</name>
|
||||
<value>org.apache.slider.providers.docker.DockerProviderFactory</value>
|
||||
|
|
|
@ -1,157 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.slider.core.launch;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
|
||||
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
|
||||
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
|
||||
import org.apache.slider.api.ResourceKeys;
|
||||
import org.apache.slider.client.SliderYarnClientImpl;
|
||||
import org.apache.slider.common.SliderKeys;
|
||||
import org.easymock.EasyMock;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestAppMasterLauncher {
|
||||
SliderYarnClientImpl mockYarnClient;
|
||||
YarnClientApplication yarnClientApp;
|
||||
ApplicationSubmissionContext appSubmissionContext;
|
||||
Set<String> tags = Collections.emptySet();
|
||||
AppMasterLauncher appMasterLauncher = null;
|
||||
boolean isOldApi = true;
|
||||
Method rolledLogsIncludeMethod = null;
|
||||
Method rolledLogsExcludeMethod = null;
|
||||
|
||||
@Before
|
||||
public void initialize() throws Exception {
|
||||
mockYarnClient = EasyMock.createNiceMock(SliderYarnClientImpl.class);
|
||||
yarnClientApp = EasyMock.createNiceMock(YarnClientApplication.class);
|
||||
appSubmissionContext = EasyMock
|
||||
.createNiceMock(ApplicationSubmissionContext.class);
|
||||
EasyMock.expect(yarnClientApp.getApplicationSubmissionContext())
|
||||
.andReturn(appSubmissionContext).once();
|
||||
EasyMock.expect(mockYarnClient.createApplication())
|
||||
.andReturn(yarnClientApp).once();
|
||||
|
||||
try {
|
||||
LogAggregationContext.class.getMethod("newInstance", String.class,
|
||||
String.class, String.class, String.class);
|
||||
isOldApi = false;
|
||||
rolledLogsIncludeMethod = LogAggregationContext.class
|
||||
.getMethod("getRolledLogsIncludePattern");
|
||||
rolledLogsExcludeMethod = LogAggregationContext.class
|
||||
.getMethod("getRolledLogsExcludePattern");
|
||||
} catch (Exception e) {
|
||||
isOldApi = true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* These tests will probably fail when compiled against hadoop 2.7+. Please
|
||||
* refer to SLIDER-810. It has been purposely not modified so that it fails
|
||||
* and that someone needs to modify the code in
|
||||
* {@code AbstractLauncher#extractLogAggregationContext(Map)}. Comments are
|
||||
* provided in that method as to what needs to be done.
|
||||
*
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test
|
||||
public void testExtractLogAggregationContext() throws Exception {
|
||||
Map<String, String> options = new HashMap<String, String>();
|
||||
options.put(ResourceKeys.YARN_LOG_INCLUDE_PATTERNS,
|
||||
" | slider*.txt |agent.out| |");
|
||||
options.put(ResourceKeys.YARN_LOG_EXCLUDE_PATTERNS,
|
||||
"command*.json| agent.log* | ");
|
||||
|
||||
EasyMock.replay(mockYarnClient, appSubmissionContext, yarnClientApp);
|
||||
appMasterLauncher = new AppMasterLauncher("cl1", SliderKeys.APP_TYPE, null,
|
||||
null, mockYarnClient, false, null, options, tags, null);
|
||||
|
||||
// Verify the include/exclude patterns
|
||||
String expectedInclude = "slider*.txt|agent.out";
|
||||
String expectedExclude = "command*.json|agent.log*";
|
||||
assertPatterns(expectedInclude, expectedExclude);
|
||||
|
||||
EasyMock.verify(mockYarnClient, appSubmissionContext, yarnClientApp);
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExtractLogAggregationContextEmptyIncludePattern()
|
||||
throws Exception {
|
||||
Map<String, String> options = new HashMap<String, String>();
|
||||
options.put(ResourceKeys.YARN_LOG_INCLUDE_PATTERNS, " ");
|
||||
options.put(ResourceKeys.YARN_LOG_EXCLUDE_PATTERNS,
|
||||
"command*.json| agent.log* | ");
|
||||
|
||||
EasyMock.replay(mockYarnClient, appSubmissionContext, yarnClientApp);
|
||||
appMasterLauncher = new AppMasterLauncher("cl1", SliderKeys.APP_TYPE, null,
|
||||
null, mockYarnClient, false, null, options, tags, null);
|
||||
|
||||
// Verify the include/exclude patterns
|
||||
String expectedInclude = isOldApi ? "" : ".*";
|
||||
String expectedExclude = "command*.json|agent.log*";
|
||||
assertPatterns(expectedInclude, expectedExclude);
|
||||
|
||||
EasyMock.verify(mockYarnClient, appSubmissionContext, yarnClientApp);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testExtractLogAggregationContextEmptyIncludeAndExcludePattern()
|
||||
throws Exception {
|
||||
Map<String, String> options = new HashMap<String, String>();
|
||||
options.put(ResourceKeys.YARN_LOG_INCLUDE_PATTERNS, "");
|
||||
options.put(ResourceKeys.YARN_LOG_EXCLUDE_PATTERNS, " ");
|
||||
|
||||
EasyMock.replay(mockYarnClient, appSubmissionContext, yarnClientApp);
|
||||
appMasterLauncher = new AppMasterLauncher("cl1", SliderKeys.APP_TYPE, null,
|
||||
null, mockYarnClient, false, null, options, tags, null);
|
||||
|
||||
// Verify the include/exclude patterns
|
||||
String expectedInclude = isOldApi ? "" : ".*";
|
||||
String expectedExclude = "";
|
||||
assertPatterns(expectedInclude, expectedExclude);
|
||||
|
||||
EasyMock.verify(mockYarnClient, appSubmissionContext, yarnClientApp);
|
||||
}
|
||||
|
||||
private void assertPatterns(String expectedIncludePattern,
|
||||
String expectedExcludePattern) throws Exception {
|
||||
if (isOldApi) {
|
||||
Assert.assertEquals(expectedIncludePattern,
|
||||
appMasterLauncher.logAggregationContext.getIncludePattern());
|
||||
Assert.assertEquals(expectedExcludePattern,
|
||||
appMasterLauncher.logAggregationContext.getExcludePattern());
|
||||
} else {
|
||||
Assert.assertEquals(expectedIncludePattern,
|
||||
(String) rolledLogsIncludeMethod
|
||||
.invoke(appMasterLauncher.logAggregationContext));
|
||||
Assert.assertEquals(expectedExcludePattern,
|
||||
(String) rolledLogsExcludeMethod
|
||||
.invoke(appMasterLauncher.logAggregationContext));
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,92 +0,0 @@
|
|||
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.slider.core.launch;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.util.Records;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.client.SliderYarnClientImpl;
import org.apache.slider.common.SliderKeys;
import org.easymock.EasyMock;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class TestAppMasterLauncherWithAmReset {
  SliderYarnClientImpl mockYarnClient;
  YarnClientApplication yarnClientApp;
  ApplicationSubmissionContext appSubmissionContext;
  GetNewApplicationResponse newApp;
  Set<String> tags = Collections.emptySet();
  AppMasterLauncher appMasterLauncher = null;
  boolean isOldApi = true;

  @Before
  public void initialize() throws Exception {
    mockYarnClient = EasyMock.createNiceMock(SliderYarnClientImpl.class);
    yarnClientApp = EasyMock.createNiceMock(YarnClientApplication.class);
    newApp = EasyMock.createNiceMock(GetNewApplicationResponse.class);
    EasyMock.expect(mockYarnClient.createApplication())
        .andReturn(new YarnClientApplication(newApp,
            Records.newRecord(ApplicationSubmissionContext.class)));
  }

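  // The YARN_RESOURCEMANAGER_AM_RETRY_COUNT_WINDOW_MS option should be carried
  // into the submission context as the attempt-failures validity interval.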
  @Test
  public void testExtractYarnResourceManagerAmRetryCountWindowMs() throws
      Exception {
    Map<String, String> options = new HashMap<String, String>();
    final String expectedInterval = Integer.toString(120000);
    options.put(ResourceKeys.YARN_RESOURCEMANAGER_AM_RETRY_COUNT_WINDOW_MS,
        expectedInterval);
    EasyMock.replay(mockYarnClient, yarnClientApp);

    appMasterLauncher = new AppMasterLauncher("am1", SliderKeys.APP_TYPE, null,
        null, mockYarnClient, false, null, options, tags, null);

    ApplicationSubmissionContext ctx = appMasterLauncher.application
        .getApplicationSubmissionContext();
    String retryIntervalWindow = Long.toString(ctx
        .getAttemptFailuresValidityInterval());
    Assert.assertEquals(expectedInterval, retryIntervalWindow);
  }

  @Test
  public void testExtractYarnResourceManagerAmRetryCountWindowMsDefaultValue()
      throws Exception {
    Map<String, String> options = new HashMap<String, String>();
    EasyMock.replay(mockYarnClient, yarnClientApp);

    appMasterLauncher = new AppMasterLauncher("am1", SliderKeys.APP_TYPE, null,
        null, mockYarnClient, false, null, options, tags, null);

    ApplicationSubmissionContext ctx = appMasterLauncher.application
        .getApplicationSubmissionContext();
    long retryIntervalWindow = ctx.getAttemptFailuresValidityInterval();
    Assert.assertEquals(ResourceKeys.DEFAULT_AM_RETRY_COUNT_WINDOW_MS,
        retryIntervalWindow);
  }

}

@@ -1,68 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.slider.server.appmaster;

import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.slider.common.SliderKeys;
import org.apache.slider.core.conf.MapOperations;
import org.junit.Assert;
import org.junit.Test;

import java.util.HashMap;
import java.util.Map;

/**
 * Tests extraction of provided service record attributes by SliderAppMaster.
 */
public class TestServiceRecordAttributes extends Assert {

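  // Only options prefixed with "service.record.attribute." should be copied
  // into the ServiceRecord, with the prefix stripped from the attribute name;
  // all other options should be ignored.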
  @Test
  public void testAppConfigProvidedServiceRecordAttributes() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put("slider.some.arbitrary.option", "arbitrary value");
    options.put("service.record.attribute.one_attribute", "one_attribute_value");
    options.put("service.record.attribute.second_attribute", "second_attribute_value");
    MapOperations serviceProps = new MapOperations(SliderKeys.COMPONENT_AM, options);
    options = new HashMap<>();
    options.put("some.component.attribute", "component_attribute_value");
    options.put("service.record.attribute.component_attribute", "component_attribute_value");
    MapOperations compProps = new MapOperations("TEST_COMP", options);

    SliderAppMaster appMaster = new SliderAppMaster();

    ServiceRecord appServiceRecord = new ServiceRecord();

    appMaster.setProvidedServiceRecordAttributes(serviceProps, appServiceRecord);

    assertNull("property should not be attribute",
        appServiceRecord.get("slider.some.arbitrary.option"));
    assertEquals("wrong value", "one_attribute_value",
        appServiceRecord.get("one_attribute"));
    assertEquals("wrong value", "second_attribute_value",
        appServiceRecord.get("second_attribute"));

    ServiceRecord compServiceRecord = new ServiceRecord();

    appMaster.setProvidedServiceRecordAttributes(compProps, compServiceRecord);

    assertNull("should not be attribute",
        compServiceRecord.get("some.component.attribute"));
    assertEquals("wrong value", "component_attribute_value",
        compServiceRecord.get("component_attribute"));
  }
}