YARN-6394. Support specifying YARN related params in the service REST API. Contributed by Jian He

Authored by Billie Rinaldi on 2017-11-03 11:25:57 -07:00; committed by Jian He
parent a288b0accf
commit a6c4bd74b6
32 changed files with 384 additions and 169 deletions

View File

@@ -176,14 +176,14 @@ POST URL - http://localhost:9191:/ws/v1/services/hbase-app-1
       {
         "type": "XML",
         "dest_file": "/etc/hadoop/conf/core-site.xml",
-        "props": {
+        "properties": {
           "fs.defaultFS": "${CLUSTER_FS_URI}"
         }
       },
       {
         "type": "XML",
         "dest_file": "/etc/hbase/conf/hbase-site.xml",
-        "props": {
+        "properties": {
           "hbase.cluster.distributed": "true",
           "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
           "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",
@@ -216,14 +216,14 @@ POST URL - http://localhost:9191:/ws/v1/services/hbase-app-1
       {
         "type": "XML",
         "dest_file": "/etc/hadoop/conf/core-site.xml",
-        "props": {
+        "properties": {
           "fs.defaultFS": "${CLUSTER_FS_URI}"
         }
       },
       {
         "type": "XML",
         "dest_file": "/etc/hbase/conf/hbase-site.xml",
-        "props": {
+        "properties": {
          "hbase.cluster.distributed": "true",
          "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
          "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",

View File

@@ -340,7 +340,7 @@ definitions:
        enum:
          - HTTP
          - PORT
-      props:
+      properties:
        type: object
        description: A blob of key value pairs that will be used to configure the check.
        additionalProperties:
@@ -353,7 +353,7 @@ definitions:
    properties:
      properties:
        type: object
-        description: A blob of key-value pairs of common service properties.
+        description: A blob of key-value pairs for configuring the YARN service AM
        additionalProperties:
          type: string
      env:
@@ -386,7 +386,7 @@ definitions:
      src_file:
        type: string
        description: This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.
-      props:
+      properties:
        type: object
        description: A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any, existing properties in src_file or be added as new properties in src_file.
  Container:

View File

@@ -16,16 +16,16 @@
  },
  "readiness_check": {
    "type": "HTTP",
-    "props": {
+    "properties": {
      "url": "http://${THIS_HOST}:8080"
    }
  },
  "configuration": {
    "files": [
      {
-        "type": "ENV",
+        "type": "TEMPLATE",
        "dest_file": "/var/www/html/index.html",
-        "props": {
+        "properties": {
          "content": "<html><header><title>Title</title></header><body>Hello from ${COMPONENT_INSTANCE_NAME}!</body></html>"
        }
      }

View File

@@ -17,9 +17,9 @@
  "configuration": {
    "files": [
      {
-        "type": "ENV",
+        "type": "TEMPLATE",
        "dest_file": "/var/www/html/index.html",
-        "props": {
+        "properties": {
          "content": "<html><header><title>Title</title></header><body>Hello from ${COMPONENT_INSTANCE_NAME}!</body></html>"
        }
      }

View File

@@ -78,6 +78,12 @@ public class ServiceMaster extends CompositeService {
    fs.setAppDir(appDir);
    loadApplicationJson(context, fs);

+    // Take yarn config from YarnFile and merge them into YarnConfiguration
+    for (Map.Entry<String, String> entry : context.service
+        .getConfiguration().getProperties().entrySet()) {
+      conf.set(entry.getKey(), entry.getValue());
+    }
+
    ContainerId amContainerId = getAMContainerId();
    ApplicationAttemptId attemptId = amContainerId.getApplicationAttemptId();

View File

@@ -35,9 +35,6 @@ public class ServiceMetrics implements MetricsSource {
  @Metric("containers requested")
  public MutableGaugeInt containersRequested;

-  @Metric("anti-affinity containers pending")
-  public MutableGaugeInt pendingAAContainers;
-
  @Metric("containers running")
  public MutableGaugeInt containersRunning;
@@ -86,7 +83,6 @@ public class ServiceMetrics implements MetricsSource {
  @Override public String toString() {
    return "ServiceMetrics{"
        + "containersRequested=" + containersRequested.value()
-        + ", pendingAAContainers=" + pendingAAContainers.value()
        + ", containersRunning=" + containersRunning.value()
        + ", containersDesired=" + containersDesired.value()
        + ", containersSucceeded=" + containersSucceeded.value()

View File

@@ -397,10 +397,6 @@ public class ServiceScheduler extends CompositeService {
        PersistencePolicies.APPLICATION);
    serviceRecord.description = "YarnServiceMaster";

-    // set any provided attributes
-    setUserProvidedServiceRecordAttributes(service.getConfiguration(),
-        serviceRecord);
-
    executorService.submit(new Runnable() {
      @Override public void run() {
        try {
@@ -426,17 +422,6 @@
    }
  }

-  private void setUserProvidedServiceRecordAttributes(
-      org.apache.hadoop.yarn.service.api.records.Configuration conf, ServiceRecord record) {
-    String prefix = "service.record.attribute";
-    for (Map.Entry<String, String> entry : conf.getProperties().entrySet()) {
-      if (entry.getKey().startsWith(prefix)) {
-        String key = entry.getKey().substring(prefix.length() + 1);
-        record.set(key, entry.getValue().trim());
-      }
-    }
-  }
-
  private void createAllComponents() {
    long allocateId = 0;

View File

@@ -53,9 +53,9 @@ public interface ServiceApiConstants {
  String CONTAINER_ID = $("CONTAINER_ID");

  // Templates for component instance host/IP
-  String COMPONENT_HOST = $("%s_HOST");
-  String COMPONENT_IP = $("%s_IP");
+  String COMPONENT_INSTANCE_HOST = $("%s_HOST");
+  String COMPONENT_INSTANCE_IP = $("%s_IP");

  // Constants for default cluster ZK
  String CLUSTER_ZK_QUORUM = $("CLUSTER_ZK_QUORUM");

View File

@@ -49,13 +49,13 @@ public class ConfigFile implements Serializable {
  private static final long serialVersionUID = -7009402089417704612L;

  /**
-   * Config Type. XML, JSON, YAML, TEMPLATE, ENV and HADOOP_XML are supported.
+   * Config Type. XML, JSON, YAML, TEMPLATE and HADOOP_XML are supported.
   **/
  @XmlType(name = "config_type")
  @XmlEnum
  public enum TypeEnum {
    XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE(
-        "TEMPLATE"), ENV("ENV"), HADOOP_XML("HADOOP_XML"),;
+        "TEMPLATE"), HADOOP_XML("HADOOP_XML"),;

    private String value;
@@ -73,15 +73,15 @@ public class ConfigFile implements Serializable {
  private TypeEnum type = null;
  private String destFile = null;
  private String srcFile = null;
-  private Map<String, String> props = new HashMap<>();
+  private Map<String, String> properties = new HashMap<>();

  public ConfigFile copy() {
    ConfigFile copy = new ConfigFile();
    copy.setType(this.getType());
    copy.setSrcFile(this.getSrcFile());
    copy.setDestFile(this.getDestFile());
-    if (this.getProps() != null && !this.getProps().isEmpty()) {
-      copy.getProps().putAll(this.getProps());
+    if (this.getProperties() != null && !this.getProperties().isEmpty()) {
+      copy.getProperties().putAll(this.getProperties());
    }
    return copy;
  }
@@ -156,28 +156,28 @@ public class ConfigFile implements Serializable {
     in the dest_file and these properties will overwrite, if any, existing
     properties in src_file or be added as new properties in src_file.
   **/
-  public ConfigFile props(Map<String, String> props) {
-    this.props = props;
+  public ConfigFile properties(Map<String, String> properties) {
+    this.properties = properties;
    return this;
  }

  @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type."
      + " If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any,"
      + " existing properties in src_file or be added as new properties in src_file.")
-  @JsonProperty("props")
-  public Map<String, String> getProps() {
-    return props;
+  @JsonProperty("properties")
+  public Map<String, String> getProperties() {
+    return properties;
  }

-  public void setProps(Map<String, String> props) {
-    this.props = props;
+  public void setProperties(Map<String, String> properties) {
+    this.properties = properties;
  }

  public long getLong(String name, long defaultValue) {
    if (name == null) {
      return defaultValue;
    }
-    String value = props.get(name.trim());
+    String value = properties.get(name.trim());
    return Long.parseLong(value);
  }
@@ -185,7 +185,7 @@ public class ConfigFile implements Serializable {
    if (name == null) {
      return defaultValue;
    }
-    return Boolean.valueOf(props.get(name.trim()));
+    return Boolean.valueOf(properties.get(name.trim()));
  }

  @Override
@@ -204,7 +204,7 @@ public class ConfigFile implements Serializable {
  @Override
  public int hashCode() {
-    return Objects.hash(type, destFile, srcFile, props);
+    return Objects.hash(type, destFile, srcFile, properties);
  }

  @Override
@@ -215,7 +215,7 @@ public class ConfigFile implements Serializable {
    sb.append(" type: ").append(toIndentedString(type)).append("\n");
    sb.append(" destFile: ").append(toIndentedString(destFile)).append("\n");
    sb.append(" srcFile: ").append(toIndentedString(srcFile)).append("\n");
-    sb.append(" props: ").append(toIndentedString(props)).append("\n");
+    sb.append(" properties: ").append(toIndentedString(properties)).append("\n");
    sb.append("}");
    return sb.toString();
  }

View File

@@ -212,8 +212,8 @@ public class Configuration implements Serializable {
    for (ConfigFile thisFile : files) {
      if(thatMap.containsKey(thisFile.getDestFile())) {
        ConfigFile thatFile = thatMap.get(thisFile.getDestFile());
-        SliderUtils.mergeMapsIgnoreDuplicateKeys(thisFile.getProps(),
-            thatFile.getProps());
+        SliderUtils.mergeMapsIgnoreDuplicateKeys(thisFile.getProperties(),
+            thatFile.getProperties());
        thatMap.remove(thisFile.getDestFile());
      }
    }

View File

@@ -69,7 +69,7 @@ public class ReadinessCheck implements Serializable {
  }

  private TypeEnum type = null;
-  private Map<String, String> props = new HashMap<String, String>();
+  private Map<String, String> properties = new HashMap<String, String>();
  private Artifact artifact = null;

  /**
@@ -91,27 +91,27 @@
    this.type = type;
  }

-  public ReadinessCheck props(Map<String, String> props) {
-    this.props = props;
+  public ReadinessCheck properties(Map<String, String> properties) {
+    this.properties = properties;
    return this;
  }

  public ReadinessCheck putPropsItem(String key, String propsItem) {
-    this.props.put(key, propsItem);
+    this.properties.put(key, propsItem);
    return this;
  }

  /**
   * A blob of key value pairs that will be used to configure the check.
-   * @return props
+   * @return properties
   **/
  @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be used to configure the check.")
-  public Map<String, String> getProps() {
-    return props;
+  public Map<String, String> getProperties() {
+    return properties;
  }

-  public void setProps(Map<String, String> props) {
-    this.props = props;
+  public void setProperties(Map<String, String> properties) {
+    this.properties = properties;
  }

  /**
@@ -148,13 +148,13 @@
    }
    ReadinessCheck readinessCheck = (ReadinessCheck) o;
    return Objects.equals(this.type, readinessCheck.type) &&
-        Objects.equals(this.props, readinessCheck.props) &&
+        Objects.equals(this.properties, readinessCheck.properties) &&
        Objects.equals(this.artifact, readinessCheck.artifact);
  }

  @Override
  public int hashCode() {
-    return Objects.hash(type, props, artifact);
+    return Objects.hash(type, properties, artifact);
  }
@@ -164,7 +164,7 @@
    sb.append("class ReadinessCheck {\n");
    sb.append(" type: ").append(toIndentedString(type)).append("\n");
-    sb.append(" props: ").append(toIndentedString(props)).append("\n");
+    sb.append(" properties: ").append(toIndentedString(properties)).append("\n");
    sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n");
    sb.append("}");
    return sb.toString();

View File

@@ -38,17 +38,8 @@ import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.ApplicationTimeout;
-import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.api.records.*;
import org.apache.hadoop.yarn.client.api.AppAdminClient;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
@@ -99,7 +90,7 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import static org.apache.hadoop.yarn.api.records.YarnApplicationState.*;
-import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_QUEUE;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*;
import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.jsonSerDeser;
import static org.apache.hadoop.yarn.service.utils.SliderUtils.*;
@@ -553,8 +544,11 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
      appTimeout.put(ApplicationTimeoutType.LIFETIME, app.getLifetime());
      submissionContext.setApplicationTimeouts(appTimeout);
    }
-    submissionContext.setMaxAppAttempts(conf.getInt(
-        YarnServiceConf.AM_RESTART_MAX, 2));
+    submissionContext.setMaxAppAttempts(YarnServiceConf
+        .getInt(YarnServiceConf.AM_RESTART_MAX, 20, app.getConfiguration(),
+            conf));
+
+    setLogAggregationContext(app, conf, submissionContext);

    Map<String, LocalResource> localResources = new HashMap<>();
@@ -568,14 +562,14 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
    if (LOG.isDebugEnabled()) {
      printLocalResources(localResources);
    }
-    Map<String, String> env = addAMEnv(conf);
+    Map<String, String> env = addAMEnv();
    // create AM CLI
-    String cmdStr =
-        buildCommandLine(serviceName, conf, appRootDir, hasAMLog4j);
+    String cmdStr = buildCommandLine(serviceName, conf, appRootDir, hasAMLog4j);
    submissionContext.setResource(Resource.newInstance(YarnServiceConf
-        .getLong(YarnServiceConf.AM_RESOURCE_MEM, YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM,
-            app.getConfiguration(), conf), 1));
+        .getLong(YarnServiceConf.AM_RESOURCE_MEM,
+            YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM, app.getConfiguration(),
+            conf), 1));
    String queue = app.getQueue();
    if (StringUtils.isEmpty(queue)) {
      queue = conf.get(YARN_QUEUE, "default");
@@ -598,6 +592,33 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
    return submissionContext.getApplicationId();
  }

+  private void setLogAggregationContext(Service app, Configuration conf,
+      ApplicationSubmissionContext submissionContext) {
+    LogAggregationContext context = Records.newRecord(LogAggregationContext
+        .class);
+    String finalLogInclude = YarnServiceConf.get
+        (FINAL_LOG_INCLUSION_PATTERN, null, app.getConfiguration(), conf);
+    if (!StringUtils.isEmpty(finalLogInclude)) {
+      context.setIncludePattern(finalLogInclude);
+    }
+    String finalLogExclude = YarnServiceConf.get
+        (FINAL_LOG_EXCLUSION_PATTERN, null, app.getConfiguration(), conf);
+    if (!StringUtils.isEmpty(finalLogExclude)) {
+      context.setExcludePattern(finalLogExclude);
+    }
+    String rollingLogInclude = YarnServiceConf.get
+        (ROLLING_LOG_INCLUSION_PATTERN, null, app.getConfiguration(), conf);
+    if (!StringUtils.isEmpty(rollingLogInclude)) {
+      context.setRolledLogsIncludePattern(rollingLogInclude);
+    }
+    String rollingLogExclude = YarnServiceConf.get
+        (ROLLING_LOG_EXCLUSION_PATTERN, null, app.getConfiguration(), conf);
+    if (!StringUtils.isEmpty(rollingLogExclude)) {
+      context.setRolledLogsExcludePattern(rollingLogExclude);
+    }
+    submissionContext.setLogAggregationContext(context);
+  }
+
  private void printLocalResources(Map<String, LocalResource> map) {
    LOG.debug("Added LocalResource for localization: ");
    StringBuilder builder = new StringBuilder();
@@ -635,7 +656,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
    return cmdStr;
  }

-  private Map<String, String> addAMEnv(Configuration conf) throws IOException {
+  private Map<String, String> addAMEnv() throws IOException {
    Map<String, String> env = new HashMap<>();
    ClasspathConstructor classpath =
        buildClasspath(YarnServiceConstants.SUBMITTED_CONF_DIR, "lib", fs, getConfig()

View File

@@ -116,7 +116,7 @@ public class Component implements EventHandler<ComponentEvent> {
      .addTransition(FLEXING, FLEXING, CONTAINER_COMPLETED,
          new ContainerCompletedTransition())
      // Flex while previous flex is still in progress
-      .addTransition(FLEXING, EnumSet.of(FLEXING), FLEX,
+      .addTransition(FLEXING, EnumSet.of(FLEXING, STABLE), FLEX,
          new FlexComponentTransition())

      // container failed while stable
@@ -214,14 +214,12 @@ public class Component implements EventHandler<ComponentEvent> {
          new ArrayList<>(component.getAllComponentInstances());

      // sort in Most recent -> oldest order, destroy most recent ones.
-      Collections.sort(list, Collections.reverseOrder());
+      list.sort(Collections.reverseOrder());
      for (int i = 0; i < delta; i++) {
        ComponentInstance instance = list.get(i);
        // remove the instance
        component.compInstances.remove(instance.getCompInstanceName());
        component.pendingInstances.remove(instance);
-        component.componentMetrics.containersFailed.incr();
-        component.componentMetrics.containersRunning.decr();
        // decrement id counter
        component.instanceIdCounter.decrementAndGet();
        instance.destroy();
@@ -282,7 +280,6 @@ public class Component implements EventHandler<ComponentEvent> {
        component.compInstanceDispatcher.getEventHandler().handle(
            new ComponentInstanceEvent(instance.getContainerId(),
                START));
-        component.incRunningContainers();
      }
    }
@@ -294,7 +291,6 @@ public class Component implements EventHandler<ComponentEvent> {
      component.compInstanceDispatcher.getEventHandler().handle(
          new ComponentInstanceEvent(event.getInstance().getContainerId(),
              START));
-      component.incRunningContainers();
      return checkIfStable(component);
    }
  }
@@ -413,9 +409,6 @@ public class Component implements EventHandler<ComponentEvent> {
    componentMetrics.containersFailed.incr();
    scheduler.getServiceMetrics().containersFailed.incr();

-    // dec running container
-    decRunningContainers();
-
    if (Apps.shouldCountTowardsNodeBlacklisting(status.getExitStatus())) {
      String host = scheduler.getLiveInstances().get(status.getContainerId())
          .getNodeId().getHost();
@@ -468,31 +461,33 @@ public class Component implements EventHandler<ComponentEvent> {
        }
        String ip = instance.getContainerStatus().getIPs().get(0);
        String host = instance.getContainerStatus().getHost();
-        tokens.put(String.format(COMPONENT_IP,
+        tokens.put(String.format(COMPONENT_INSTANCE_IP,
            instance.getCompInstanceName().toUpperCase()), ip);
-        tokens.put(String.format(COMPONENT_HOST,
+        tokens.put(String.format(COMPONENT_INSTANCE_HOST,
            instance.getCompInstanceName().toUpperCase()), host);
      }
    }
    return tokens;
  }

-  private void incRunningContainers() {
+  public void incRunningContainers() {
    componentMetrics.containersRunning.incr();
    scheduler.getServiceMetrics().containersRunning.incr();
  }

+  public void decRunningContainers() {
+    componentMetrics.containersRunning.decr();
+    scheduler.getServiceMetrics().containersRunning.decr();
+  }
+
  public void incContainersReady() {
    componentMetrics.containersReady.incr();
+    scheduler.getServiceMetrics().containersReady.incr();
  }

  public void decContainersReady() {
    componentMetrics.containersReady.decr();
-  }
-
-  private void decRunningContainers() {
-    componentMetrics.containersRunning.decr();
-    scheduler.getServiceMetrics().containersRunning.decr();
+    scheduler.getServiceMetrics().containersReady.decr();
  }

  public int getNumReadyInstances() {

View File

@@ -148,7 +148,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
          new ContainerStatusRetriever(compInstance.scheduler,
              compInstance.getContainerId(), compInstance), 0, 1,
          TimeUnit.SECONDS);
-      compInstance.component.incRunningContainers();
      long containerStartTime = System.currentTimeMillis();
      try {
        ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
@@ -186,6 +186,10 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
        ComponentInstanceEvent event) {
      compInstance.component.incContainersReady();
      compInstance.containerSpec.setState(ContainerState.READY);
+      if (compInstance.timelineServiceEnabled) {
+        compInstance.serviceTimelinePublisher
+            .componentInstanceBecomeReady(compInstance.containerSpec);
+      }
    }
  }
@@ -225,6 +229,10 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
          .getDiagnostics();
      compInstance.diagnostics.append(containerDiag + System.lineSeparator());
+      if (compInstance.getState().equals(READY)) {
+        compInstance.component.decContainersReady();
+      }
+      compInstance.component.decRunningContainers();
      boolean shouldExit = false;
      // check if it exceeds the failure threshold
      if (comp.currentContainerFailure.get() > comp.maxContainerFailurePerComp) {
@@ -250,10 +258,8 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
          .submit(compInstance::cleanupRegistry);
      if (compInstance.timelineServiceEnabled) {
        // record in ATS
-        compInstance.serviceTimelinePublisher
-            .componentInstanceFinished(compInstance,
-                event.getStatus().getExitStatus(), event.getStatus().getState(),
-                containerDiag);
+        compInstance.serviceTimelinePublisher.componentInstanceFinished
+            (compInstance, event.getStatus().getExitStatus(), containerDiag);
      }
      compInstance.containerSpec.setState(ContainerState.STOPPED);
    }
@@ -336,7 +342,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
      container.setIp(StringUtils.join(",", status.getIPs()));
      container.setHostname(status.getHost());
      if (timelineServiceEnabled) {
-        serviceTimelinePublisher.componentInstanceUpdated(container);
+        serviceTimelinePublisher.componentInstanceIPHostUpdated(container);
      }
    }
    updateServiceRecord(yarnRegistryOperations, status);
@@ -410,7 +416,10 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
    }
  }

-  // Release the container , cleanup registry, hdfs dir, and record in ATS
+  // Called when user flexed down the container and ContainerStoppedTransition
+  // is not executed in this case.
+  // Release the container, dec running,
+  // cleanup registry, hdfs dir, and send record to ATS
  public void destroy() {
    LOG.info(getCompInstanceId() + ": Flexed down by user, destroying.");
    diagnostics.append(getCompInstanceId() + ": Flexed down by user");
@@ -420,10 +429,18 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
          .releaseAssignedContainer(container.getId());
      getCompSpec().removeContainer(containerSpec);
    }
+    // update metrics
+    if (getState() == STARTED) {
+      component.decRunningContainers();
+    }
+    if (getState() == READY) {
+      component.decContainersReady();
+      component.decRunningContainers();
+    }
+
    if (timelineServiceEnabled) {
-      serviceTimelinePublisher
-          .componentInstanceFinished(this, KILLED_BY_APPMASTER, COMPLETE,
-              diagnostics.toString());
+      serviceTimelinePublisher.componentInstanceFinished(this,
+          KILLED_BY_APPMASTER, diagnostics.toString());
    }
    scheduler.executorService.submit(this::cleanupRegistryAndCompHdfsDir);
  }

View File

@@ -28,7 +28,7 @@ public class YarnServiceConf {

  // Retry settings for container failures
  public static final String CONTAINER_RETRY_MAX = "yarn.service.container-failure.retry.max";
-  public static final String CONTAINER_RETRY_INTERVAL = "yarn.service.container-failure.retry-interval";
+  public static final String CONTAINER_RETRY_INTERVAL = "yarn.service.container-failure.retry-interval-ms";

  public static final String AM_RESTART_MAX = "yarn.service.am-restart.max-attempts";
  public static final String AM_RESOURCE_MEM = "yarn.service.am-resource.memory";
@@ -40,6 +40,13 @@ public class YarnServiceConf {
  public static final String DEFAULT_API_SERVER_ADDRESS = "0.0.0.0:";
  public static final int DEFAULT_API_SERVER_PORT = 9191;

+  public static final String FINAL_LOG_INCLUSION_PATTERN = "yarn.service.log.include-pattern";
+  public static final String FINAL_LOG_EXCLUSION_PATTERN = "yarn.service.log.exclude-pattern";
+
+  public static final String ROLLING_LOG_INCLUSION_PATTERN = "yarn.service.rolling-log.include-pattern";
+  public static final String ROLLING_LOG_EXCLUSION_PATTERN = "yarn.service.rolling-log.exclude-pattern";
+
  /**
   * The yarn service base path:
   * Defaults to HomeDir/.yarn/
@@ -98,4 +105,9 @@ public class YarnServiceConf {
      Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
    return userConf.getPropertyInt(name, systemConf.getInt(name, defaultValue));
  }
+
+  public static String get(String name, String defaultVal,
+      Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
+    return userConf.getProperty(name, systemConf.get(name, defaultVal));
+  }
}

View File

@@ -87,4 +87,6 @@ public interface YarnServiceConstants {
  String OUT_FILE = "stdout.txt";
  String ERR_FILE = "stderr.txt";
+
+  String CONTENT = "content";
}

View File

@@ -70,9 +70,9 @@ public final class MonitorUtils {
    try {
      switch (readinessCheck.getType()) {
      case HTTP:
-        return HttpProbe.create(readinessCheck.getProps());
+        return HttpProbe.create(readinessCheck.getProperties());
      case PORT:
-        return PortProbe.create(readinessCheck.getProps());
+        return PortProbe.create(readinessCheck.getProperties());
      default:
        return null;
      }

View File

@@ -27,10 +27,13 @@ import org.apache.hadoop.yarn.service.utils.SliderUtils;

import java.io.IOException;
import java.nio.file.Paths;
+import java.text.MessageFormat;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

+import static org.apache.hadoop.yarn.service.conf.YarnServiceConstants.CONTENT;
+
public abstract class AbstractClientProvider {

  public AbstractClientProvider() {
@@ -84,11 +87,15 @@
      throw new IllegalArgumentException("File type is empty");
    }

-    if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE) && StringUtils
-        .isEmpty(file.getSrcFile())) {
-      throw new IllegalArgumentException(
-          "Src_file is empty for " + ConfigFile.TypeEnum.TEMPLATE);
+    if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE)) {
+      if (StringUtils.isEmpty(file.getSrcFile()) &&
+          !file.getProperties().containsKey(CONTENT)) {
+        throw new IllegalArgumentException(MessageFormat.format("For {0} " +
+            "format, either src_file must be specified in ConfigFile," +
+            " or the \"{1}\" key must be specified in " +
+            "the 'properties' field of ConfigFile. ",
+            ConfigFile.TypeEnum.TEMPLATE, CONTENT));
+      }
    }
    if (!StringUtils.isEmpty(file.getSrcFile())) {
      Path p = new Path(file.getSrcFile());

View File

@@ -272,8 +272,8 @@ public class ProviderUtils implements YarnServiceConstants {
          break;
        }
      } else {
-        // non-template
-        resolveNonTemplateConfigsAndSaveOnHdfs(fs, tokensForSubstitution,
+        // If src_file is not specified
+        resolvePropsInConfigFileAndSaveOnHdfs(fs, tokensForSubstitution,
            instance, configFile, fileName, remoteFile);
      }
    }
@@ -297,17 +297,17 @@ public class ProviderUtils implements YarnServiceConstants {
    }
  }

-  private static void resolveNonTemplateConfigsAndSaveOnHdfs(SliderFileSystem fs,
+  private static void resolvePropsInConfigFileAndSaveOnHdfs(SliderFileSystem fs,
      Map<String, String> tokensForSubstitution, ComponentInstance instance,
      ConfigFile configFile, String fileName, Path remoteFile)
      throws IOException {
    // substitute non-template configs
-    substituteMapWithTokens(configFile.getProps(), tokensForSubstitution);
+    substituteMapWithTokens(configFile.getProperties(), tokensForSubstitution);

    // write configs onto hdfs
    PublishedConfiguration publishedConfiguration =
        new PublishedConfiguration(fileName,
-            configFile.getProps().entrySet());
+            configFile.getProperties().entrySet());
    if (!fs.getFileSystem().exists(remoteFile)) {
      PublishedConfigurationOutputter configurationOutputter =
          PublishedConfigurationOutputter.createOutputter(
@@ -343,7 +343,7 @@ public class ProviderUtils implements YarnServiceConstants {
      confCopy.set(entry.getKey(), entry.getValue());
    }
    // substitute properties
-    for (Map.Entry<String, String> entry : configFile.getProps().entrySet()) {
+    for (Map.Entry<String, String> entry : configFile.getProperties().entrySet()) {
      confCopy.set(entry.getKey(), entry.getValue());
    }
    // substitute env variables

View File

@@ -30,5 +30,7 @@ public enum ServiceTimelineEvent {

  COMPONENT_INSTANCE_UNREGISTERED,

-  COMPONENT_INSTANCE_UPDATED
+  COMPONENT_INSTANCE_IP_HOST_UPDATE,
+
+  COMPONENT_INSTANCE_BECOME_READY
}

View File

@@ -20,19 +20,13 @@ package org.apache.hadoop.yarn.service.timelineservice;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.service.CompositeService;
-import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
import org.apache.hadoop.yarn.client.api.TimelineV2Client;
import org.apache.hadoop.yarn.service.ServiceContext;
-import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.hadoop.yarn.service.api.records.Component;
-import org.apache.hadoop.yarn.service.api.records.ConfigFile;
-import org.apache.hadoop.yarn.service.api.records.Configuration;
-import org.apache.hadoop.yarn.service.api.records.Container;
-import org.apache.hadoop.yarn.service.api.records.ServiceState;
+import org.apache.hadoop.yarn.service.api.records.*;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.slf4j.Logger;
@@ -46,6 +40,8 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

+import static org.apache.hadoop.yarn.service.api.records.ContainerState.READY;
+import static org.apache.hadoop.yarn.service.api.records.ContainerState.STOPPED;
import static org.apache.hadoop.yarn.service.timelineservice.ServiceTimelineMetricsConstants.DIAGNOSTICS_INFO;

/**
@@ -117,8 +113,8 @@ public class ServiceTimelinePublisher extends CompositeService {
    // publish system config - YarnConfiguration
    populateTimelineEntity(systemConf.iterator(), service.getId(),
        ServiceTimelineEntityType.SERVICE_ATTEMPT.toString());
-    // publish user conf
-    publishUserConf(service.getConfiguration(), service.getId(),
+    // publish container conf
+    publishContainerConf(service.getConfiguration(), service.getId(),
        ServiceTimelineEntityType.SERVICE_ATTEMPT.toString());

    // publish component as separate entity.
@@ -183,7 +179,7 @@ public class ServiceTimelinePublisher extends CompositeService {
  }

  public void componentInstanceFinished(ComponentInstance instance,
-      int exitCode, ContainerState state, String diagnostics) {
+      int exitCode, String diagnostics) {
    TimelineEntity entity = createComponentInstanceEntity(
        instance.getContainer().getId().toString());
@@ -192,7 +188,7 @@ public class ServiceTimelinePublisher extends CompositeService {
    entityInfos.put(ServiceTimelineMetricsConstants.EXIT_STATUS_CODE,
        exitCode);
    entityInfos.put(DIAGNOSTICS_INFO, diagnostics);
-    entityInfos.put(ServiceTimelineMetricsConstants.STATE, state);
+    entityInfos.put(ServiceTimelineMetricsConstants.STATE, STOPPED);
    entity.addInfo(entityInfos);

    // add an event
@@ -205,7 +201,7 @@ public class ServiceTimelinePublisher extends CompositeService {
    putEntity(entity);
  }

-  public void componentInstanceUpdated(Container container) {
+  public void componentInstanceIPHostUpdated(Container container) {
    TimelineEntity entity = createComponentInstanceEntity(container.getId());

    // create info keys
@@ -218,14 +214,27 @@
    entity.addInfo(entityInfos);

    TimelineEvent updateEvent = new TimelineEvent();
-    updateEvent
-        .setId(ServiceTimelineEvent.COMPONENT_INSTANCE_UPDATED.toString());
+    updateEvent.setId(ServiceTimelineEvent.COMPONENT_INSTANCE_IP_HOST_UPDATE
+        .toString());
    updateEvent.setTimestamp(System.currentTimeMillis());
    entity.addEvent(updateEvent);

    putEntity(entity);
  }

+  public void componentInstanceBecomeReady(Container container) {
+    TimelineEntity entity = createComponentInstanceEntity(container.getId());
+    Map<String, Object> entityInfo = new HashMap<>();
+    entityInfo.put(ServiceTimelineMetricsConstants.STATE, READY);
+    entity.addInfo(entityInfo);
+    TimelineEvent updateEvent = new TimelineEvent();
+    updateEvent.setId(ServiceTimelineEvent.COMPONENT_INSTANCE_BECOME_READY
+        .toString());
+    updateEvent.setTimestamp(System.currentTimeMillis());
+    entity.addEvent(updateEvent);
+    putEntity(entity);
+  }
+
  private void publishComponents(List<Component> components) {
    long currentTimeMillis = System.currentTimeMillis();
    for (Component component : components) {
@@ -266,22 +275,19 @@ public class ServiceTimelinePublisher extends CompositeService {
      putEntity(entity);

-      // publish component specific configurations
-      publishUserConf(component.getConfiguration(), component.getName(),
+      // publish container specific configurations
+      publishContainerConf(component.getConfiguration(), component.getName(),
          ServiceTimelineEntityType.COMPONENT.toString());
    }
  }

-  private void publishUserConf(Configuration configuration,
+  private void publishContainerConf(Configuration configuration,
      String entityId, String entityType) {
-    populateTimelineEntity(configuration.getProperties().entrySet().iterator(),
-        entityId, entityType);
-
    populateTimelineEntity(configuration.getEnv().entrySet().iterator(),
        entityId, entityType);

    for (ConfigFile configFile : configuration.getFiles()) {
-      populateTimelineEntity(configFile.getProps().entrySet().iterator(),
+      populateTimelineEntity(configFile.getProperties().entrySet().iterator(),
          entityId, entityType);
    }
  }

View File

@@ -96,9 +96,9 @@ public class TestAppJsonResolve extends Assert {
    props.put("k1", "overridden");
    props.put("k2", "v2");
    files.add(new ConfigFile().destFile("file1").type(ConfigFile.TypeEnum
-        .PROPERTIES).props(props));
+        .PROPERTIES).properties(props));
    files.add(new ConfigFile().destFile("file2").type(ConfigFile.TypeEnum
-        .XML).props(Collections.singletonMap("k3", "v3")));
+        .XML).properties(Collections.singletonMap("k3", "v3")));
    assertTrue(files.contains(simple.getFiles().get(0)));
    assertTrue(files.contains(simple.getFiles().get(1)));
@@ -113,9 +113,9 @@ public class TestAppJsonResolve extends Assert {
    props.put("k1", "v1");
    files.clear();
    files.add(new ConfigFile().destFile("file1").type(ConfigFile.TypeEnum
-        .PROPERTIES).props(props));
+        .PROPERTIES).properties(props));
    files.add(new ConfigFile().destFile("file2").type(ConfigFile.TypeEnum
-        .XML).props(Collections.singletonMap("k3", "v3")));
+        .XML).properties(Collections.singletonMap("k3", "v3")));
    assertTrue(files.contains(master.getFiles().get(0)));
    assertTrue(files.contains(master.getFiles().get(1)));

View File

@@ -160,7 +160,7 @@ public class TestServiceTimelinePublisher {

    // updated container state
    container.setState(ContainerState.READY);
-    serviceTimelinePublisher.componentInstanceUpdated(container);
+    serviceTimelinePublisher.componentInstanceIPHostUpdated(container);
    lastPublishedEntities =
        ((DummyTimelineClient) timelineClient).getLastPublishedEntities();
    assertEquals(1, lastPublishedEntities.size());

View File

@@ -10,7 +10,7 @@
    {
      "type": "PROPERTIES",
      "dest_file": "file1",
-      "props": {
+      "properties": {
        "k1": "v1",
        "k2": "v2"
      }
@@ -18,7 +18,7 @@
    {
      "type": "XML",
      "dest_file": "file2",
-      "props": {
+      "properties": {
        "k3": "v3"
      }
    }
@@ -38,7 +38,7 @@
    {
      "type": "PROPERTIES",
      "dest_file": "file1",
-      "props": {
+      "properties": {
        "k1": "overridden"
      }
    }

View File

@@ -0,0 +1,165 @@
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
This document describes how to configure services to be deployed on YARN.

There are mainly three types of configurations:

* The configurations specific to the custom service running on YARN, e.g. the hbase-site.xml for an HBase service running on YARN.
  * It can be specified at either the global service level (`Service#configuration`) or the component level (`Component#configuration`).
  * Service-level configs are the defaults for all components; component-level configs override them.
  * All config properties that use the constant variables described below are subject to substitution.
* The configurations specific to the YARN service AM (`Configuration#properties`).
  * E.g. `yarn.service.am-restart.max-attempts`, which controls how many times the framework AM can be retried if it fails. These configs mainly control the behavior of the framework AM, rather than the custom services launched by the framework.
* Some constants, such as `SERVICE_NAME`, for referring to system properties.
  * They are substituted by the service AM before the config files are written.

The details for each type of configuration are described below.
## Configuration for custom service
Below is what a configuration object typically looks like:
```
"configuration" : {
  "properties" : {
    "yarn.service.am-restart.max-attempts" : 10  // config for the service AM
  },
  "env" : {                 // The environment variables to be exported when the container gets launched
    "env1" : "val1"
  },
  "files" : [               // The list of configuration files to be mounted for the container
    {
      "type": "HADOOP_XML", // The format of the config file into which the "properties" are dumped
      "dest_file": "/etc/hadoop/conf/core-site.xml", // The location where the config file is mounted inside the container
      "properties" : {      // The list of key/value pairs to be dumped into the config file
        "fs.defaultFS" : "hdfs://myhdfs" // This property will be written into core-site.xml
      }
    },
    {
      "type": "HADOOP_XML", // The format of the config file.
      "src_file" : "hdfs://mycluster/tmp/conf/yarn-site.xml", // The location of the source config file to be downloaded
      "dest_file": "/etc/hadoop/conf/yarn-site.xml", // The location where the config file will be mounted inside the container
      "properties" : {
        "yarn.resourcemanager.webapp.address" : "${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}" // Merge into (or override existing property in) yarn-site.xml
      }
    }
  ]
}
```
* properties: the configurations for the service AM. Details below.
* env: the environment variables to be exported when the container gets launched.
* files: the list of configuration files to be mounted inside the container.
  * type: the format of the config file (`dest_file`) to be mounted inside the container. If `src_file` is specified, it is also the format of both `src_file` and `dest_file`.
    * HADOOP_XML: the Hadoop XML format. If `src_file` is specified, the file content will be read and parsed in Hadoop XML format.
    * XML: the standard XML format
    * JSON: the standard JSON format
    * YAML: the YAML format
    * PROPERTIES: the Java PROPERTIES format
    * TEMPLATE: the plain text format. If `src_file` is specified, the content of the `src_file` will be written into `dest_file` post constant substitution.
      If `src_file` is not specified, use `content` as the key in the `properties` field, and the value will be the actual content to be written into the `dest_file` post constant substitution. E.g.
```
{
  "type": "TEMPLATE",
  "dest_file": "/etc/conf/hello",
  "properties" : {
    "content" : "Hello world"
  }
}
```
The string `Hello world` will be written into a file located at `/etc/conf/hello` inside the container.
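Conversely, if `src_file` is specified for a `TEMPLATE` file, the template content is taken from that source file and the `properties` field can be omitted. A minimal sketch (the HDFS path below is just an illustrative placeholder):
```
{
  "type": "TEMPLATE",
  "src_file": "hdfs://mycluster/tmp/conf/motd.template",
  "dest_file": "/etc/conf/motd"
}
```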
* src_file: [optional] the source location of the config file at a network-accessible location such as HDFS.
  * The format of both `src_file` and `dest_file` is defined by `type`.
  * Currently, it only works with the `HADOOP_XML` and `TEMPLATE` types.
  * The `src_file` will be downloaded by the YARN NodeManager and mounted inside the container at the location specified by `dest_file`.
  * If any properties are specified in the `properties` field, they are added into (or override existing properties in) the `src_file`.
  * If `src_file` is not specified, only the properties in the `properties` field will be written into the `dest_file`.
* dest_file: the location where the config file is mounted inside the container. The file format is defined by `type`.
  `dest_file` can be an absolute path or a relative path. If it is a relative path, the file will be located in the `$PWD/conf` directory (where `$PWD` is the container local directory, which is mounted into all docker containers launched by YARN). See the sketch after this list.
* properties: the list of key/value pair configs to be written into the `dest_file` in the format defined by `type`. If `src_file` is specified, these properties will be added into (or override existing properties in) the `src_file`.
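For instance, a minimal sketch of a `PROPERTIES` file with no `src_file` and a relative `dest_file` (the file name and keys below are made-up examples); the file would end up under `$PWD/conf` inside the container:
```
{
  "type": "PROPERTIES",
  "dest_file": "app.properties",
  "properties": {
    "app.greeting": "Hello from ${COMPONENT_INSTANCE_NAME}"
  }
}
```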
## Configuration for YARN service AM
This section describes the configurations of the YARN service AM.
These can be specified either in the cluster `yarn-site.xml` at the global level or in the `properties` field of the `Configuration` object on a per-service basis, like below:
```
{
  "configuration" : {
    "properties" : {
      "yarn.service.am-restart.max-attempts" : 10
    }
  }
}
```
The above config makes the service AM be retried at most 10 times.
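The same property can instead be set cluster-wide in `yarn-site.xml`; a minimal sketch of the equivalent global setting:
```
<property>
  <name>yarn.service.am-restart.max-attempts</name>
  <value>10</value>
</property>
```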
#### Available configurations:
| Name | Description |
| ------------ | ------------- |
|yarn.service.client-am.retry.max-wait-ms | the max retry time in milliseconds for the service client to talk to the service AM. By default, it is set to 0, which means no retry |
|yarn.service.client-am.retry-interval-ms | the retry interval in milliseconds for the service client to talk to the service AM. By default, it is 2000, i.e. 2 seconds |
|yarn.service.container-failure.retry.max | the max number of retries for a container to be automatically restarted if it fails. By default, it is set to -1, which means retry forever |
|yarn.service.container-failure.retry-interval-ms| the retry interval in milliseconds for the container to be restarted. By default, it is 30000, i.e. 30 seconds |
|yarn.service.am-restart.max-attempts| the max number of attempts for the framework AM |
|yarn.service.am-resource.memory | the memory size in MB for the framework AM. By default, it is set to 1024 |
|yarn.service.queue | the default queue to which the service will be submitted. By default, it is submitted to the `default` queue |
|yarn.service.base.path | the root location for the service artifacts on hdfs for a user. By default, it is under ${user_home_dir}/.yarn/ |
|yarn.service.container-failure-per-component.threshold | the max number of container failures for a given component before the AM exits |
|yarn.service.node-blacklist.threshold | the maximum number of container failures on a node before the node is blacklisted by the AM |
|yarn.service.failure-count-reset.window | the window in seconds after which the `yarn.service.container-failure-per-component.threshold` and `yarn.service.node-blacklist.threshold` failure counts are reset. By default, it is 21600, i.e. 6 hours |
|yarn.service.readiness-check-interval.seconds | the interval in seconds between readiness checks. By default, it is 30 seconds |
|yarn.service.log.include-pattern| the regular expression for including log files whose file names match it when aggregating the logs after the application completes |
|yarn.service.log.exclude-pattern| the regular expression for excluding log files whose file names match it when aggregating the logs after the application completes. If a log file name matches both the include and exclude patterns, the file will be excluded |
|yarn.service.rolling-log.include-pattern| the regular expression for including log files whose file names match it when aggregating the logs while the app is running |
|yarn.service.rolling-log.exclude-pattern| the regular expression for excluding log files whose file names match it when aggregating the logs while the app is running. If a log file name matches both the include and exclude patterns, the file will be excluded |
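As an illustration only (the values below are made up, not recommendations), several of these could be combined in a service's `Configuration` object to tune failure handling:
```
{
  "configuration" : {
    "properties" : {
      "yarn.service.container-failure.retry.max" : 4,
      "yarn.service.container-failure.retry-interval-ms" : 10000,
      "yarn.service.failure-count-reset.window" : 3600
    }
  }
}
```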
## Constant variables for custom service
The service framework provides some constant variables for users to configure their services. These variables are either dynamically generated by the system or static ones, such as the service name, defined by the user.
Users can reference these constants in their configurations, and they will be dynamically substituted by the service AM. E.g.:
```
{
"type" : "HADOOP_XML",
"dest_file" : "/etc/hadoop/hbase-site.xml",
"properties" : {
"hbase.regionserver.hostname": "${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}"
}
}
```
Here, `COMPONENT_INSTANCE_NAME` and `SERVICE_NAME` are the constants to be substituted by the system.
Suppose `COMPONENT_INSTANCE_NAME` is `regionserver-0`, `SERVICE_NAME` is defined by the user as `hbase`, the user name is `devuser`, and the domain name is `hwxdev.site`.
Then the config will be substituted by the service AM and written into the config file `/etc/hadoop/hbase-site.xml` inside the container as below:
```
<property>
<name>hbase.regionserver.hostname</name>
<value>regionserver-0.hbase.devuser.hwxdev.site</value>
</property>
```
where `regionserver-0` is the actual component instance name assigned by the system for this container.
#### Available constants:
| Name | Description |
| ------------ | ------------- |
| SERVICE_NAME | name of the service defined by the user |
| USER | user who submits the service |
| DOMAIN | the domain name for the cluster |
| COMPONENT_NAME | the name for a given component |
| COMPONENT_INSTANCE_NAME | the name for a given component instance (i.e. container) |
| COMPONENT_ID | the monotonically increasing integer for a given component |
| CONTAINER_ID | the YARN container Id for a given container |
| ${COMPONENT_INSTANCE_NAME}_HOST | the hostname for a component instance (i.e. container), e.g. REGIONSERVER-0_HOST will be substituted by the actual hostname of the component instance. Note all characters must be uppercase. |
| ${COMPONENT_INSTANCE_NAME}_IP | the IP address for a component instance (i.e. container), e.g. REGIONSERVER-0_IP will be substituted by the actual IP address of the component instance. Note all characters must be uppercase. |
| CLUSTER_FS_URI | the URI of the cluster hdfs |
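As a sketch of the per-instance constants, assuming a component instance named `regionserver-0` and that these constants are resolved in `env` the same way as in `files` (the environment variable name below is made up for illustration), a service definition fragment could export that instance's hostname to the containers of a component:
```
"configuration" : {
  "env" : {
    "HBASE_REGIONSERVER_HOSTNAME" : "${REGIONSERVER-0_HOST}"
  }
}
```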
View File
@ -45,7 +45,7 @@ Note this example requires registry DNS.
{ {
"type": "ENV", "type": "ENV",
"dest_file": "/var/www/html/index.html", "dest_file": "/var/www/html/index.html",
"props": { "properties": {
"content": "<html><header><title>Title</title></header><body>Hello from ${COMPONENT_INSTANCE_NAME}!</body></html>" "content": "<html><header><title>Title</title></header><body>Hello from ${COMPONENT_INSTANCE_NAME}!</body></html>"
} }
} }
@ -122,7 +122,7 @@ A readiness check is added for the `httpd` component:
``` ```
"readiness_check": { "readiness_check": {
"type": "HTTP", "type": "HTTP",
"props": { "properties": {
"url": "http://${THIS_HOST}:8080" "url": "http://${THIS_HOST}:8080"
} }
}, },
View File
@ -52,9 +52,10 @@ The benefits of combining these workloads are two-fold:
* [Concepts](Concepts.md): Describes the internals of the framework and some features in YARN core to support running services on YARN. * [Concepts](Concepts.md): Describes the internals of the framework and some features in YARN core to support running services on YARN.
* [Service REST API](YarnServiceAPI.md): The API doc for deploying/managing services on YARN. * [Service REST API](YarnServiceAPI.md): The API doc for deploying/managing services on YARN.
* [Service Discovery](ServiceDiscovery.md): Descirbes the service discovery mechanism on YARN. * [Service Discovery](ServiceDiscovery.md): Describes the service discovery mechanism on YARN.
* [Registry DNS](RegistryDNS.md): Deep dives into the Registry DNS internals. * [Registry DNS](RegistryDNS.md): Deep dives into the Registry DNS internals.
* [Examples](Examples.md): List some example service definitions (`Yarnfile`). * [Examples](Examples.md): List some example service definitions (`Yarnfile`).
* [Configurations](Configurations.md): Describes how to configure the custom services on YARN.
View File
@ -251,7 +251,7 @@ A config file that needs to be created and made available as a volume in a servi
|type|Config file in the standard format like xml, properties, json, yaml, template.|false|enum (XML, PROPERTIES, JSON, YAML, TEMPLATE, ENV, HADOOP_XML)|| |type|Config file in the standard format like xml, properties, json, yaml, template.|false|enum (XML, PROPERTIES, JSON, YAML, TEMPLATE, ENV, HADOOP_XML)||
|dest_file|The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers. If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf.|false|string|| |dest_file|The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers. If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf.|false|string||
|src_file|This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.|false|string|| |src_file|This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.|false|string||
|props|A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any, existing properties in src_file or be added as new properties in src_file.|false|object|| |properties|A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any, existing properties in src_file or be added as new properties in src_file.|false|object||
### Configuration ### Configuration
@ -260,7 +260,7 @@ Set of configuration properties that can be injected into the service components
|Name|Description|Required|Schema|Default| |Name|Description|Required|Schema|Default|
|----|----|----|----|----| |----|----|----|----|----|
|properties|A blob of key-value pairs of common service properties.|false|object|| |properties|A blob of key-value pairs for configuring YARN service AM.|false|object||
|env|A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.|false|object|| |env|A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.|false|object||
|files|Array of list of files that needs to be created and made available as volumes in the service component containers.|false|ConfigFile array|| |files|Array of list of files that needs to be created and made available as volumes in the service component containers.|false|ConfigFile array||
@ -308,7 +308,7 @@ A custom command or a pluggable helper container to determine the readiness of a
|Name|Description|Required|Schema|Default| |Name|Description|Required|Schema|Default|
|----|----|----|----|----| |----|----|----|----|----|
|type|E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).|true|enum (HTTP, PORT)|| |type|E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).|true|enum (HTTP, PORT)||
|props|A blob of key value pairs that will be used to configure the check.|false|object|| |properties|A blob of key value pairs that will be used to configure the check.|false|object||
|artifact|Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. Note, artifacts of type docker only is supported for now. NOT IMPLEMENTED YET|false|Artifact|| |artifact|Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. Note, artifacts of type docker only is supported for now. NOT IMPLEMENTED YET|false|Artifact||
@ -529,14 +529,14 @@ POST URL - http://localhost:8088:/ws/v1/services/hbase-app-1
{ {
"type": "XML", "type": "XML",
"dest_file": "/etc/hadoop/conf/core-site.xml", "dest_file": "/etc/hadoop/conf/core-site.xml",
"props": { "properties": {
"fs.defaultFS": "${CLUSTER_FS_URI}" "fs.defaultFS": "${CLUSTER_FS_URI}"
} }
}, },
{ {
"type": "XML", "type": "XML",
"dest_file": "/etc/hbase/conf/hbase-site.xml", "dest_file": "/etc/hbase/conf/hbase-site.xml",
"props": { "properties": {
"hbase.cluster.distributed": "true", "hbase.cluster.distributed": "true",
"hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}", "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
"hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase", "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",
@ -569,14 +569,14 @@ POST URL - http://localhost:8088:/ws/v1/services/hbase-app-1
{ {
"type": "XML", "type": "XML",
"dest_file": "/etc/hadoop/conf/core-site.xml", "dest_file": "/etc/hadoop/conf/core-site.xml",
"props": { "properties": {
"fs.defaultFS": "${CLUSTER_FS_URI}" "fs.defaultFS": "${CLUSTER_FS_URI}"
} }
}, },
{ {
"type": "XML", "type": "XML",
"dest_file": "/etc/hbase/conf/hbase-site.xml", "dest_file": "/etc/hbase/conf/hbase-site.xml",
"props": { "properties": {
"hbase.cluster.distributed": "true", "hbase.cluster.distributed": "true",
"hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}", "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}",
"hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase", "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase",
View File
@ -49,7 +49,7 @@ export default Ember.Controller.extend({
contentPath: 'memory' contentPath: 'memory'
}, { }, {
id: 'instances', id: 'instances',
headerTitle: 'Number Of Instances', headerTitle: 'Number of Instances',
contentPath: 'instances', contentPath: 'instances',
observePath: true observePath: true
}); });
View File
@ -20,7 +20,7 @@
<div class="row"> <div class="row">
<div class="panel panel-default"> <div class="panel panel-default">
<div class="panel-heading"> <div class="panel-heading">
<div class="panel-title">{{type}} Metrics: Detailed Information</div> <div class="panel-title">{{type}} Metrics:</div>
</div> </div>
<div class=""> <div class="">
<table class="table table-hover table-custom-bordered table-custom-stripped table-radius-none table-border-none"> <table class="table table-hover table-custom-bordered table-custom-stripped table-radius-none table-border-none">
View File
@ -20,7 +20,7 @@
{{#if model.container}} {{#if model.container}}
<div class="panel panel-default"> <div class="panel panel-default">
<div class="panel-heading"> <div class="panel-heading">
<div class="panel-title">Component Information</div> <div class="panel-title">Component Instance Information</div>
</div> </div>
<div class=""> <div class="">
<table class="table table-striped table-bordered table-hover"> <table class="table table-striped table-bordered table-hover">
@ -30,7 +30,7 @@
<td>{{check-availability model.container.instanceName}}</td> <td>{{check-availability model.container.instanceName}}</td>
</tr> </tr>
<tr> <tr>
<td>Component Group</td> <td>Component</td>
<td>{{check-availability model.container.component}}</td> <td>{{check-availability model.container.component}}</td>
</tr> </tr>
<tr> <tr>
View File
@ -18,7 +18,7 @@
<div class="row"> <div class="row">
<div class="col-md-12"> <div class="col-md-12">
<h3>Active Components: {{model.componentName}}</h3> <h3>Component: {{model.componentName}}</h3>
{{em-table columns=tableColumns rows=model.instances}} {{em-table columns=tableColumns rows=model.instances}}
</div> </div>
</div> </div>