YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

This commit is contained in:
Jian He 2017-04-20 23:53:04 +08:00
parent c3ac67e3e1
commit 384ee13eec
153 changed files with 16304 additions and 1604 deletions

View File

@ -50,6 +50,7 @@ import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@ -251,10 +252,12 @@ public class ApplicationApiService {
.getNumberOfContainers()).build();
}
try {
long original = SLIDER_CLIENT.flex(appName, component);
return Response.ok().entity(
"Updating " + componentName + " size from " + original + " to "
+ component.getNumberOfContainers()).build();
Map<String, Long> original = SLIDER_CLIENT.flex(appName, Collections
.singletonMap(component.getName(),
component.getNumberOfContainers()));
return Response.ok().entity("Updating " + componentName + " size from "
+ original.get(componentName) + " to "
+ component.getNumberOfContainers()).build();
} catch (YarnException | IOException e) {
ApplicationStatus status = new ApplicationStatus();
status.setDiagnostics(e.getMessage());

View File

@ -327,15 +327,37 @@
<artifactId>easymock</artifactId>
<version>3.1</version>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.objenesis</groupId>
<artifactId>objenesis</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-easymock</artifactId>
<version>1.5</version>
<version>1.6.5</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>1.6.5</version>
<exclusions>
<exclusion>
<groupId>org.javassist</groupId>
<artifactId>javassist</artifactId>
</exclusion>
<exclusion>
<groupId>org.objenesis</groupId>
<artifactId>objenesis</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
@ -359,6 +381,13 @@
<artifactId>swagger-annotations</artifactId>
<version>1.5.4</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<scope>test</scope>
</dependency>
</dependencies>

View File

@ -126,9 +126,9 @@ public interface InternalKeys {
String CHAOS_MONKEY_INTERVAL_MINUTES = CHAOS_MONKEY_INTERVAL + ".minutes";
String CHAOS_MONKEY_INTERVAL_SECONDS = CHAOS_MONKEY_INTERVAL + ".seconds";
int DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS = 0;
int DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS = 0;
int DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES = 0;
long DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS = 0;
long DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS = 0;
long DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES = 0;
String CHAOS_MONKEY_DELAY = "internal.chaos.monkey.delay";
String CHAOS_MONKEY_DELAY_DAYS = CHAOS_MONKEY_DELAY + ".days";

View File

@ -71,6 +71,15 @@ public interface ResourceKeys {
*/
String YARN_CORES = "yarn.vcores";
/**
* If normalization is set to false, then if the resource (memory and/or
* vcore) requested by a role is higher than YARN limits, then the resource
* request is not normalized. If this causes failures at the YARN level then
* applications are expecting that to happen. Default value is true.
*/
String YARN_RESOURCE_NORMALIZATION_ENABLED =
"yarn.resource.normalization.enabled";
/**
* Number of disks per instance to ask YARN for
* {@value}
@ -140,9 +149,9 @@ public interface ResourceKeys {
int DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS = 0;
int DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS = 6;
int DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES = 0;
long DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS = 0;
long DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS = 6;
long DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES = 0;
/**

View File

@ -53,8 +53,8 @@ public interface SliderClusterProtocol extends VersionedProtocol {
YarnException;
Messages.FlexComponentResponseProto flexComponent(
Messages.FlexComponentRequestProto request) throws IOException;
Messages.FlexComponentsResponseProto flexComponents(
Messages.FlexComponentsRequestProto request) throws IOException;
/**
* Get the current cluster status

View File

@ -116,6 +116,28 @@ public class Configuration implements Serializable {
return Long.parseLong(value);
}
/**
 * Returns the named property parsed as an int, or the supplied default
 * when the name is null or the property is missing/empty.
 *
 * @param name property key (trimmed before lookup); may be null
 * @param defaultValue value returned when no usable property exists
 * @return the parsed int value, or {@code defaultValue}
 * @throws NumberFormatException if the stored value is not a valid int
 */
public int getPropertyInt(String name, int defaultValue) {
  if (name == null) {
    return defaultValue;
  }
  final String raw = properties.get(name.trim());
  return StringUtils.isEmpty(raw) ? defaultValue : Integer.parseInt(raw);
}
/**
 * Returns the named property parsed as a boolean, or the supplied default
 * when the name is null or the property is missing/empty.
 *
 * @param name property key (trimmed before lookup); may be null
 * @param defaultValue value returned when no usable property exists
 * @return {@code Boolean.parseBoolean} of the stored value, or the default
 */
public boolean getPropertyBool(String name, boolean defaultValue) {
  if (name == null) {
    return defaultValue;
  }
  final String raw = properties.get(name.trim());
  return StringUtils.isEmpty(raw) ? defaultValue : Boolean.parseBoolean(raw);
}
public String getProperty(String name, String defaultValue) {
if (name == null) {
return defaultValue;

View File

@ -35,11 +35,8 @@ public class RoleStatistics {
public long limitsExceeded = 0L;
public long nodeFailed = 0L;
public long preempted = 0L;
public long releasing = 0L;
public long requested = 0L;
public long started = 0L;
public long startFailed = 0L;
public long totalRequested = 0L;
/**
* Add another statistics instance
@ -56,11 +53,8 @@ public class RoleStatistics {
limitsExceeded += that.limitsExceeded;
nodeFailed += that.nodeFailed;
preempted += that.preempted;
releasing += that.releasing;
requested += that.requested;
started += that.started;
startFailed += that.totalRequested;
totalRequested += that.totalRequested;
return this;
}
}

View File

@ -20,12 +20,9 @@ package org.apache.slider.client;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.io.Files;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
@ -67,7 +64,6 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.Times;
import org.apache.slider.api.ClusterNode;
import org.apache.slider.api.SliderClusterProtocol;
import org.apache.slider.api.proto.Messages;
import org.apache.slider.api.resource.Application;
@ -83,22 +79,17 @@ import org.apache.slider.common.params.AbstractActionArgs;
import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
import org.apache.slider.common.params.ActionAMSuicideArgs;
import org.apache.slider.common.params.ActionClientArgs;
import org.apache.slider.common.params.ActionCreateArgs;
import org.apache.slider.common.params.ActionDependencyArgs;
import org.apache.slider.common.params.ActionDiagnosticArgs;
import org.apache.slider.common.params.ActionEchoArgs;
import org.apache.slider.common.params.ActionExistsArgs;
import org.apache.slider.common.params.ActionFlexArgs;
import org.apache.slider.common.params.ActionFreezeArgs;
import org.apache.slider.common.params.ActionInstallKeytabArgs;
import org.apache.slider.common.params.ActionInstallPackageArgs;
import org.apache.slider.common.params.ActionKDiagArgs;
import org.apache.slider.common.params.ActionKeytabArgs;
import org.apache.slider.common.params.ActionKillContainerArgs;
import org.apache.slider.common.params.ActionListArgs;
import org.apache.slider.common.params.ActionLookupArgs;
import org.apache.slider.common.params.ActionNodesArgs;
import org.apache.slider.common.params.ActionPackageArgs;
import org.apache.slider.common.params.ActionRegistryArgs;
import org.apache.slider.common.params.ActionResolveArgs;
import org.apache.slider.common.params.ActionResourceArgs;
@ -122,7 +113,6 @@ import org.apache.slider.core.exceptions.NotFoundException;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
import org.apache.slider.core.exceptions.UsageException;
import org.apache.slider.core.exceptions.WaitTimeoutException;
import org.apache.slider.core.launch.ClasspathConstructor;
import org.apache.slider.core.launch.CredentialUtils;
import org.apache.slider.core.launch.JavaCommandLineBuilder;
@ -144,7 +134,6 @@ import org.apache.slider.core.zk.ZKIntegration;
import org.apache.slider.providers.AbstractClientProvider;
import org.apache.slider.providers.ProviderUtils;
import org.apache.slider.providers.SliderProviderFactory;
import org.apache.slider.providers.agent.AgentKeys;
import org.apache.slider.server.appmaster.SliderAppMaster;
import org.apache.slider.server.appmaster.rpc.RpcBinder;
import org.apache.slider.server.services.utility.AbstractSliderLaunchedService;
@ -160,11 +149,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayOutputStream;
import java.io.Console;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
@ -177,8 +164,6 @@ import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@ -226,7 +211,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
private ClientArgs serviceArgs;
public ApplicationId applicationId;
private String deployedClusterName;
/**
* Cluster operations against the deployed cluster -will be null
@ -334,23 +319,19 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
exitCode = actionAmSuicide(clusterName,
serviceArgs.getActionAMSuicideArgs());
break;
case ACTION_BUILD:
exitCode = actionBuild(getApplicationFromArgs(clusterName,
serviceArgs.getActionBuildArgs()));
break;
case ACTION_CLIENT:
exitCode = actionClient(serviceArgs.getActionClientArgs());
break;
case ACTION_CREATE:
ActionCreateArgs args = serviceArgs.getActionCreateArgs();
File file = args.getAppDef();
Path filePath = new Path(file.getAbsolutePath());
log.info("Loading app definition from: " + filePath);
Application application =
jsonSerDeser.load(FileSystem.getLocal(getConfig()), filePath);
if(args.lifetime > 0) {
application.setLifetime(args.lifetime);
}
application.setName(clusterName);
actionCreate(application);
actionCreate(getApplicationFromArgs(clusterName,
serviceArgs.getActionCreateArgs()));
break;
case ACTION_DEPENDENCY:
@ -391,14 +372,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
serviceArgs.getActionKillContainerArgs());
break;
case ACTION_INSTALL_KEYTAB:
exitCode = actionInstallKeytab(serviceArgs.getActionInstallKeytabArgs());
break;
case ACTION_INSTALL_PACKAGE:
exitCode = actionInstallPkg(serviceArgs.getActionInstallPackageArgs());
break;
case ACTION_KEYTAB:
exitCode = actionKeytab(serviceArgs.getActionKeytabArgs());
break;
@ -415,10 +388,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
exitCode = actionNodes("", serviceArgs.getActionNodesArgs());
break;
case ACTION_PACKAGE:
exitCode = actionPackage(serviceArgs.getActionPackageArgs());
break;
case ACTION_REGISTRY:
exitCode = actionRegistry(serviceArgs.getActionRegistryArgs());
break;
@ -605,15 +574,15 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
* force=true by default.
*/
@Override
public void actionDestroy(String appName)
public int actionDestroy(String appName)
throws YarnException, IOException {
validateClusterName(appName);
verifyNoLiveApp(appName, "Destroy");
Path appDir = sliderFileSystem.buildClusterDirPath(appName);
FileSystem fs = sliderFileSystem.getFileSystem();
if (fs.exists(appDir)) {
if (fs.delete(appDir, true)) {
log.info("Successfully deleted application + " + appName);
return;
log.info("Successfully deleted application dir for " + appName);
} else {
String message =
"Failed to delete application + " + appName + " at: " + appDir;
@ -627,7 +596,20 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
log.warn(message);
throw new YarnException(message);
}
//TODO clean registry
//TODO clean registry?
String registryPath = SliderRegistryUtils.registryPathForInstance(
appName);
try {
getRegistryOperations().delete(registryPath, true);
} catch (IOException e) {
log.warn("Error deleting registry entry {}: {} ", registryPath, e, e);
} catch (SliderException e) {
log.warn("Error binding to registry {} ", e, e);
}
log.info("Destroyed cluster {}", appName);
return EXIT_SUCCESS;
}
@ -648,6 +630,26 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
return factory.createClientProvider();
}
/**
 * Loads the application definition JSON named by the action arguments,
 * applies the optional lifetime override, and stamps the result with the
 * cluster name.
 *
 * @param clusterName name to assign to the loaded application
 * @param args build/create arguments carrying the local app definition file
 * @return the populated {@link Application}
 * @throws IOException if the definition file cannot be read or parsed
 */
private Application getApplicationFromArgs(String clusterName,
    AbstractClusterBuildingActionArgs args) throws IOException {
  Path definitionPath = new Path(args.getAppDef().getAbsolutePath());
  log.info("Loading app definition from: " + definitionPath);
  // The app definition is read from the local filesystem at submit time.
  Application app =
      jsonSerDeser.load(FileSystem.getLocal(getConfig()), definitionPath);
  if (args.lifetime > 0) {
    app.setLifetime(args.lifetime);
  }
  app.setName(clusterName);
  return app;
}
/**
 * Saves the application definition to its cluster directory on HDFS
 * without starting the application.
 *
 * @param application application definition to persist
 * @return {@code EXIT_SUCCESS}
 * @throws YarnException if validation fails
 * @throws IOException on filesystem errors
 */
public int actionBuild(Application application) throws YarnException,
    IOException {
  // checkAppNotExistOnHdfs yields the target cluster dir (and presumably
  // rejects an already-existing app of the same name -- defined elsewhere).
  persistApp(checkAppNotExistOnHdfs(application), application);
  return EXIT_SUCCESS;
}
public ApplicationId actionCreate(Application application)
throws IOException, YarnException {
@ -684,8 +686,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
}
submissionContext.setMaxAppAttempts(conf.getInt(KEY_AM_RESTART_LIMIT, 2));
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
Map<String, LocalResource> localResources = new HashMap<>();
// copy local slideram-log4j.properties to hdfs and add to localResources
boolean hasSliderAMLog4j =
@ -724,10 +725,16 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
amLaunchContext.setLocalResources(localResources);
addCredentialsIfSecure(conf, amLaunchContext);
submissionContext.setAMContainerSpec(amLaunchContext);
yarnClient.submitApplication(submissionContext);
submitApplication(submissionContext);
return submissionContext.getApplicationId();
}
/**
 * Submits the prepared application to YARN.
 * Exposed (and public) so tests can intercept or mock the actual
 * submission.
 *
 * @param context fully populated submission context
 * @return the application id carried by the submitted context
 * @throws IOException on connection problems
 * @throws YarnException if YARN rejects the submission
 */
@VisibleForTesting
public ApplicationId submitApplication(ApplicationSubmissionContext context)
    throws IOException, YarnException {
  return yarnClient.submitApplication(context);
}
private void printLocalResources(Map<String, LocalResource> map) {
log.info("Added LocalResource for localization: ");
StringBuilder builder = new StringBuilder();
@ -800,7 +807,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
private Map<String, String> addAMEnv(Configuration conf, Path tempPath)
throws IOException {
Map<String, String> env = new HashMap<String, String>();
Map<String, String> env = new HashMap<>();
ClasspathConstructor classpath =
buildClasspath(SliderKeys.SUBMITTED_CONF_DIR, "lib",
sliderFileSystem, getUsingMiniMRCluster());
@ -929,69 +936,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
return 0;
}
// protected static void checkForCredentials(Configuration conf,
// ConfTree tree, String clusterName) throws IOException {
// if (tree.credentials == null || tree.credentials.isEmpty()) {
// log.info("No credentials requested");
// return;
// }
//
// Console console = System.console();
// for (Entry<String, List<String>> cred : tree.credentials.entrySet()) {
// String provider = cred.getKey()
// .replaceAll(Pattern.quote("${CLUSTER_NAME}"), clusterName)
// .replaceAll(Pattern.quote("${CLUSTER}"), clusterName);
// List<String> aliases = cred.getValue();
// if (aliases == null || aliases.isEmpty()) {
// continue;
// }
// Configuration c = new Configuration(conf);
// c.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, provider);
// CredentialProvider credentialProvider = CredentialProviderFactory.getProviders(c).get(0);
// Set<String> existingAliases = new HashSet<>(credentialProvider.getAliases());
// for (String alias : aliases) {
// if (existingAliases.contains(alias.toLowerCase(Locale.ENGLISH))) {
// log.info("Credentials for " + alias + " found in " + provider);
// } else {
// if (console == null) {
// throw new IOException("Unable to input password for " + alias +
// " because System.console() is null; provider " + provider +
// " must be populated manually");
// }
// char[] pass = readPassword(alias, console);
// credentialProvider.createCredentialEntry(alias, pass);
// credentialProvider.flush();
// Arrays.fill(pass, ' ');
// }
// }
// }
// }
/**
 * Prompts on the console for a password twice and returns it once both
 * entries match. Rejected first entries and the confirmation copy are
 * blanked with spaces so password characters do not linger in memory.
 *
 * @param alias credential alias named in the prompt
 * @param console interactive console to read from; must not be null
 * @return the confirmed password characters (caller should wipe when done)
 * @throws IOException if no credential could be read from stdin (e.g. EOF)
 */
private static char[] readPassword(String alias, Console console)
    throws IOException {
  char[] cred = null;
  boolean noMatch;
  do {
    console.printf("%s %s: \n", PASSWORD_PROMPT, alias);
    char[] newPassword1 = console.readPassword();
    console.printf("%s %s again: \n", PASSWORD_PROMPT, alias);
    char[] newPassword2 = console.readPassword();
    noMatch = !Arrays.equals(newPassword1, newPassword2);
    if (noMatch) {
      // Wipe the rejected entry before retrying.
      if (newPassword1 != null) {
        Arrays.fill(newPassword1, ' ');
      }
      // Fix: the original wrapped this constant in a no-op String.format();
      // log it directly (a literal '%' would have crashed the format call).
      log.info("Passwords don't match. Try again.");
    } else {
      cred = newPassword1;
    }
    // The confirmation copy is never returned, so always wipe it.
    if (newPassword2 != null) {
      Arrays.fill(newPassword2, ' ');
    }
  } while (noMatch);
  if (cred == null) {
    // Reachable when readPassword() returns null (console EOF) twice.
    throw new IOException("Could not read credentials for " + alias +
        " from stdin");
  }
  return cred;
}
@Override
public int actionKeytab(ActionKeytabArgs keytabInfo)
throws YarnException, IOException {
@ -1077,43 +1021,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
return EXIT_SUCCESS;
}
/**
 * Deprecated entry point for installing a keytab; warns and delegates to
 * {@link #actionKeytab} with converted arguments.
 *
 * @param installKeytabInfo legacy install-keytab arguments
 * @return exit code from the keytab action
 * @throws YarnException on action failure
 * @throws IOException on I/O failure
 */
@Override
public int actionInstallKeytab(ActionInstallKeytabArgs installKeytabInfo)
    throws YarnException, IOException {
  log.warn("The 'install-keytab' option has been deprecated. Please use 'keytab --install'.");
  ActionKeytabArgs converted = new ActionKeytabArgs(installKeytabInfo);
  return actionKeytab(converted);
}
/**
 * Deprecated entry point for installing an application package; kept only
 * for backward compatibility with the old 'install-package' command.
 * Copies the local package file into the unversioned package directory on
 * the cluster filesystem.
 *
 * @param installPkgInfo legacy install-package arguments (name, package
 *        URI, replace flag)
 * @return {@code EXIT_SUCCESS} on completion
 * @throws YarnException on validation failures (missing name, existing
 *         package without --replacepkg)
 * @throws IOException on filesystem errors
 */
@Override
public int actionInstallPkg(ActionInstallPackageArgs installPkgInfo) throws
    YarnException,
    IOException {
  log.warn("The " + ACTION_INSTALL_PACKAGE
      + " option has been deprecated. Please use '"
      + ACTION_PACKAGE + " " + ClientArgs.ARG_INSTALL + "'.");
  if (StringUtils.isEmpty(installPkgInfo.name)) {
    throw new BadCommandArgumentsException(
        E_INVALID_APPLICATION_TYPE_NAME + "\n"
            + CommonArgs.usage(serviceArgs, ACTION_INSTALL_PACKAGE));
  }
  Path srcFile = extractPackagePath(installPkgInfo.packageURI);
  // Do not provide new options to install-package command as it is in
  // deprecated mode. So version is kept null here. Use package --install.
  Path pkgPath = sliderFileSystem.buildPackageDirPath(installPkgInfo.name,
      null);
  FileSystem sfs = sliderFileSystem.getFileSystem();
  sfs.mkdirs(pkgPath);
  Path fileInFs = new Path(pkgPath, srcFile.getName());
  log.info("Installing package {} at {} and overwrite is {}.",
      srcFile, fileInFs, installPkgInfo.replacePkg);
  // Refuse to clobber an existing package unless --replacepkg was given.
  require(!(sfs.exists(fileInFs) && !installPkgInfo.replacePkg),
      "Package exists at %s. : %s", fileInFs.toUri(), E_USE_REPLACEPKG_TO_OVERWRITE);
  sfs.copyFromLocalFile(false, installPkgInfo.replacePkg, srcFile, fileInFs);
  return EXIT_SUCCESS;
}
@Override
public int actionResource(ActionResourceArgs resourceInfo)
throws YarnException, IOException {
@ -1287,236 +1194,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
return EXIT_SUCCESS;
}
/**
 * Dispatches the 'package' command to its sub-operations (help, install,
 * delete, list, instances) based on which flags were set.
 *
 * NOTE(review): the flags are checked independently, so if more than one
 * is set, every matching operation runs in order and the returned exit
 * code is that of the last one executed.
 *
 * @param actionPackageInfo parsed package-command arguments
 * @return exit code of the last executed sub-operation
 * @throws YarnException on sub-operation failure, or
 *         BadCommandArgumentsException when no operation flag was set
 * @throws IOException on I/O failure (including opening the --out file)
 */
@Override
public int actionPackage(ActionPackageArgs actionPackageInfo)
    throws YarnException, IOException {
  // Redirect client output to the requested file (or stdout) for the
  // duration of the command.
  initializeOutputStream(actionPackageInfo.out);
  int exitCode = -1;
  if (actionPackageInfo.help) {
    exitCode = actionHelp(ACTION_PACKAGE);
  }
  if (actionPackageInfo.install) {
    exitCode = actionPackageInstall(actionPackageInfo);
  }
  if (actionPackageInfo.delete) {
    exitCode = actionPackageDelete(actionPackageInfo);
  }
  if (actionPackageInfo.list) {
    exitCode = actionPackageList();
  }
  if (actionPackageInfo.instances) {
    exitCode = actionPackageInstances();
  }
  finalizeOutputStream(actionPackageInfo.out);
  // -1 means no operation flag matched: report a usage error.
  if (exitCode != -1) {
    return exitCode;
  }
  throw new BadCommandArgumentsException(
      "Select valid package operation option");
}
/**
 * Points the client output stream at the named file (UTF-8), or at stdout
 * when no file was requested.
 *
 * @param outFile output file path, or null for stdout
 * @throws IOException if the file cannot be opened for writing
 */
private void initializeOutputStream(String outFile)
    throws IOException {
  clientOutputStream = (outFile == null)
      ? System.out
      : new PrintStream(outFile, "UTF-8");
}
/**
 * Flushes and closes the client output stream if it was redirected to a
 * file, then always resets it to stdout.
 *
 * @param outFile output file path used at initialization, or null
 */
private void finalizeOutputStream(String outFile) {
  PrintStream current = clientOutputStream;
  if (outFile != null && current != null) {
    // Only a file-backed stream is closed; stdout is never closed here.
    current.flush();
    current.close();
  }
  clientOutputStream = System.out;
}
/**
 * Lists application instances together with their package name, version,
 * and application definition location.
 *
 * Currently a stub: the entire listing implementation is disabled below
 * pending the package-handling rework (see the TODO), so this method
 * always returns success without printing anything.
 *
 * @return {@code EXIT_SUCCESS} always
 * @throws YarnException declared for interface symmetry; not thrown by the
 *         current stub
 * @throws IOException declared for interface symmetry; not thrown by the
 *         current stub
 */
private int actionPackageInstances() throws YarnException, IOException {
  // Map<String, Path> persistentInstances = sliderFileSystem
  // .listPersistentInstances();
  // if (persistentInstances.isEmpty()) {
  // log.info("No slider cluster specification available");
  // return EXIT_SUCCESS;
  // }
  // String pkgPathValue = sliderFileSystem
  // .buildPackageDirPath(StringUtils.EMPTY, StringUtils.EMPTY).toUri()
  // .getPath();
  // FileSystem fs = sliderFileSystem.getFileSystem();
  // Iterator<Map.Entry<String, Path>> instanceItr = persistentInstances
  // .entrySet().iterator();
  // log.info("List of applications with its package name and path");
  // println("%-25s %15s %30s %s", "Cluster Name", "Package Name",
  // "Package Version", "Application Location");
  //TODO deal with packages
  // while(instanceItr.hasNext()) {
  // Map.Entry<String, Path> entry = instanceItr.next();
  // String clusterName = entry.getKey();
  // Path clusterPath = entry.getValue();
  // AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved(
  // clusterName, clusterPath);
  // Path appDefPath = null;
  // try {
  // appDefPath = new Path(
  // getApplicationDefinitionPath(instanceDefinition
  // .getAppConfOperations()));
  // } catch (BadConfigException e) {
  // // Invalid cluster state, so move on to next. No need to log anything
  // // as this is just listing of instances.
  // continue;
  // }
  // if (!appDefPath.isUriPathAbsolute()) {
  // appDefPath = new Path(fs.getHomeDirectory(), appDefPath);
  // }
  // String appDefPathStr = appDefPath.toUri().toString();
  // try {
  // if (appDefPathStr.contains(pkgPathValue) && fs.isFile(appDefPath)) {
  // String packageName = appDefPath.getParent().getName();
  // String packageVersion = StringUtils.EMPTY;
  // if (instanceDefinition.isVersioned()) {
  // packageVersion = packageName;
  // packageName = appDefPath.getParent().getParent().getName();
  // }
  // println("%-25s %15s %30s %s", clusterName, packageName,
  // packageVersion, appDefPathStr);
  // }
  // } catch (IOException e) {
  // log.debug("{} application definition path {} is not found.", clusterName, appDefPathStr);
  // }
  // }
  return EXIT_SUCCESS;
}
/**
 * Prints the names of all installed package directories found under the
 * shared package install path, or logs that none are installed.
 *
 * @return {@code EXIT_SUCCESS} always
 * @throws IOException on filesystem errors
 */
private int actionPackageList() throws IOException {
  Path installPath = sliderFileSystem.buildPackageDirPath(StringUtils.EMPTY,
      StringUtils.EMPTY);
  log.info("Package install path : {}", installPath);
  FileSystem fs = sliderFileSystem.getFileSystem();
  if (!fs.isDirectory(installPath)) {
    log.info("No package(s) installed");
    return EXIT_SUCCESS;
  }
  // Each installed package is a direct subdirectory of the install path.
  StringBuilder listing = new StringBuilder("List of installed packages:\n");
  boolean found = false;
  for (FileStatus status : fs.listStatus(installPath)) {
    if (!status.isDirectory()) {
      continue;
    }
    listing.append("\t").append(status.getPath().getName()).append("\n");
    found = true;
  }
  if (found) {
    println(listing.toString());
  } else {
    log.info("No package(s) installed");
  }
  return EXIT_SUCCESS;
}
/**
 * Looks inside the package for a metainfo descriptor (JSON preferred, then
 * XML) and, when one is found, copies it next to the destination file as
 * "&lt;destFile&gt;.metainfo.json" or "&lt;destFile&gt;.metainfo.xml".
 * Does nothing if the package contains neither descriptor.
 *
 * @param srcFile package file to inspect for a metainfo resource
 * @param destFile package location whose sibling summary file is created
 * @param overwrite whether an existing summary file may be replaced
 * @throws IOException on read/write failures
 */
private void createSummaryMetainfoFile(Path srcFile, Path destFile,
    boolean overwrite) throws IOException {
  FileSystem srcFs = srcFile.getFileSystem(getConfig());
  // Both candidate streams are opened up front and closed by
  // try-with-resources; at most one of them is actually consumed.
  try (InputStream inputStreamJson = SliderUtils
      .getApplicationResourceInputStream(srcFs, srcFile, "metainfo.json");
      InputStream inputStreamXml = SliderUtils
          .getApplicationResourceInputStream(srcFs, srcFile, "metainfo.xml");) {
    InputStream inputStream = null;
    Path summaryFileInFs = null;
    if (inputStreamJson != null) {
      // JSON takes precedence when both descriptors are present.
      inputStream = inputStreamJson;
      summaryFileInFs = new Path(destFile.getParent(), destFile.getName()
          + ".metainfo.json");
      log.info("Found JSON metainfo file in package");
    } else if (inputStreamXml != null) {
      inputStream = inputStreamXml;
      summaryFileInFs = new Path(destFile.getParent(), destFile.getName()
          + ".metainfo.xml");
      log.info("Found XML metainfo file in package");
    }
    if (inputStream != null) {
      try (FSDataOutputStream dataOutputStream = sliderFileSystem
          .getFileSystem().create(summaryFileInFs, overwrite)) {
        log.info("Creating summary metainfo file");
        IOUtils.copy(inputStream, dataOutputStream);
      }
    }
  }
}
/**
 * Installs a local package file into the (name, version) package directory
 * on the cluster filesystem, creates its summary metainfo file, and logs
 * the home-relative path the user should set as the app definition.
 *
 * @param actionPackageArgs arguments carrying the package name, version,
 *        local package URI, and --replacepkg flag
 * @return {@code EXIT_SUCCESS} on completion
 * @throws YarnException on validation failures (missing name, existing
 *         package without --replacepkg)
 * @throws IOException on filesystem errors
 */
private int actionPackageInstall(ActionPackageArgs actionPackageArgs)
    throws YarnException, IOException {
  requireArgumentSet(Arguments.ARG_NAME, actionPackageArgs.name);
  Path srcFile = extractPackagePath(actionPackageArgs.packageURI);
  Path pkgPath = sliderFileSystem.buildPackageDirPath(actionPackageArgs.name,
      actionPackageArgs.version);
  FileSystem fs = sliderFileSystem.getFileSystem();
  if (!fs.exists(pkgPath)) {
    fs.mkdirs(pkgPath);
  }
  Path fileInFs = new Path(pkgPath, srcFile.getName());
  // Refuse to clobber an existing package unless --replacepkg was given.
  require(actionPackageArgs.replacePkg || !fs.exists(fileInFs),
      E_PACKAGE_EXISTS +" at %s. Use --replacepkg to overwrite.", fileInFs.toUri());
  log.info("Installing package {} to {} (overwrite set to {})", srcFile,
      fileInFs, actionPackageArgs.replacePkg);
  fs.copyFromLocalFile(false, actionPackageArgs.replacePkg, srcFile, fileInFs);
  createSummaryMetainfoFile(srcFile, fileInFs, actionPackageArgs.replacePkg);
  String destPathWithHomeDir = Path
      .getPathWithoutSchemeAndAuthority(fileInFs).toString();
  String destHomeDir = Path.getPathWithoutSchemeAndAuthority(
      fs.getHomeDirectory()).toString();
  // a somewhat contrived approach to stripping out the home directory and any trailing
  // separator; designed to work on windows and unix
  String destPathWithoutHomeDir;
  if (destPathWithHomeDir.startsWith(destHomeDir)) {
    destPathWithoutHomeDir = destPathWithHomeDir.substring(destHomeDir.length());
    if (destPathWithoutHomeDir.startsWith("/") || destPathWithoutHomeDir.startsWith("\\")) {
      destPathWithoutHomeDir = destPathWithoutHomeDir.substring(1);
    }
  } else {
    destPathWithoutHomeDir = destPathWithHomeDir;
  }
  log.info("Set " + AgentKeys.APP_DEF + " in your app config JSON to {}",
      destPathWithoutHomeDir);
  return EXIT_SUCCESS;
}
/**
 * Validates the supplied package URI and resolves it to a local file path.
 *
 * @param packageURI local filesystem location of the package file
 * @return the package as a Hadoop {@link Path}
 * @throws BadCommandArgumentsException if the URI is unset or does not
 *         name a readable regular file
 */
private Path extractPackagePath(String packageURI)
    throws BadCommandArgumentsException {
  require(isSet(packageURI), E_INVALID_APPLICATION_PACKAGE_LOCATION);
  File localPackage = new File(packageURI);
  // Must be an existing regular file, not a directory or missing path.
  require(localPackage.isFile(),
      E_UNABLE_TO_READ_SUPPLIED_PACKAGE_FILE + ": " + localPackage.getAbsolutePath());
  return new Path(localPackage.toURI());
}
/**
 * Deletes an installed package directory (name plus optional version)
 * from the shared package install path on the cluster filesystem.
 *
 * @param actionPackageArgs arguments carrying the package name and version
 * @return {@code EXIT_SUCCESS} when deleted, {@code EXIT_NOT_FOUND} when
 *         the filesystem reports the delete failed
 * @throws YarnException if the package name is unset or the package
 *         directory does not exist (raised by the require checks)
 * @throws IOException on filesystem errors
 */
private int actionPackageDelete(ActionPackageArgs actionPackageArgs) throws
    YarnException, IOException {
  requireArgumentSet(Arguments.ARG_NAME, actionPackageArgs.name);
  Path pkgPath = sliderFileSystem.buildPackageDirPath(actionPackageArgs.name,
      actionPackageArgs.version);
  FileSystem fs = sliderFileSystem.getFileSystem();
  require(fs.exists(pkgPath), E_PACKAGE_DOES_NOT_EXIST +": %s ", pkgPath.toUri());
  log.info("Deleting package {} at {}.", actionPackageArgs.name, pkgPath);
  if (fs.delete(pkgPath, true)) {
    // Fix: pass the name as an SLF4J parameter instead of concatenating it
    // after "{}", which previously logged a literal "{}" before the name.
    log.info("Deleted package {}", actionPackageArgs.name);
    return EXIT_SUCCESS;
  } else {
    log.warn("Package deletion failed.");
    return EXIT_NOT_FOUND;
  }
}
@Override
public int actionUpdate(String clustername,
AbstractClusterBuildingActionArgs buildInfo) throws
@ -1594,7 +1271,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
String PLACEHOLDER_PATTERN = "\\$\\{[^{]+\\}";
Pattern placeholderPattern = Pattern.compile(PLACEHOLDER_PATTERN);
Matcher placeholderMatcher = placeholderPattern.matcher(env);
Map<String, String> placeholderKeyValueMap = new HashMap<String, String>();
Map<String, String> placeholderKeyValueMap = new HashMap<>();
if (placeholderMatcher.find()) {
String placeholderKey = placeholderMatcher.group();
String systemKey = placeholderKey
@ -1865,16 +1542,20 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
@Override
@VisibleForTesting
public void actionFlex(String appName, ActionFlexArgs args)
public int actionFlex(String appName, ActionFlexArgs args)
throws YarnException, IOException {
Component component = new Component();
component.setNumberOfContainers(args.getNumberOfContainers());
if (StringUtils.isEmpty(args.getComponent())) {
component.setName("DEFAULT");
} else {
component.setName(args.getComponent());
Map<String, Long> componentCounts = new HashMap<>(args.getComponentMap()
.size());
for (Entry<String, String> entry : args.getComponentMap().entrySet()) {
long numberOfContainers = Long.parseLong(entry.getValue());
componentCounts.put(entry.getKey(), numberOfContainers);
}
flex(appName, component);
// throw usage exception if no changes proposed
if (componentCounts.size() == 0) {
actionHelp(ACTION_FLEX);
}
flex(appName, componentCounts);
return EXIT_SUCCESS;
}
@Override
@ -1966,19 +1647,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
return EXIT_SUCCESS;
}
/**
 * Sends an echo message to the named application's AM and returns the
 * response.
 *
 * @param name application instance to contact
 * @param args echo arguments carrying the message; message must be set
 * @return the echoed response from the cluster
 * @throws YarnException on cluster errors, or
 *         BadCommandArgumentsException when no message was supplied
 * @throws IOException on connection problems
 */
@Override
public String actionEcho(String name, ActionEchoArgs args) throws
    YarnException,
    IOException {
  if (args.message == null) {
    throw new BadCommandArgumentsException("missing message");
  }
  return new SliderClusterOperations(bondToCluster(name)).echo(args.message);
}
/**
* Find an instance of an application belonging to the current user.
* @param appname application name
@ -2099,7 +1767,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
}
@Override
public void actionStop(String appName, ActionFreezeArgs freezeArgs)
public int actionStop(String appName, ActionFreezeArgs freezeArgs)
throws YarnException, IOException {
validateClusterName(appName);
ApplicationReport app = findInstance(appName);
@ -2112,7 +1780,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
.ordinal()) {
log.info("Application {} is in a terminated state {}", appName,
app.getYarnApplicationState());
return;
return EXIT_SUCCESS;
}
try {
@ -2127,6 +1795,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
+ " gracefully, forcefully kill the app.");
yarnClient.killApplication(app.getApplicationId(), freezeArgs.message);
}
return EXIT_SUCCESS;
}
@Override
@ -2143,30 +1812,30 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
return 0;
}
public long flex(String appName, Component component)
throws YarnException, IOException {
public Map<String, Long> flex(String appName, Map<String, Long>
componentCounts) throws YarnException, IOException {
validateClusterName(appName);
Path appDir = sliderFileSystem.buildClusterDirPath(appName);
Path appJson = new Path(appDir, appName + ".json");
Application persistedApp =
jsonSerDeser.load(sliderFileSystem.getFileSystem(), appJson);
long original = 0;
boolean foundComponent = false;
Map<String, Long> original = new HashMap<>(componentCounts.size());
for (Component persistedComp : persistedApp.getComponents()) {
if (persistedComp.getName().equals(component.getName())) {
original = persistedComp.getNumberOfContainers();
persistedComp.setNumberOfContainers(component.getNumberOfContainers());
foundComponent = true;
break;
String name = persistedComp.getName();
if (componentCounts.containsKey(persistedComp.getName())) {
original.put(name, persistedComp.getNumberOfContainers());
persistedComp.setNumberOfContainers(componentCounts.get(name));
}
}
if (!foundComponent) {
throw new YarnException("Component " + component.getName()
+ " does not exist in app definition.");
if (original.size() < componentCounts.size()) {
componentCounts.keySet().removeAll(original.keySet());
throw new YarnException("Components " + componentCounts.keySet()
+ " do not exist in app definition.");
}
jsonSerDeser
.save(sliderFileSystem.getFileSystem(), appJson, persistedApp, true);
log.info("Updated app definition file for component " + component);
log.info("Updated app definition file for components " + componentCounts
.keySet());
ApplicationReport instance = findInstance(appName);
if (instance != null) {
@ -2174,11 +1843,14 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
SliderClusterProtocol appMaster = connect(instance);
SliderClusterOperations clusterOps =
new SliderClusterOperations(appMaster);
clusterOps.flex(component);
log.info(
"Application name = " + appName + ", Component name = " + component
.getName() + ", number of containers updated from " + original
+ " to " + component.getNumberOfContainers());
clusterOps.flex(componentCounts);
for (Entry<String, Long> componentCount : componentCounts.entrySet()) {
log.info(
"Application name = " + appName + ", Component name = " +
componentCount.getKey() + ", number of containers updated " +
"from " + original.get(componentCount.getKey()) + " to " +
componentCount.getValue());
}
} else {
String message = "Application " + appName + "does not exist in RM. ";
throw new YarnException(message);

View File

@ -20,26 +20,18 @@ package org.apache.slider.client;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.types.NodeInformationList;
import org.apache.slider.api.types.SliderInstanceDescription;
import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
import org.apache.slider.common.params.ActionAMSuicideArgs;
import org.apache.slider.common.params.ActionClientArgs;
import org.apache.slider.common.params.ActionDependencyArgs;
import org.apache.slider.common.params.ActionDestroyArgs;
import org.apache.slider.common.params.ActionDiagnosticArgs;
import org.apache.slider.common.params.ActionEchoArgs;
import org.apache.slider.common.params.ActionFlexArgs;
import org.apache.slider.common.params.ActionFreezeArgs;
import org.apache.slider.common.params.ActionInstallKeytabArgs;
import org.apache.slider.common.params.ActionInstallPackageArgs;
import org.apache.slider.common.params.ActionKeytabArgs;
import org.apache.slider.common.params.ActionNodesArgs;
import org.apache.slider.common.params.ActionPackageArgs;
import org.apache.slider.common.params.ActionKillContainerArgs;
import org.apache.slider.common.params.ActionListArgs;
import org.apache.slider.common.params.ActionRegistryArgs;
@ -53,7 +45,6 @@ import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.providers.AbstractClientProvider;
import java.io.IOException;
import java.util.Map;
/**
* Interface of those method calls in the slider API that are intended
@ -63,8 +54,7 @@ import java.util.Map;
*/
public interface SliderClientAPI extends Service {
void actionDestroy(String clustername) throws YarnException,
IOException;
int actionDestroy(String clustername) throws YarnException, IOException;
/**
* AM to commit an asynchronous suicide
@ -81,18 +71,6 @@ public interface SliderClientAPI extends Service {
AbstractClientProvider createClientProvider(String provider)
throws SliderException;
/**
* Upload keytab to a designated sub-directory of the user home directory
*
* @param installKeytabInfo the arguments needed to upload the keytab
* @throws YarnException Yarn problems
* @throws IOException other problems
* @throws BadCommandArgumentsException bad arguments.
* @deprecated use #actionKeytab
*/
int actionInstallKeytab(ActionInstallKeytabArgs installKeytabInfo)
throws YarnException, IOException;
/**
* Manage keytabs leveraged by slider
*
@ -104,17 +82,6 @@ public interface SliderClientAPI extends Service {
int actionKeytab(ActionKeytabArgs keytabInfo)
throws YarnException, IOException;
/**
* Upload application package to user home directory
*
* @param installPkgInfo the arguments needed to upload the package
* @throws YarnException Yarn problems
* @throws IOException other problems
* @throws BadCommandArgumentsException bad arguments.
*/
int actionInstallPkg(ActionInstallPackageArgs installPkgInfo)
throws YarnException, IOException;
/**
* Manage file resources leveraged by slider
*
@ -137,17 +104,6 @@ public interface SliderClientAPI extends Service {
int actionClient(ActionClientArgs clientInfo)
throws IOException, YarnException;
/**
* Managing slider application package
*
* @param pkgInfo the arguments needed to upload, delete or list the package
* @throws YarnException Yarn problems
* @throws IOException other problems
* @throws BadCommandArgumentsException bad arguments.
*/
int actionPackage(ActionPackageArgs pkgInfo)
throws YarnException, IOException;
/**
* Update the cluster specification
*
@ -179,7 +135,8 @@ public interface SliderClientAPI extends Service {
int actionList(String clustername, ActionListArgs args) throws IOException, YarnException;
void actionFlex(String name, ActionFlexArgs args) throws YarnException, IOException;
int actionFlex(String name, ActionFlexArgs args) throws YarnException,
IOException;
/**
* Test for a cluster existing probe for a cluster of the given name existing
@ -199,17 +156,6 @@ public interface SliderClientAPI extends Service {
int actionKillContainer(String name, ActionKillContainerArgs args)
throws YarnException, IOException;
/**
* Echo operation (not currently wired up to command line)
* @param name cluster name
* @param args arguments
* @return the echoed text
* @throws YarnException
* @throws IOException
*/
String actionEcho(String name, ActionEchoArgs args)
throws YarnException, IOException;
/**
* Status operation
*
@ -246,7 +192,7 @@ public interface SliderClientAPI extends Service {
* @param freezeArgs arguments to the stop
* @return EXIT_SUCCESS if the cluster was not running by the end of the operation
*/
void actionStop(String clustername, ActionFreezeArgs freezeArgs)
int actionStop(String clustername, ActionFreezeArgs freezeArgs)
throws YarnException, IOException;
/**

View File

@ -25,7 +25,6 @@ import org.apache.slider.api.SliderClusterProtocol;
import org.apache.slider.api.StateValues;
import org.apache.slider.api.proto.Messages;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.api.types.ContainerInformation;
import org.apache.slider.api.types.NodeInformation;
import org.apache.slider.api.types.NodeInformationList;
@ -44,6 +43,8 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import static org.apache.slider.api.types.RestTypeMarshalling.unmarshall;
@ -283,12 +284,17 @@ public class SliderClusterOperations {
return state;
}
public void flex(Component component) throws IOException{
Messages.FlexComponentRequestProto request =
Messages.FlexComponentRequestProto.newBuilder()
.setNumberOfContainers(component.getNumberOfContainers().intValue())
.setName(component.getName()).build();
appMaster.flexComponent(request);
/**
 * Resize one or more components of the running application by sending a
 * single flex request to the application master.
 *
 * @param componentCounts map of component name to desired container count
 * @throws IOException on RPC failure
 */
public void flex(Map<String, Long> componentCounts) throws IOException{
Messages.FlexComponentsRequestProto.Builder request =
Messages.FlexComponentsRequestProto.newBuilder();
// One ComponentCountProto per requested component, batched into one RPC.
componentCounts.forEach((name, count) -> request.addComponents(
Messages.ComponentCountProto.newBuilder()
.setName(name)
.setNumberOfContainers(count)
.build()));
appMaster.flexComponents(request.build());
}
/**

View File

@ -18,8 +18,6 @@
package org.apache.slider.common;
import org.apache.hadoop.registry.client.api.RegistryConstants;
/**
* These are the keys that can be added to <code>conf/slider-client.xml</code>.
*/
@ -105,23 +103,6 @@ public interface SliderXmlConfKeys {
*/
String DEFAULT_DATA_DIRECTORY_PERMISSIONS = "750";
/**
*
* Use {@link RegistryConstants#KEY_REGISTRY_ZK_ROOT}
*
*/
@Deprecated
String REGISTRY_PATH = "slider.registry.path";
/**
*
* @Deprecated use {@link RegistryConstants#KEY_REGISTRY_ZK_QUORUM}
*
*/
@Deprecated
String REGISTRY_ZK_QUORUM = "slider.zookeeper.quorum";
String IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH =
"ipc.client.fallback-to-simple-auth-allowed";
String HADOOP_HTTP_FILTER_INITIALIZERS =

View File

@ -19,6 +19,13 @@
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParametersDelegate;
import com.google.common.annotations.VisibleForTesting;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import java.io.File;
import java.util.List;
import java.util.Map;
/**
* Abstract Action to build things; shares args across build and
@ -26,6 +33,14 @@ import com.beust.jcommander.Parameter;
*/
public abstract class AbstractClusterBuildingActionArgs
extends AbstractActionArgs {
@Parameter(names = {ARG_APPDEF},
description = "Template application definition file in JSON format.")
public File appDef;
public File getAppDef() {
return appDef;
}
@Parameter(names = {
ARG_QUEUE }, description = "Queue to submit the application")
public String queue;
@ -33,4 +48,42 @@ public abstract class AbstractClusterBuildingActionArgs
@Parameter(names = {
ARG_LIFETIME }, description = "Lifetime of the application from the time of request")
public long lifetime;
@ParametersDelegate
public ComponentArgsDelegate componentDelegate = new ComponentArgsDelegate();
@ParametersDelegate
public OptionArgsDelegate optionsDelegate =
new OptionArgsDelegate();
public Map<String, String> getOptionsMap() throws
BadCommandArgumentsException {
return optionsDelegate.getOptionsMap();
}
/**
* Get the role heap mapping (may be empty, but never null).
* @return role heap mapping
* @throws BadCommandArgumentsException parse problem
*/
public Map<String, Map<String, String>> getCompOptionMap() throws
BadCommandArgumentsException {
return optionsDelegate.getCompOptionMap();
}
@VisibleForTesting
public List<String> getComponentTuples() {
return componentDelegate.getComponentTuples();
}
/**
* Get the role mapping (may be empty, but never null).
* @return role mapping
* @throws BadCommandArgumentsException parse problem
*/
public Map<String, String> getComponentMap() throws
BadCommandArgumentsException {
return componentDelegate.getComponentMap();
}
}

View File

@ -15,19 +15,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
@Parameters(commandNames = {SliderActions.ACTION_BUILD},
commandDescription = SliderActions.DESCRIBE_ACTION_BUILD)
public class ActionBuildArgs extends AbstractClusterBuildingActionArgs {
public class ActionEchoArgs extends AbstractActionArgs {
@Override
public String getActionName() {
return SliderActions.ACTION_ECHO;
return SliderActions.ACTION_BUILD;
}
@Parameter(names = {ARG_MESSAGE},
description = "message to echo")
public String message;
}

View File

@ -18,25 +18,13 @@
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.beust.jcommander.ParametersDelegate;
import java.io.File;
@Parameters(commandNames = {SliderActions.ACTION_CREATE},
commandDescription = SliderActions.DESCRIBE_ACTION_CREATE)
public class ActionCreateArgs extends AbstractClusterBuildingActionArgs {
@Parameter(names = {ARG_APPDEF},
description = "Template application definition file in JSON format.")
public File appDef;
public File getAppDef() {
return appDef;
}
@Override
public String getActionName() {
return SliderActions.ACTION_CREATE;

View File

@ -18,31 +18,38 @@
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.beust.jcommander.ParametersDelegate;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import java.util.List;
import java.util.Map;
@Parameters(commandNames = {SliderActions.ACTION_FLEX},
commandDescription = SliderActions.DESCRIBE_ACTION_FLEX)
public class ActionFlexArgs extends AbstractActionArgs {
@Parameter(names = {ARG_COMPONENT},
description = "component name")
String componentName;
@Parameter(names = {ARG_COUNT},
description = "number of containers>")
long numberOfContainers;
@Override
public String getActionName() {
return SliderActions.ACTION_FLEX;
}
public String getComponent() {
return componentName;
@ParametersDelegate
public ComponentArgsDelegate componentDelegate = new ComponentArgsDelegate();
/**
* Get the component mapping (may be empty, but never null)
* @return mapping
* @throws BadCommandArgumentsException parse problem
*/
public Map<String, String> getComponentMap() throws
BadCommandArgumentsException {
return componentDelegate.getComponentMap();
}
public long getNumberOfContainers() {
return numberOfContainers;
public List<String> getComponentTuples() {
return componentDelegate.getComponentTuples();
}
}

View File

@ -1,57 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
@Parameters(commandNames = {SliderActions.ACTION_INSTALL_KEYTAB},
commandDescription = SliderActions.DESCRIBE_ACTION_INSTALL_KEYTAB)
/**
 * Arguments for the deprecated "install-keytab" action, which uploads a
 * Kerberos keytab from local disk into a named folder under the user's
 * Slider directory. Superseded by the "keytab" action; ActionKeytabArgs
 * has a migration constructor that copies these fields.
 */
public class ActionInstallKeytabArgs extends AbstractActionArgs {
// Dispatch key for this action ("install-keytab").
@Override
public String getActionName() {
return SliderActions.ACTION_INSTALL_KEYTAB;
}
// Local filesystem path of the keytab to upload.
@Parameter(names = {ARG_KEYTAB},
description = "Path to keytab on local disk")
public String keytabUri;
// Name of the destination folder the keytab is stored under.
@Parameter(names = {ARG_FOLDER},
description = "The name of the folder in which to store the keytab")
public String folder;
// When true, an existing keytab of the same name may be replaced.
@Parameter(names = {ARG_OVERWRITE}, description = "Overwrite existing keytab")
public boolean overwrite = false;
/**
 * Get the min #of params expected.
 * @return the min number of params in the {@link #parameters} field
 */
public int getMinParams() {
return 0;
}
// Upper bound on accepted positional parameters for this action.
@Override
public int getMaxParams() {
return 3;
}
}

View File

@ -1,58 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
import com.beust.jcommander.ParametersDelegate;
@Parameters(commandNames = {SliderActions.ACTION_INSTALL_PACKAGE},
commandDescription = SliderActions.DESCRIBE_ACTION_INSTALL_PACKAGE)
/**
 * Arguments for the deprecated "install-package" action, which uploads an
 * application package from local disk to the user's Slider directory.
 * Superseded by the "package" action (see ActionPackageArgs).
 */
public class ActionInstallPackageArgs extends AbstractActionArgs {
// Dispatch key for this action ("install-package").
@Override
public String getActionName() {
return SliderActions.ACTION_INSTALL_PACKAGE;
}
// Local filesystem path of the application package to upload.
@Parameter(names = {ARG_PACKAGE},
description = "Path to app package on local disk")
public String packageURI;
// Package type name used to place/identify the uploaded package.
@Parameter(names = {ARG_NAME},
description = "The type of the package")
public String name;
// When true, an already-installed package may be replaced.
@Parameter(names = {ARG_REPLACE_PKG}, description = "Overwrite existing package")
public boolean replacePkg = false;
/**
 * Get the min #of params expected.
 * @return the min number of params in the {@link #parameters} field
 */
public int getMinParams() {
return 0;
}
// At most one positional parameter is accepted.
@Override
public int getMaxParams() {
return 1;
}
}

View File

@ -26,13 +26,6 @@ import com.beust.jcommander.Parameters;
public class ActionKeytabArgs extends AbstractActionArgs {
public ActionKeytabArgs(ActionInstallKeytabArgs installKeytabInfo) {
this.install = true;
this.overwrite = installKeytabInfo.overwrite;
this.keytab = installKeytabInfo.keytabUri;
this.folder = installKeytabInfo.folder;
}
public ActionKeytabArgs() {
super();
}

View File

@ -1,81 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
@Parameters(commandNames = {SliderActions.ACTION_PACKAGE},
commandDescription = SliderActions.DESCRIBE_ACTION_PACKAGE)
/**
 * Arguments for the "package" action: install, delete, or list application
 * packages in the 'package' sub-folder of the user's Slider base directory,
 * and list application instances that reference a package. Exactly which
 * operation runs is selected by the boolean flags below.
 */
public class ActionPackageArgs extends AbstractActionArgs {
// Dispatch key for this action ("package").
@Override
public String getActionName() {
return SliderActions.ACTION_PACKAGE;
}
// Operation flag: upload/install a package.
@Parameter(names = {ARG_INSTALL},
description = "Install package in the sub-folder 'package' of the user's Slider base directory")
public boolean install;
// Operation flag: delete an installed package.
@Parameter(names = {ARG_PKGDELETE},
description = "Delete package operation")
public boolean delete;
// Operation flag: list installed packages.
@Parameter(names = {ARG_PKGLIST},
description = "List of package(s) installed")
public boolean list;
// Operation flag: list app instances that refer to the named package.
@Parameter(names = {ARG_PKGINSTANCES},
description = "Lists all application instances referring to package")
public boolean instances;
// Local filesystem path of the package (used with --install).
@Parameter(names = {ARG_PACKAGE},
description = "Path to app package on local disk")
public String packageURI;
// Package name the operation applies to.
@Parameter(names = {ARG_NAME},
description = "Package name")
public String name;
// Optional package version qualifier.
@Parameter(names = {ARG_VERSION}, description = "Package version")
public String version;
// When true, installing may replace an existing package.
@Parameter(names = {ARG_REPLACE_PKG},
description = "Overwrite existing package")
public boolean replacePkg = false;
// Optional output file for package data (e.g. listings).
@Parameter(names = {ARG_OUTPUT, ARG_OUTPUT_SHORT},
description = "Output file for package data")
public String out;
/**
 * Get the min #of params expected.
 * @return the min number of params in the {@link #parameters} field
 */
public int getMinParams() {
return 0;
}
// At most one positional parameter is accepted.
@Override
public int getMaxParams() {
return 1;
}
}

View File

@ -26,7 +26,7 @@ import java.util.List;
@Parameters(commandNames = { SliderActions.ACTION_UPGRADE },
commandDescription = SliderActions.DESCRIBE_ACTION_UPGRADE)
public class ActionUpgradeArgs extends AbstractActionArgs {
public class ActionUpgradeArgs extends AbstractClusterBuildingActionArgs {
@Override
public String getActionName() {

View File

@ -1,111 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* Delegate for application and resource options
*/
/**
 * JCommander delegate collecting application-level and resource-level
 * options from the command line: global name/value pairs and per-component
 * triples, each in app and resource variants. Raw tuples/triples are
 * converted to maps via the inherited AbstractArgsDelegate helpers.
 */
public class AppAndResouceOptionArgsDelegate extends AbstractArgsDelegate {
/**
 * Application option name/value pairs (--option name value).
 */
@Parameter(names = {ARG_OPTION, ARG_OPTION_SHORT}, arity = 2,
description = ARG_OPTION + "<name> <value>",
splitter = DontSplitArguments.class)
public List<String> optionTuples = new ArrayList<>(0);
/**
 * App component option triples: component / name / value.
 */
@Parameter(names = {ARG_COMP_OPT, ARG_COMP_OPT_SHORT, ARG_ROLEOPT}, arity = 3,
description = "Component option " + ARG_COMP_OPT +
" <component> <name> <option>",
splitter = DontSplitArguments.class)
public List<String> compOptTriples = new ArrayList<>(0);
/**
 * Resource option name/value pairs (--resopt name value).
 */
@Parameter(names = {ARG_RESOURCE_OPT, ARG_RESOURCE_OPT_SHORT}, arity = 2,
description = "Resource option "+ ARG_RESOURCE_OPT + "<name> <value>",
splitter = DontSplitArguments.class)
public List<String> resOptionTuples = new ArrayList<>(0);
/**
 * Resource component option triples: component / name / value.
 */
@Parameter(names = {ARG_RES_COMP_OPT, ARG_RES_COMP_OPT_SHORT,}, arity = 3,
description = "Component resource option " + ARG_RES_COMP_OPT +
" <component> <name> <option>",
splitter = DontSplitArguments.class)
public List<String> resCompOptTriples = new ArrayList<>(0);
// App options as a name->value map (may be empty, never null).
public Map<String, String> getOptionsMap() throws
BadCommandArgumentsException {
return convertTupleListToMap(ARG_OPTION, optionTuples);
}
/**
 * Get the per-component app option mapping (may be empty, but never null).
 * @return component -> (name -> value) mapping
 * @throws BadCommandArgumentsException parse problem
 */
public Map<String, Map<String, String>> getCompOptionMap() throws
BadCommandArgumentsException {
return convertTripleListToMaps(ARG_COMP_OPT, compOptTriples);
}
// Resource options as a name->value map (may be empty, never null).
public Map<String, String> getResourceOptionsMap() throws
BadCommandArgumentsException {
return convertTupleListToMap(ARG_RESOURCE_OPT, resOptionTuples);
}
/**
 * Get the per-component resource option mapping (may be empty, never null).
 * @return component -> (name -> value) mapping
 * @throws BadCommandArgumentsException parse problem
 */
public Map<String, Map<String, String>> getResourceCompOptionMap() throws
BadCommandArgumentsException {
return convertTripleListToMaps(ARG_RES_COMP_OPT, resCompOptTriples);
}
// Programmatic equivalent of passing --option key value.
public void setOption(String key, String value) {
optionTuples.add(key);
optionTuples.add(value);
}
// Programmatic equivalent of passing --resopt key value.
public void setResourceOption(String key, String value) {
resOptionTuples.add(key);
resOptionTuples.add(value);
}
}

View File

@ -107,12 +107,7 @@ public interface Arguments {
String ARG_LIFETIME = "--lifetime";
String ARG_REPLACE_PKG = "--replacepkg";
String ARG_RESOURCE = "--resource";
String ARG_RESOURCES = "--resources";
String ARG_RES_COMP_OPT = "--rescompopt";
String ARG_RES_COMP_OPT_SHORT = "--rco";
String ARG_RESOURCE_MANAGER = "--rm";
String ARG_RESOURCE_OPT = "--resopt";
String ARG_RESOURCE_OPT_SHORT = "-ro";
String ARG_SECURE = "--secure";
String ARG_SERVICETYPE = "--servicetype";
String ARG_SERVICES = "--services";
@ -120,7 +115,6 @@ public interface Arguments {
String ARG_SOURCE = "--source";
String ARG_STATE = "--state";
String ARG_SYSPROP = "-S";
String ARG_TEMPLATE = "--template";
String ARG_TRUSTSTORE = "--truststore";
String ARG_USER = "--user";
String ARG_UPLOAD = "--upload";
@ -136,29 +130,9 @@ public interface Arguments {
RIGHT PLACE IN THE LIST
*/
/**
* Deprecated: use ARG_COMPONENT
*/
@Deprecated
String ARG_ROLE = "--role";
/**
* Deprecated: use ARG_COMP_OPT
*/
@Deprecated
String ARG_ROLEOPT = "--roleopt";
/**
* server: URI for the cluster
*/
String ARG_CLUSTER_URI = "-cluster-uri";
/**
* server: Path for the resource manager instance (required)
*/
String ARG_RM_ADDR = "--rm";
}

View File

@ -53,6 +53,7 @@ public class ClientArgs extends CommonArgs {
// =========================================================
private final ActionAMSuicideArgs actionAMSuicideArgs = new ActionAMSuicideArgs();
private final ActionBuildArgs actionBuildArgs = new ActionBuildArgs();
private final ActionClientArgs actionClientArgs = new ActionClientArgs();
private final ActionCreateArgs actionCreateArgs = new ActionCreateArgs();
private final ActionDependencyArgs actionDependencyArgs = new ActionDependencyArgs();
@ -62,8 +63,6 @@ public class ClientArgs extends CommonArgs {
private final ActionFlexArgs actionFlexArgs = new ActionFlexArgs();
private final ActionFreezeArgs actionFreezeArgs = new ActionFreezeArgs();
private final ActionHelpArgs actionHelpArgs = new ActionHelpArgs();
private final ActionInstallPackageArgs actionInstallPackageArgs = new ActionInstallPackageArgs();
private final ActionInstallKeytabArgs actionInstallKeytabArgs = new ActionInstallKeytabArgs();
private final ActionKDiagArgs actionKDiagArgs = new ActionKDiagArgs();
private final ActionKeytabArgs actionKeytabArgs = new ActionKeytabArgs();
private final ActionKillContainerArgs actionKillContainerArgs =
@ -71,7 +70,6 @@ public class ClientArgs extends CommonArgs {
private final ActionListArgs actionListArgs = new ActionListArgs();
private final ActionLookupArgs actionLookupArgs = new ActionLookupArgs();
private final ActionNodesArgs actionNodesArgs = new ActionNodesArgs();
private final ActionPackageArgs actionPackageArgs = new ActionPackageArgs();
private final ActionRegistryArgs actionRegistryArgs = new ActionRegistryArgs();
private final ActionResolveArgs actionResolveArgs = new ActionResolveArgs();
private final ActionResourceArgs actionResourceArgs = new ActionResourceArgs();
@ -95,6 +93,7 @@ public class ClientArgs extends CommonArgs {
addActions(
actionAMSuicideArgs,
actionBuildArgs,
actionClientArgs,
actionCreateArgs,
actionDependencyArgs,
@ -104,15 +103,12 @@ public class ClientArgs extends CommonArgs {
actionFlexArgs,
actionFreezeArgs,
actionHelpArgs,
actionInstallKeytabArgs,
actionInstallPackageArgs,
actionKDiagArgs,
actionKeytabArgs,
actionKillContainerArgs,
actionListArgs,
actionLookupArgs,
actionNodesArgs,
actionPackageArgs,
actionRegistryArgs,
actionResolveArgs,
actionResourceArgs,
@ -153,14 +149,12 @@ public class ClientArgs extends CommonArgs {
return actionAMSuicideArgs;
}
public ActionInstallPackageArgs getActionInstallPackageArgs() { return actionInstallPackageArgs; }
public ActionBuildArgs getActionBuildArgs() {
return actionBuildArgs;
}
public ActionClientArgs getActionClientArgs() { return actionClientArgs; }
public ActionPackageArgs getActionPackageArgs() { return actionPackageArgs; }
public ActionInstallKeytabArgs getActionInstallKeytabArgs() { return actionInstallKeytabArgs; }
public ActionKDiagArgs getActionKDiagArgs() {
return actionKDiagArgs;
}
@ -250,6 +244,12 @@ public class ClientArgs extends CommonArgs {
action = ACTION_HELP;
}
switch (action) {
case ACTION_BUILD:
bindCoreAction(actionBuildArgs);
//its a builder, so set those actions too
buildingActionArgs = actionBuildArgs;
break;
case ACTION_CREATE:
bindCoreAction(actionCreateArgs);
//its a builder, so set those actions too
@ -296,14 +296,6 @@ public class ClientArgs extends CommonArgs {
bindCoreAction(actionHelpArgs);
break;
case ACTION_INSTALL_KEYTAB:
bindCoreAction(actionInstallKeytabArgs);
break;
case ACTION_INSTALL_PACKAGE:
bindCoreAction(actionInstallPackageArgs);
break;
case ACTION_KDIAG:
bindCoreAction(actionKDiagArgs);
break;
@ -328,10 +320,6 @@ public class ClientArgs extends CommonArgs {
bindCoreAction(actionNodesArgs);
break;
case ACTION_PACKAGE:
bindCoreAction(actionPackageArgs);
break;
case ACTION_REGISTRY:
bindCoreAction(actionRegistryArgs);
break;

View File

@ -30,7 +30,7 @@ public class ComponentArgsDelegate extends AbstractArgsDelegate {
/**
* This is a listing of the roles to create
*/
@Parameter(names = {ARG_COMPONENT, ARG_COMPONENT_SHORT, ARG_ROLE},
@Parameter(names = {ARG_COMPONENT, ARG_COMPONENT_SHORT},
arity = 2,
description = "--component <name> <count> e.g. +1 incr by 1, -2 decr by 2, and 3 makes final count 3",
splitter = DontSplitArguments.class)

View File

@ -0,0 +1,66 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.params;
import com.beust.jcommander.Parameter;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* Delegate for application and resource options.
*/
/**
 * JCommander delegate collecting application options from the command line:
 * global name/value pairs (--option) and per-component triples (--compopt).
 * Raw tuples/triples are converted to maps via the inherited
 * AbstractArgsDelegate helpers. Replacement for the older
 * AppAndResouceOptionArgsDelegate, minus the resource-option variants.
 */
public class OptionArgsDelegate extends AbstractArgsDelegate {
/**
 * Application option name/value pairs (--option name value).
 */
@Parameter(names = {ARG_OPTION, ARG_OPTION_SHORT}, arity = 2,
description = ARG_OPTION + "<name> <value>",
splitter = DontSplitArguments.class)
public List<String> optionTuples = new ArrayList<>(0);
/**
 * App component option triples: component / name / value.
 */
@Parameter(names = {ARG_COMP_OPT, ARG_COMP_OPT_SHORT}, arity = 3,
description = "Component option " + ARG_COMP_OPT +
" <component> <name> <option>",
splitter = DontSplitArguments.class)
public List<String> compOptTriples = new ArrayList<>(0);
// App options as a name->value map (may be empty, never null).
public Map<String, String> getOptionsMap() throws
BadCommandArgumentsException {
return convertTupleListToMap(ARG_OPTION, optionTuples);
}
/**
 * Get the per-component option mapping (may be empty, but never null).
 * @return component -> (name -> value) mapping
 * @throws BadCommandArgumentsException parse problem
 */
public Map<String, Map<String, String>> getCompOptionMap()
throws BadCommandArgumentsException {
return convertTripleListToMaps(ARG_COMP_OPT, compOptTriples);
}
}

View File

@ -39,14 +39,12 @@ public interface SliderActions {
String ACTION_STOP = "stop";
String ACTION_HELP = "help";
String ACTION_INSTALL_KEYTAB = "install-keytab";
String ACTION_INSTALL_PACKAGE = "install-package";
String ACTION_KDIAG = "kdiag";
String ACTION_KEYTAB = "keytab";
String ACTION_KILL_CONTAINER = "kill-container";
String ACTION_LIST = "list";
String ACTION_LOOKUP = "lookup";
String ACTION_NODES = "nodes";
String ACTION_PACKAGE = "package";
String ACTION_PREFLIGHT = "preflight";
String ACTION_RECONFIGURE = "reconfigure";
String ACTION_REGISTRY = "registry";
@ -99,12 +97,7 @@ public interface SliderActions {
"Start a stopped application";
String DESCRIBE_ACTION_VERSION =
"Print the Slider version information";
String DESCRIBE_ACTION_INSTALL_PACKAGE = "Install application package." +
" Deprecated, use '" + ACTION_PACKAGE + " " + ClientArgs.ARG_INSTALL + "'.";
String DESCRIBE_ACTION_PACKAGE = "Install/list/delete application packages and list app instances that use the packages";
String DESCRIBE_ACTION_CLIENT = "Install the application client in the specified directory or obtain a client keystore or truststore";
String DESCRIBE_ACTION_INSTALL_KEYTAB = "Install the Kerberos keytab." +
" Deprecated, use '" + ACTION_KEYTAB + " " + ClientArgs.ARG_INSTALL + "'.";
String DESCRIBE_ACTION_KEYTAB = "Manage a Kerberos keytab file (install, delete, list) in the sub-folder 'keytabs' of the user's Slider base directory";
String DESCRIBE_ACTION_DIAGNOSTIC = "Diagnose the configuration of the running slider application and slider client";
String DESCRIBE_ACTION_RESOURCE = "Manage a file (install, delete, list) in the 'resources' sub-folder of the user's Slider base directory";

View File

@ -25,7 +25,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.registry.client.api.RegistryConstants;
import org.apache.slider.common.SliderKeys;
import org.apache.slider.common.SliderXmlConfKeys;
import org.apache.slider.core.exceptions.BadConfigException;
@ -613,13 +612,6 @@ public class ConfigHelper {
* Register anything we consider deprecated
*/
public static void registerDeprecatedConfigItems() {
// legacy slider ZK quorum key now maps onto the registry quorum key
Configuration.addDeprecation(
SliderXmlConfKeys.REGISTRY_ZK_QUORUM,
RegistryConstants.KEY_REGISTRY_ZK_QUORUM);
// legacy slider registry path key now maps onto the registry ZK root key
Configuration.addDeprecation(
SliderXmlConfKeys.REGISTRY_PATH,
RegistryConstants.KEY_REGISTRY_ZK_ROOT);
}
/**

View File

@ -21,14 +21,10 @@ package org.apache.slider.common.tools;
import com.google.common.base.Preconditions;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@ -52,6 +48,8 @@ import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.slider.Slider;
import org.apache.slider.api.RoleKeys;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.api.types.ContainerInformation;
import org.apache.slider.common.SliderKeys;
import org.apache.slider.common.SliderXmlConfKeys;
@ -70,7 +68,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
@ -501,6 +498,26 @@ public final class SliderUtils {
return sw.toString();
}
/**
 * Return only the first line of a (possibly multi-line) message. This is
 * typically used to prune the stack trace that YARN appends to exception
 * messages delivered through AMRMClientAsync callbacks.
 *
 * @param msg message text, possibly spanning several lines
 * @return the text up to (but excluding) the first line separator, or the
 *         original value when it is null, blank or single-line
 */
public static String extractFirstLine(String msg) {
  if (StringUtils.isBlank(msg)) {
    // null/empty/whitespace-only: nothing to prune
    return msg;
  }
  int eol = msg.indexOf(System.lineSeparator());
  return eol < 0 ? msg : msg.substring(0, eol);
}
/**
* Create a configuration with Slider-specific tuning.
* This is done rather than doing custom configs.
@ -2046,48 +2063,6 @@ public final class SliderUtils {
}
}
/**
 * Open a named entry inside a ZIP-packaged application resource and
 * buffer it fully in memory.
 *
 * @param fs filesystem holding the package
 * @param appPath path of the ZIP package
 * @param entry entry name to look up inside the package
 * @return an in-memory stream over the entry's bytes, or null if the
 *         entry is not present in the package
 * @throws IOException on filesystem errors, or if the archive ends
 *         before the advertised entry size has been read
 */
public static InputStream getApplicationResourceInputStream(FileSystem fs,
    Path appPath,
    String entry)
    throws IOException {
  InputStream is = null;
  try(FSDataInputStream appStream = fs.open(appPath)) {
    ZipArchiveInputStream zis = new ZipArchiveInputStream(appStream);
    ZipArchiveEntry zipEntry;
    boolean done = false;
    while (!done && (zipEntry = zis.getNextZipEntry()) != null) {
      if (entry.equals(zipEntry.getName())) {
        int size = (int) zipEntry.getSize();
        if (size != -1) {
          log.info("Reading {} of size {}", zipEntry.getName(),
              zipEntry.getSize());
          byte[] content = new byte[size];
          int offset = 0;
          while (offset < size) {
            int read = zis.read(content, offset, size - offset);
            if (read == -1) {
              // BUG FIX: read() returns -1 at end of stream; the old
              // loop added -1 to offset and spun forever on a
              // truncated archive. Fail fast instead.
              throw new IOException("Truncated ZIP entry " + entry
                  + " in " + appPath + ": expected " + size
                  + " bytes but only read " + offset);
            }
            offset += read;
          }
          is = new ByteArrayInputStream(content);
        } else {
          // entry size unknown: copy byte-by-byte until EOF
          log.debug("Size unknown. Reading {}", zipEntry.getName());
          try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
            while (true) {
              int byteRead = zis.read();
              if (byteRead == -1) {
                break;
              }
              baos.write(byteRead);
            }
            is = new ByteArrayInputStream(baos.toByteArray());
          }
        }
        done = true;
      }
    }
  }
  return is;
}
/**
* Check for any needed libraries being present. On Unix none are needed;
* on windows they must be present
@ -2525,4 +2500,53 @@ public final class SliderUtils {
return EnumSet.range(YarnApplicationState.FINISHED,
YarnApplicationState.KILLED);
}
// suffixes applied to a base key to spread a time range over four values
public static final String DAYS = ".days";
public static final String HOURS = ".hours";
public static final String MINUTES = ".minutes";
public static final String SECONDS = ".seconds";

/**
 * Get the time range of a set of keys
 * (basekey.days/.hours/.minutes/.seconds) as a total number of seconds.
 *
 * @param conf configuration to read properties from
 * @param basekey base key to which suffix gets applied
 * @param defDays default day count
 * @param defHours default hour count
 * @param defMins default minute count
 * @param defSecs default second count
 * @return the aggregate time range in seconds
 */
public static long getTimeRange(org.apache.slider.api.resource
    .Configuration conf,
    String basekey,
    long defDays,
    long defHours,
    long defMins,
    long defSecs) {
  Preconditions.checkArgument(basekey != null);
  long days = conf.getPropertyLong(basekey + DAYS, defDays);
  long hours = conf.getPropertyLong(basekey + HOURS, defHours);
  long minutes = conf.getPropertyLong(basekey + MINUTES, defMins);
  long seconds = conf.getPropertyLong(basekey + SECONDS, defSecs);
  // range check
  Preconditions.checkState(days >= 0 && hours >= 0 && minutes >= 0
      && seconds >= 0,
      "Time range for %s has negative time component %s:%s:%s:%s",
      basekey, days, hours, minutes, seconds);
  // BUG FIX: hours were previously multiplied by 24 (day/hour mixup), so
  // each configured hour contributed only 24 minutes. An hour is 60
  // minutes.
  long totalMinutes = days * 24 * 60 + hours * 60 + minutes;
  return totalMinutes * 60 + seconds;
}
/**
 * Push the application-level configuration properties down into every
 * component's configuration, keeping any value a component already set.
 */
public static void resolve(Application application) {
  org.apache.slider.api.resource.Configuration globalConf =
      application.getConfiguration();
  for (Component comp : application.getComponents()) {
    // component-local values win; global values fill the gaps
    mergeMapsIgnoreDuplicateKeys(comp.getConfiguration().getProperties(),
        globalConf.getProperties());
  }
  // TODO merge other information to components
}
}

View File

@ -43,7 +43,6 @@ import org.slf4j.LoggerFactory;
import java.lang.reflect.Method;
import java.lang.reflect.InvocationTargetException;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;

View File

@ -337,5 +337,12 @@ public class ZKIntegration implements Watcher, Closeable {
return SVC_SLIDER_USERS + "/" + username;
}
/**
 * Blocking enumeration of this user's clusters (synchronous ZooKeeper
 * getChildren call on the user path).
 * @return an unordered list of clusters under a user
 * @throws KeeperException on ZooKeeper failures
 * @throws InterruptedException if the ZooKeeper call is interrupted
 */
public List<String> getClusters() throws KeeperException,
InterruptedException {
return zookeeper.getChildren(userPath, null);
}
}

View File

@ -18,6 +18,7 @@
package org.apache.slider.providers;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.api.resource.Component;
/**
@ -36,6 +37,14 @@ public final class ProviderRole {
public final String labelExpression;
public final Component component;
/**
 * Create a provider role with the default placement policy, node failure
 * threshold, placement escalation delay and YARN label expression.
 * @param name role name
 * @param id role id
 */
public ProviderRole(String name, int id) {
this(name,
id,
PlacementPolicy.DEFAULT,
ResourceKeys.DEFAULT_NODE_FAILURE_THRESHOLD,
ResourceKeys.DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS,
ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
}
/**
* Create a provider role
@ -59,7 +68,8 @@ public final class ProviderRole {
policy,
nodeFailureThreshold,
placementTimeoutSeconds,
labelExpression, null);
labelExpression,
new Component().name(name).numberOfContainers(0L));
}
/**

View File

@ -97,7 +97,6 @@ import org.apache.slider.common.tools.PortScanner;
import org.apache.slider.common.tools.SliderFileSystem;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.common.tools.SliderVersionInfo;
import org.apache.slider.core.conf.MapOperations;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.core.exceptions.SliderInternalStateException;
@ -855,7 +854,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
providerService.setAMState(stateForProviders);
// chaos monkey
// maybeStartMonkey();
maybeStartMonkey();
// if not a secure cluster, extract the username -it will be
// propagated to workers
@ -1597,7 +1596,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
* @throws SliderException slider problems, including invalid configs
* @throws IOException IO problems
*/
public void flexCluster(Messages.FlexComponentRequestProto request)
public void flexCluster(Messages.FlexComponentsRequestProto request)
throws IOException, SliderException {
if (request != null) {
appState.updateComponents(request);
@ -1619,24 +1618,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
ResetFailureWindow reset = new ResetFailureWindow(rmOperationHandler);
long days =
conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".days",
ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS);
long hours =
conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".hours",
ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS);
long minutes =
conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".minutes",
ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES);
long seconds =
conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".seconds",
0);
Preconditions
.checkState(days >= 0 && hours >= 0 && minutes >= 0 && seconds >= 0,
"Time range for has negative time component %s:%s:%s:%s", days,
hours, minutes, seconds);
long totalMinutes = days * 24 * 60 + hours * 24 + minutes;
long totalSeconds = totalMinutes * 60 + seconds;
long totalSeconds = SliderUtils.getTimeRange(conf,
ResourceKeys.CONTAINER_FAILURE_WINDOW,
ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS,
ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS,
ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES,
0);
if (totalSeconds > 0) {
log.info("Scheduling the failure window reset interval to every {}"
+ " seconds", totalSeconds);
@ -1810,12 +1797,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
LOG_YARN.error("AMRMClientAsync.onError() received {}", e, e);
signalAMComplete(new ActionStopSlider("stop", EXIT_EXCEPTION_THROWN,
FinalApplicationStatus.FAILED,
"AMRMClientAsync.onError() received " + e));
SliderUtils.extractFirstLine(e.getLocalizedMessage())));
} else if (e instanceof InvalidApplicationMasterRequestException) {
// halt the AM
LOG_YARN.error("AMRMClientAsync.onError() received {}", e, e);
queue(new ActionHalt(EXIT_EXCEPTION_THROWN,
"AMRMClientAsync.onError() received " + e));
SliderUtils.extractFirstLine(e.getLocalizedMessage())));
} else {
// ignore and log
LOG_YARN.info("Ignoring AMRMClientAsync.onError() received {}", e);
@ -2040,7 +2027,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
*/
public void onExceptionInThread(Thread thread, Throwable exception) {
log.error("Exception in {}: {}", thread.getName(), exception, exception);
// if there is a teardown in progress, ignore it
if (amCompletionFlag.get()) {
log.info("Ignoring exception: shutdown in progress");
@ -2052,26 +2039,27 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
signalAMComplete(new ActionStopSlider("stop",
exitCode,
FinalApplicationStatus.FAILED,
exception.toString()));
SliderUtils.extractFirstLine(exception.getLocalizedMessage())));
}
}
/**
* TODO Start the chaos monkey
* TODO Read chaos monkey params from AM configuration rather than app
* configuration
* @return true if it started
*/
private boolean maybeStartMonkey() {
// MapOperations internals = getGlobalInternalOptions();
MapOperations internals = new MapOperations();
Boolean enabled =
internals.getOptionBool(InternalKeys.CHAOS_MONKEY_ENABLED,
InternalKeys.DEFAULT_CHAOS_MONKEY_ENABLED);
org.apache.slider.api.resource.Configuration configuration =
application.getConfiguration();
boolean enabled = configuration.getPropertyBool(
InternalKeys.CHAOS_MONKEY_ENABLED,
InternalKeys.DEFAULT_CHAOS_MONKEY_ENABLED);
if (!enabled) {
log.debug("Chaos monkey disabled");
return false;
}
long monkeyInterval = internals.getTimeRange(
long monkeyInterval = SliderUtils.getTimeRange(configuration,
InternalKeys.CHAOS_MONKEY_INTERVAL,
InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS,
InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS,
@ -2083,7 +2071,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
return false;
}
long monkeyDelay = internals.getTimeRange(
long monkeyDelay = SliderUtils.getTimeRange(configuration,
InternalKeys.CHAOS_MONKEY_DELAY,
0,
0,
@ -2098,10 +2086,11 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
// configure the targets
// launch failure: special case with explicit failure triggered now
int amLaunchFailProbability = internals.getOptionInt(
int amLaunchFailProbability = configuration.getPropertyInt(
InternalKeys.CHAOS_MONKEY_PROBABILITY_AM_LAUNCH_FAILURE,
0);
if (amLaunchFailProbability> 0 && monkey.chaosCheck(amLaunchFailProbability)) {
if (amLaunchFailProbability > 0 && monkey.chaosCheck(
amLaunchFailProbability)) {
log.info("Chaos Monkey has triggered AM Launch failure");
// trigger a failure
ActionStopSlider stop = new ActionStopSlider("stop",
@ -2112,12 +2101,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
queue(stop);
}
int amKillProbability = internals.getOptionInt(
int amKillProbability = configuration.getPropertyInt(
InternalKeys.CHAOS_MONKEY_PROBABILITY_AM_FAILURE,
InternalKeys.DEFAULT_CHAOS_MONKEY_PROBABILITY_AM_FAILURE);
monkey.addTarget("AM killer",
new ChaosKillAM(actionQueues, -1), amKillProbability);
int containerKillProbability = internals.getOptionInt(
int containerKillProbability = configuration.getPropertyInt(
InternalKeys.CHAOS_MONKEY_PROBABILITY_CONTAINER_FAILURE,
InternalKeys.DEFAULT_CHAOS_MONKEY_PROBABILITY_CONTAINER_FAILURE);
monkey.addTarget("Container killer",

View File

@ -26,9 +26,9 @@ import java.util.concurrent.TimeUnit;
public class ActionFlexCluster extends AsyncAction {
final Messages.FlexComponentRequestProto requestProto;
final Messages.FlexComponentsRequestProto requestProto;
public ActionFlexCluster(String name, long delay, TimeUnit timeUnit,
Messages.FlexComponentRequestProto requestProto) {
Messages.FlexComponentsRequestProto requestProto) {
super(name, delay, timeUnit, ATTR_CHANGES_APP_SIZE);
this.requestProto = requestProto;
}

View File

@ -37,13 +37,13 @@ import static org.apache.hadoop.metrics2.lib.Interns.info;
@Metrics(context = "yarn-native-service")
public class SliderMetrics implements MetricsSource {
@Metric("containers pending")
public MutableGaugeInt containersPending;
@Metric("containers requested")
public MutableGaugeInt containersRequested;
@Metric("anti-affinity containers pending")
public MutableGaugeInt pendingAAContainers;
@Metric("containers pending")
@Metric("containers running")
public MutableGaugeInt containersRunning;
@Metric("containers requested")
@Metric("containers desired")
public MutableGaugeInt containersDesired;
@Metric("containers completed")
public MutableGaugeInt containersCompleted;
@ -53,8 +53,12 @@ public class SliderMetrics implements MetricsSource {
public MutableGaugeInt failedSinceLastThreshold;
@Metric("containers preempted")
public MutableGaugeInt containersPreempted;
@Metric("containers exceeded limits")
public MutableGaugeInt containersLimitsExceeded;
@Metric("containers surplus")
public MutableGaugeInt surplusContainers;
@Metric("containers failed due to disk failure")
public MutableGaugeInt containersDiskFailure;
protected final MetricsRegistry registry;

View File

@ -70,11 +70,11 @@ public class SliderClusterProtocolPBImpl implements SliderClusterProtocolPB {
}
@Override
public Messages.FlexComponentResponseProto flexComponent(
RpcController controller, Messages.FlexComponentRequestProto request)
public Messages.FlexComponentsResponseProto flexComponents(
RpcController controller, Messages.FlexComponentsRequestProto request)
throws ServiceException {
try {
return real.flexComponent(request);
return real.flexComponents(request);
} catch (IOException e) {
throw wrap(e);
}

View File

@ -110,10 +110,10 @@ public class SliderClusterProtocolProxy implements SliderClusterProtocol {
}
@Override
public Messages.FlexComponentResponseProto flexComponent(
Messages.FlexComponentRequestProto request) throws IOException {
public Messages.FlexComponentsResponseProto flexComponents(
Messages.FlexComponentsRequestProto request) throws IOException {
try {
return endpoint.flexComponent(NULL_CONTROLLER, request);
return endpoint.flexComponents(NULL_CONTROLLER, request);
} catch (ServiceException e) {
throw convert(e);
}

View File

@ -196,11 +196,11 @@ public class SliderIPCService extends AbstractService
}
@Override
public Messages.FlexComponentResponseProto flexComponent(
Messages.FlexComponentRequestProto request) throws IOException {
public Messages.FlexComponentsResponseProto flexComponents(
Messages.FlexComponentsRequestProto request) throws IOException {
onRpcCall("flex");
schedule(new ActionFlexCluster("flex", 1, TimeUnit.MILLISECONDS, request));
return Messages.FlexComponentResponseProto.newBuilder().build();
return Messages.FlexComponentsResponseProto.newBuilder().build();
}
@Override //SliderClusterProtocol

View File

@ -16,9 +16,23 @@
*/
package org.apache.slider.server.appmaster.security;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import static org.apache.slider.core.main.LauncherExitCodes.EXIT_UNAUTHORIZED;
import org.apache.slider.api.resource.Application;
import org.apache.slider.common.SliderKeys;
import org.apache.slider.common.SliderXmlConfKeys;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.core.exceptions.SliderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
/**
* Class keeping code security information
*/
@ -26,111 +40,122 @@ public class SecurityConfiguration {
protected static final Logger log =
LoggerFactory.getLogger(SecurityConfiguration.class);
private final Configuration configuration;
private final Application application;
private String clusterName;
// private void validate() throws SliderException {
// if (isSecurityEnabled()) {
// String principal = instanceDefinition.getAppConfOperations()
// .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
// if(SliderUtils.isUnset(principal)) {
// // if no login identity is available, fail
// UserGroupInformation loginUser = null;
// try {
// loginUser = getLoginUser();
// } catch (IOException e) {
// throw new SliderException(EXIT_UNAUTHORIZED, e,
// "No principal configured for the application and "
// + "exception raised during retrieval of login user. "
// + "Unable to proceed with application "
// + "initialization. Please ensure a value "
// + "for %s exists in the application "
// + "configuration or the login issue is addressed",
// SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
// }
// if (loginUser == null) {
// throw new SliderException(EXIT_UNAUTHORIZED,
// "No principal configured for the application "
// + "and no login user found. "
// + "Unable to proceed with application "
// + "initialization. Please ensure a value "
// + "for %s exists in the application "
// + "configuration or the login issue is addressed",
// SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
// }
// }
// // ensure that either local or distributed keytab mechanism is enabled,
// // but not both
// String keytabFullPath = instanceDefinition.getAppConfOperations()
// .getComponent(SliderKeys.COMPONENT_AM)
// .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
// String keytabName = instanceDefinition.getAppConfOperations()
// .getComponent(SliderKeys.COMPONENT_AM)
// .get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
// if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) {
// throw new SliderException(EXIT_UNAUTHORIZED,
// "Both a keytab on the cluster host (%s) and a"
// + " keytab to be retrieved from HDFS (%s) are"
// + " specified. Please configure only one keytab"
// + " retrieval mechanism.",
// SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
// SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
//
// }
// }
// }
//
// protected UserGroupInformation getLoginUser() throws IOException {
// return UserGroupInformation.getLoginUser();
// }
//
// public boolean isSecurityEnabled () {
// return SliderUtils.isHadoopClusterSecure(configuration);
// }
//
// public String getPrincipal () throws IOException {
// String principal = instanceDefinition.getAppConfOperations()
// .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
// if (SliderUtils.isUnset(principal)) {
// principal = UserGroupInformation.getLoginUser().getShortUserName();
// log.info("No principal set in the slider configuration. Will use AM login"
// + " identity {} to attempt keytab-based login", principal);
// }
//
// return principal;
// }
//
// public boolean isKeytabProvided() {
// boolean keytabProvided = instanceDefinition.getAppConfOperations()
// .getComponent(SliderKeys.COMPONENT_AM)
// .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH) != null ||
// instanceDefinition.getAppConfOperations()
// .getComponent(SliderKeys.COMPONENT_AM).
// get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME) != null;
// return keytabProvided;
//
// }
//
// public File getKeytabFile(AggregateConf instanceDefinition)
// throws SliderException, IOException {
// //TODO implement this for dash semantic
// String keytabFullPath = instanceDefinition.getAppConfOperations()
// .getComponent(SliderKeys.COMPONENT_AM)
// .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
// File localKeytabFile;
// if (SliderUtils.isUnset(keytabFullPath)) {
// // get the keytab
// String keytabName = instanceDefinition.getAppConfOperations()
// .getComponent(SliderKeys.COMPONENT_AM).
// get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
// log.info("No host keytab file path specified. Will attempt to retrieve"
// + " keytab file {} as a local resource for the container",
// keytabName);
// // download keytab to local, protected directory
// localKeytabFile = new File(SliderKeys.KEYTAB_DIR, keytabName);
// } else {
// log.info("Using host keytab file {} for login", keytabFullPath);
// localKeytabFile = new File(keytabFullPath);
// }
// return localKeytabFile;
// }
/**
 * Build and validate the security configuration.
 *
 * @param configuration hadoop configuration, used to determine whether
 *        the cluster is secure
 * @param application application definition whose configuration supplies
 *        the principal and keytab settings
 * @param clusterName name of the cluster
 * @throws SliderException if validation fails (no usable principal, or
 *         conflicting keytab settings, in a secure cluster)
 */
public SecurityConfiguration(Configuration configuration,
Application application,
String clusterName) throws SliderException {
Preconditions.checkNotNull(configuration);
Preconditions.checkNotNull(application);
Preconditions.checkNotNull(clusterName);
this.configuration = configuration;
this.application = application;
this.clusterName = clusterName;
validate();
}
/**
 * Validate the security setup. In a secure cluster there must be a
 * usable login identity (an explicit principal, or a resolvable login
 * user), and at most one keytab-provisioning mechanism — a local host
 * path or a keytab name to fetch — may be configured.
 */
private void validate() throws SliderException {
  if (!isSecurityEnabled()) {
    return;
  }
  // TODO use AM configuration rather than app configuration
  org.apache.slider.api.resource.Configuration appConf =
      application.getConfiguration();
  String principal = appConf.getProperty(
      SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
  if (SliderUtils.isUnset(principal)) {
    // no explicit principal: fall back to the login identity,
    // failing if none is available
    UserGroupInformation loginUser;
    try {
      loginUser = getLoginUser();
    } catch (IOException e) {
      throw new SliderException(EXIT_UNAUTHORIZED, e,
          "No principal configured for the application and "
              + "exception raised during retrieval of login user. "
              + "Unable to proceed with application "
              + "initialization. Please ensure a value "
              + "for %s exists in the application "
              + "configuration or the login issue is addressed",
          SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
    }
    if (loginUser == null) {
      throw new SliderException(EXIT_UNAUTHORIZED,
          "No principal configured for the application "
              + "and no login user found. "
              + "Unable to proceed with application "
              + "initialization. Please ensure a value "
              + "for %s exists in the application "
              + "configuration or the login issue is addressed",
          SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
    }
  }
  // ensure that either local or distributed keytab mechanism is enabled,
  // but not both
  String keytabFullPath = appConf.getProperty(
      SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
  String keytabName = appConf.getProperty(
      SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
  if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) {
    throw new SliderException(EXIT_UNAUTHORIZED,
        "Both a keytab on the cluster host (%s) and a"
            + " keytab to be retrieved from HDFS (%s) are"
            + " specified. Please configure only one keytab"
            + " retrieval mechanism.",
        SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
        SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
  }
}
/**
 * Get the Hadoop login user. Protected so subclasses can substitute
 * their own lookup.
 * @throws IOException if the login user cannot be determined
 */
protected UserGroupInformation getLoginUser() throws IOException {
return UserGroupInformation.getLoginUser();
}
/**
 * Is the underlying Hadoop cluster configured as secure?
 */
public boolean isSecurityEnabled() {
return SliderUtils.isHadoopClusterSecure(configuration);
}
/**
 * Determine the principal to use for keytab-based login: the value
 * configured in the application, or — when none is set — the short name
 * of the current login user.
 *
 * @return the principal name
 * @throws IOException if the login user cannot be retrieved
 */
public String getPrincipal() throws IOException {
  String principal = application.getConfiguration().getProperty(
      SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
  if (SliderUtils.isSet(principal)) {
    return principal;
  }
  String loginIdentity =
      UserGroupInformation.getLoginUser().getShortUserName();
  log.info("No principal set in the slider configuration. Will use AM " +
      "login identity {} to attempt keytab-based login", loginIdentity);
  return loginIdentity;
}
/**
 * Has a keytab source been configured, either as a path on the cluster
 * host or as a named keytab to be retrieved?
 */
public boolean isKeytabProvided() {
  org.apache.slider.api.resource.Configuration appConf =
      application.getConfiguration();
  // short-circuits: the keytab name is only looked up when no local
  // path is set
  return StringUtils.isNotBlank(
      appConf.getProperty(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH))
      || StringUtils.isNotBlank(
          appConf.getProperty(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME));
}
/**
 * Resolve the local keytab file to log in with. An explicitly configured
 * host path is used directly; otherwise the named keytab is expected to
 * appear in the container's keytab directory as a localized resource.
 *
 * @return the keytab file to use
 */
public File getKeytabFile()
    throws SliderException, IOException {
  //TODO implement this for dash semantic
  String keytabFullPath = application.getConfiguration().getProperty(
      SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
  if (SliderUtils.isSet(keytabFullPath)) {
    log.info("Using host keytab file {} for login", keytabFullPath);
    return new File(keytabFullPath);
  }
  // get the keytab
  String keytabName = application.getConfiguration().getProperty(
      SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
  log.info("No host keytab file path specified. Will attempt to retrieve"
      + " keytab file {} as a local resource for the container",
      keytabName);
  // download keytab to local, protected directory
  return new File(SliderKeys.KEYTAB_DIR, keytabName);
}
}

View File

@ -38,6 +38,7 @@ import org.apache.slider.api.ClusterNode;
import org.apache.slider.api.InternalKeys;
import org.apache.slider.api.StatusKeys;
import org.apache.slider.api.proto.Messages;
import org.apache.slider.api.proto.Messages.ComponentCountProto;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.ApplicationState;
import org.apache.slider.api.resource.Component;
@ -219,7 +220,13 @@ public class AppState {
return roleStatusMap;
}
/**
 * Get the map of role name to provider role definition.
 */
protected Map<String, ProviderRole> getRoleMap() {
return roles;
}
/**
 * Get the map of role priority to provider role definition.
 */
public Map<Integer, ProviderRole> getRolePriorityMap() {
return rolePriorityMap;
}
private Map<ContainerId, RoleInstance> getStartingContainers() {
return startingContainers;
@ -257,6 +264,11 @@ public class AppState {
return roleHistory;
}
/**
 * Replace the role history; exposed only so tests can inject one.
 */
@VisibleForTesting
public void setRoleHistory(RoleHistory roleHistory) {
this.roleHistory = roleHistory;
}
/**
* Get the path used for history files
* @return the directory used for history files
@ -306,6 +318,15 @@ public class AppState {
appMetrics
.tag("appId", "Application id for service", app.getId());
org.apache.slider.api.resource.Configuration conf = app.getConfiguration();
startTimeThreshold =
conf.getPropertyLong(InternalKeys.INTERNAL_CONTAINER_FAILURE_SHORTLIFE,
InternalKeys.DEFAULT_INTERNAL_CONTAINER_FAILURE_SHORTLIFE);
failureThreshold = conf.getPropertyInt(CONTAINER_FAILURE_THRESHOLD,
DEFAULT_CONTAINER_FAILURE_THRESHOLD);
nodeFailureThreshold = conf.getPropertyInt(NODE_FAILURE_THRESHOLD,
DEFAULT_NODE_FAILURE_THRESHOLD);
//build the initial role list
List<ProviderRole> roleList = new ArrayList<>(binding.roles);
for (ProviderRole providerRole : roleList) {
@ -314,6 +335,7 @@ public class AppState {
int priority = 1;
for (Component component : app.getComponents()) {
priority = getNewPriority(priority);
String name = component.getName();
if (roles.containsKey(name)) {
continue;
@ -324,22 +346,13 @@ public class AppState {
}
log.info("Adding component: " + name);
ProviderRole dynamicRole =
createComponent(name, name, component, priority++);
createComponent(name, name, component, priority);
buildRole(dynamicRole);
roleList.add(dynamicRole);
}
//then pick up the requirements
buildRoleRequirementsFromResources();
org.apache.slider.api.resource.Configuration conf = app.getConfiguration();
startTimeThreshold =
conf.getPropertyLong(InternalKeys.INTERNAL_CONTAINER_FAILURE_SHORTLIFE,
InternalKeys.DEFAULT_INTERNAL_CONTAINER_FAILURE_SHORTLIFE);
failureThreshold = (int) conf.getPropertyLong(CONTAINER_FAILURE_THRESHOLD,
DEFAULT_CONTAINER_FAILURE_THRESHOLD);
nodeFailureThreshold = (int) conf.getPropertyLong(NODE_FAILURE_THRESHOLD,
DEFAULT_NODE_FAILURE_THRESHOLD);
// set up the role history
roleHistory = new RoleHistory(roleStatusMap.values(), recordFactory);
roleHistory.onStart(binding.fs, binding.historyPath);
@ -359,34 +372,47 @@ public class AppState {
//TODO WHY do we need to create the component for AM ?
public ProviderRole createComponent(String name, String group,
Component component, int priority) throws BadConfigException {
org.apache.slider.api.resource.Configuration conf =
component.getConfiguration();
long placementTimeout = conf.getPropertyLong(PLACEMENT_ESCALATE_DELAY,
DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS);
long placementPolicy = conf.getPropertyLong(COMPONENT_PLACEMENT_POLICY,
PlacementPolicy.DEFAULT);
int threshold = (int) conf
.getPropertyLong(NODE_FAILURE_THRESHOLD, nodeFailureThreshold);
int threshold = conf.getPropertyInt(NODE_FAILURE_THRESHOLD,
nodeFailureThreshold);
String label = conf.getProperty(YARN_LABEL_EXPRESSION,
DEF_YARN_LABEL_EXPRESSION);
ProviderRole newRole =
new ProviderRole(name, group, priority, (int)placementPolicy, threshold,
placementTimeout, "", component);
placementTimeout, label, component);
log.info("Created a new role " + newRole);
return newRole;
}
public synchronized void updateComponents(
Messages.FlexComponentRequestProto requestProto)
throws BadConfigException {
@VisibleForTesting
public synchronized List<ProviderRole> updateComponents(Map<String, Long>
componentCounts) throws BadConfigException {
for (Component component : app.getComponents()) {
if (component.getName().equals(requestProto.getName())) {
component
.setNumberOfContainers((long) requestProto.getNumberOfContainers());
if (componentCounts.containsKey(component.getName())) {
component.setNumberOfContainers(componentCounts.get(component
.getName()));
}
}
//TODO update cluster description
buildRoleRequirementsFromResources();
return buildRoleRequirementsFromResources();
}
public synchronized List<ProviderRole> updateComponents(
Messages.FlexComponentsRequestProto requestProto)
throws BadConfigException {
Map<String, Long> componentCounts = new HashMap<>();
for (ComponentCountProto componentCount : requestProto
.getComponentsList()) {
componentCounts.put(componentCount.getName(), componentCount
.getNumberOfContainers());
}
return updateComponents(componentCounts);
}
/**
@ -445,10 +471,8 @@ public class AppState {
// now the dynamic ones. Iterate through the the cluster spec and
// add any role status entries not in the role status
List<RoleStatus> list = new ArrayList<>(getRoleStatusMap().values());
for (RoleStatus roleStatus : list) {
String name = roleStatus.getName();
Component component = roleStatus.getProviderRole().component;
for (Component component : app.getComponents()) {
String name = component.getName();
if (roles.containsKey(name)) {
continue;
}
@ -460,10 +484,12 @@ public class AppState {
groupCount = groupCounts.get(name);
}
for (int i = groupCount + 1; i <= desiredInstanceCount; i++) {
int priority = roleStatus.getPriority();
// this is a new instance of an existing group
String newName = String.format("%s%d", name, i);
int newPriority = getNewPriority(priority + i - 1);
if (roles.containsKey(newName)) {
continue;
}
int newPriority = getNewPriority(i);
log.info("Adding new role {}", newName);
ProviderRole dynamicRole =
createComponent(newName, name, component, newPriority);
@ -477,11 +503,12 @@ public class AppState {
}
} else {
// this is a new value
log.info("Adding new role {}", name);
log.info("Adding new role {}, num containers {}", name,
component.getNumberOfContainers());
ProviderRole dynamicRole =
createComponent(name, name, component, roleStatus.getPriority());
createComponent(name, name, component, getNewPriority(1));
RoleStatus newRole = buildRole(dynamicRole);
incDesiredContainers(roleStatus,
incDesiredContainers(newRole,
component.getNumberOfContainers().intValue());
log.info("New role {}", newRole);
if (roleHistory != null) {
@ -518,7 +545,8 @@ public class AppState {
if (roleStatusMap.containsKey(priority)) {
throw new BadConfigException("Duplicate Provider Key: %s and %s",
providerRole,
roleStatusMap.get(priority));
roleStatusMap.get(priority)
.getProviderRole());
}
RoleStatus roleStatus = new RoleStatus(providerRole);
roleStatusMap.put(priority, roleStatus);
@ -536,6 +564,8 @@ public class AppState {
private void buildRoleResourceRequirements() {
for (RoleStatus role : roleStatusMap.values()) {
role.setResourceRequirements(buildResourceRequirements(role));
log.info("Setting resource requirements for {} to {}", role.getName(),
role.getResourceRequirements());
}
}
/**
@ -827,7 +857,6 @@ public class AppState {
* @return the container request to submit or null if there is none
*/
private AMRMClient.ContainerRequest createContainerRequest(RoleStatus role) {
incPendingContainers(role);
if (role.isAntiAffinePlacement()) {
return createAAContainerRequest(role);
} else {
@ -857,28 +886,58 @@ public class AppState {
return request.getIssuedRequest();
}
private void incPendingContainers(RoleStatus role) {
role.getComponentMetrics().containersPending.incr();
appMetrics.containersPending.incr();
@VisibleForTesting
public void incRequestedContainers(RoleStatus role) {
log.info("Incrementing requested containers for {}", role.getName());
role.getComponentMetrics().containersRequested.incr();
appMetrics.containersRequested.incr();
}
private void decPendingContainers(RoleStatus role) {
decPendingContainers(role, 1);
private void decRequestedContainers(RoleStatus role) {
role.getComponentMetrics().containersRequested.decr();
appMetrics.containersRequested.decr();
log.info("Decrementing requested containers for {} by {} to {}", role
.getName(), 1, role.getComponentMetrics().containersRequested.value());
}
private void decPendingContainers(RoleStatus role, int n) {
role.getComponentMetrics().containersPending.decr(n);;
appMetrics.containersPending.decr(n);
/**
 * Decrement the "requested containers" gauges for a role, clamped so they
 * never go below zero, mirroring the applied amount on the app-wide gauge.
 * @param role role whose request count is being reduced
 * @param delta requested decrement
 * @return the decrement actually applied
 */
private int decRequestedContainersToFloor(RoleStatus role, int delta) {
  final int applied = decMetricToFloor(
      role.getComponentMetrics().containersRequested, delta);
  // Keep the application-wide gauge in step with the clamped role gauge.
  appMetrics.containersRequested.decr(applied);
  log.info("Decrementing requested containers for {} by {} to {}",
      role.getName(), applied,
      role.getComponentMetrics().containersRequested.value());
  return applied;
}
/**
 * Decrement the pending anti-affinity container gauges for a role, clamped
 * so they never go below zero; the app-wide gauge gets the same decrement.
 * @param role role whose AA-pending count is being reduced
 * @param delta requested decrement
 * @return the decrement actually applied
 */
private int decAAPendingToFloor(RoleStatus role, int delta) {
  final int applied = decMetricToFloor(
      role.getComponentMetrics().pendingAAContainers, delta);
  // Keep the application-wide gauge in step with the clamped role gauge.
  appMetrics.pendingAAContainers.decr(applied);
  log.info("Decrementing AA pending containers for {} by {} to {}",
      role.getName(), applied,
      role.getComponentMetrics().pendingAAContainers.value());
  return applied;
}
private void incRunningContainers(RoleStatus role) {
role.getComponentMetrics().containersRunning.incr();;
/**
 * Decrement a gauge by up to {@code delta}, never taking it below zero.
 * @param metric gauge to decrement
 * @param delta requested decrement
 * @return the decrement actually applied: the smaller of {@code delta}
 *         and the gauge's current value
 */
private int decMetricToFloor(MutableGaugeInt metric, int delta) {
  // Clamp the decrement so the gauge cannot go negative.
  int decrAmount = Math.min(delta, metric.value());
  metric.decr(decrAmount);
  return decrAmount;
}
/** Mark one more container as running at both app and role scope. */
@VisibleForTesting
public void incRunningContainers(RoleStatus role) {
  // The two gauges are independent; update the app-wide one first.
  appMetrics.containersRunning.incr();
  role.getComponentMetrics().containersRunning.incr();
}
private void decRunningContainers(RoleStatus role) {
role.getComponentMetrics().containersRunning.decr();;
role.getComponentMetrics().containersRunning.decr();
appMetrics.containersRunning.decr();
}
@ -902,26 +961,47 @@ public class AppState {
appMetrics.containersCompleted.incr();
}
private void incFailedContainers(RoleStatus role, ContainerOutcome outcome) {
role.getComponentMetrics().containersFailed.incr();
appMetrics.containersFailed.incr();
/**
 * Record a container failure against both the role's metrics and the
 * application-wide metrics, bucketed by the failure outcome.
 * Preemptions are counted separately and do not increment the failure
 * gauges; disk failures count as failures but are not counted against
 * the rolling failure threshold.
 * @param role role whose container failed
 * @param outcome classification of the failure
 */
@VisibleForTesting
public void incFailedContainers(RoleStatus role, ContainerOutcome outcome) {
  switch (outcome) {
  case Preempted:
    // tracked on its own; not a failure
    role.getComponentMetrics().containersPreempted.incr();
    appMetrics.containersPreempted.incr();
    break;
  case Disk_failure:
    // a failure, but excluded from the failure-threshold window
    role.getComponentMetrics().containersDiskFailure.incr();
    role.getComponentMetrics().containersFailed.incr();
    appMetrics.containersDiskFailure.incr();
    appMetrics.containersFailed.incr();
    break;
  case Failed_limits_exceeded:
    role.getComponentMetrics().containersLimitsExceeded.incr();
    role.getComponentMetrics().failedSinceLastThreshold.incr();
    role.getComponentMetrics().containersFailed.incr();
    appMetrics.containersLimitsExceeded.incr();
    appMetrics.failedSinceLastThreshold.incr();
    appMetrics.containersFailed.incr();
    break;
  case Failed:
  default:
    // plain failure; counts against the rolling failure threshold
    role.getComponentMetrics().failedSinceLastThreshold.incr();
    role.getComponentMetrics().containersFailed.incr();
    appMetrics.failedSinceLastThreshold.incr();
    appMetrics.containersFailed.incr();
    break;
  }
}
/**
* Build up the resource requirements for this role from the
* cluster specification, including substituing max allowed values
* if the specification asked for it.
* Build up the resource requirements for this role from the cluster
* specification, including substituting max allowed values if the
* specification asked for it (except when
* {@link ResourceKeys#YARN_RESOURCE_NORMALIZATION_ENABLED} is set to false).
* @param role role
* during normalization
*/
@ -934,17 +1014,36 @@ public class AppState {
// TODO why do we need to create the component for AM ?
return Resource.newInstance(1, 512);
}
int cores = Math.min(containerMaxCores, component.getResource().getCpus());
int cores = DEF_YARN_CORES;
if (component.getResource() != null && component.getResource().getCpus()
!= null) {
cores = Math.min(containerMaxCores, component.getResource().getCpus());
}
if (cores <= 0) {
cores = DEF_YARN_CORES;
}
long mem = Math.min(containerMaxMemory,
Long.parseLong(component.getResource().getMemory()));
long rawMem = DEF_YARN_MEMORY;
if (component.getResource() != null && component.getResource().getMemory()
!= null) {
if (YARN_RESOURCE_MAX.equals(component.getResource().getMemory())) {
rawMem = containerMaxMemory;
} else {
rawMem = Long.parseLong(component.getResource().getMemory());
}
}
boolean normalize = component.getConfiguration().getPropertyBool(
YARN_RESOURCE_NORMALIZATION_ENABLED, true);
if (!normalize) {
log.info("Resource normalization: disabled");
log.debug("Component {} has RAM={}, vCores={}", name, rawMem, cores);
return Resources.createResource(rawMem, cores);
}
long mem = Math.min(containerMaxMemory, rawMem);
if (mem <= 0) {
mem = DEF_YARN_MEMORY;
}
Resource capability = Resource.newInstance(mem, cores);
log.debug("Component {} has RAM={}, vCores ={}", name, mem, cores);
log.debug("Component {} has RAM={}, vCores={}", name, mem, cores);
Resource normalized = recordFactory.normalize(capability, minResource,
maxResource);
if (!Resources.equals(normalized, capability)) {
@ -1060,7 +1159,7 @@ public class AppState {
log.debug("Created {} cancel requests", operations.size());
return new NodeUpdatedOutcome(true, operations);
}
return new NodeUpdatedOutcome(false, new ArrayList<AbstractRMOperation>(0));
return new NodeUpdatedOutcome(false, new ArrayList<>(0));
}
/**
@ -1203,7 +1302,6 @@ public class AppState {
message = String.format("Failure %s (%d)", containerId, exitStatus);
}
roleStatus.noteFailed(message);
incFailedContainers(roleStatus, result.outcome);
long failed =
roleStatus.getComponentMetrics().containersFailed.value();
log.info("Current count of failed role[{}] {} = {}",
@ -1409,7 +1507,7 @@ public class AppState {
role.getName(), failures, threshold);
}
if (failures > threshold) {
if (threshold > 0 && failures > threshold) {
throw new TriggerClusterTeardownException(
SliderExitCodes.EXIT_DEPLOYMENT_FAILED, FinalApplicationStatus.FAILED,
ErrorStrings.E_UNSTABLE_CLUSTER
@ -1428,7 +1526,7 @@ public class AppState {
private int getFailureThresholdForRole(RoleStatus roleStatus) {
return (int) roleStatus.getProviderRole().component.getConfiguration()
.getPropertyLong(CONTAINER_FAILURE_THRESHOLD,
DEFAULT_CONTAINER_FAILURE_THRESHOLD);
failureThreshold);
}
@ -1497,7 +1595,8 @@ public class AppState {
}
log.info("Reviewing {} : ", role);
log.debug("Expected {}, Delta: {}", expected, delta);
log.debug("Expected {}, Requested/Running {}, Delta: {}", expected,
role.getActualAndRequested(), delta);
checkFailureThreshold(role);
if (expected < 0 ) {
@ -1526,7 +1625,7 @@ public class AppState {
pending--;
log.info("Starting an anti-affine request sequence for {} nodes; pending={}",
delta, pending);
addContainerRequest(operations, request);
addContainerRequest(operations, request, role);
} else {
log.info("No location for anti-affine request");
}
@ -1536,12 +1635,12 @@ public class AppState {
}
log.info("Setting pending to {}", pending);
//TODO
role.setAAPending((int)pending);
role.setAAPending(pending);
} else {
for (int i = 0; i < delta; i++) {
//get the role history to select a suitable node, if available
addContainerRequest(operations, createContainerRequest(role));
addContainerRequest(operations, createContainerRequest(role), role);
}
}
} else if (delta < 0) {
@ -1552,25 +1651,35 @@ public class AppState {
long excess = -delta;
// how many requests are outstanding? for AA roles, this includes pending
long outstandingRequests = role.getPending() + role.getAAPending();
long outstandingRequests = role.getRequested() + role.getAAPending();
if (outstandingRequests > 0) {
// outstanding requests.
int toCancel = (int)Math.min(outstandingRequests, excess);
int pendingCancelled = 0;
if (role.getAAPending() > 0) {
pendingCancelled = decAAPendingToFloor(role, toCancel);
}
int remainingToCancel = toCancel - pendingCancelled;
// Delegate to Role History
List<AbstractRMOperation> cancellations = roleHistory.cancelRequestsForRole(role, toCancel);
List<AbstractRMOperation> cancellations = roleHistory
.cancelRequestsForRole(role, remainingToCancel);
log.info("Found {} outstanding requests to cancel", cancellations.size());
operations.addAll(cancellations);
if (toCancel != cancellations.size()) {
if (remainingToCancel != cancellations.size()) {
log.error("Tracking of outstanding requests is not in sync with the summary statistics:" +
" expected to be able to cancel {} requests, but got {}",
toCancel, cancellations.size());
remainingToCancel, cancellations.size());
}
decPendingContainers(role, toCancel);
excess -= toCancel;
int requestCancelled = decRequestedContainersToFloor(role,
remainingToCancel);
excess -= pendingCancelled;
excess -= requestCancelled;
assert excess >= 0 : "Attempted to cancel too many requests";
log.info("Submitted {} cancellations, leaving {} to release",
toCancel, excess);
pendingCancelled + requestCancelled, excess);
if (excess == 0) {
log.info("After cancelling requests, application is now at desired size");
}
@ -1645,7 +1754,7 @@ public class AppState {
* @return true if a request was added
*/
private boolean addContainerRequest(List<AbstractRMOperation> operations,
AMRMClient.ContainerRequest containerAsk) {
AMRMClient.ContainerRequest containerAsk, RoleStatus role) {
if (containerAsk != null) {
log.info("Container ask is {} and label = {}", containerAsk,
containerAsk.getNodeLabelExpression());
@ -1654,6 +1763,7 @@ public class AppState {
log.warn("Memory requested: {} > max of {}", askMemory, containerMaxMemory);
}
operations.add(new ContainerRequestOperation(containerAsk));
incRequestedContainers(role);
return true;
} else {
return false;
@ -1727,6 +1837,8 @@ public class AppState {
List<Container> allocatedContainers,
List<ContainerAssignment> assignments,
List<AbstractRMOperation> operations) {
assignments.clear();
operations.clear();
List<Container> ordered = roleHistory.prepareAllocationList(allocatedContainers);
log.info("onContainersAllocated(): Total containers allocated = {}", ordered.size());
for (Container container : ordered) {
@ -1735,13 +1847,13 @@ public class AppState {
//get the role
final ContainerId cid = container.getId();
final RoleStatus role = lookupRoleStatus(container);
decPendingContainers(role);
decRequestedContainers(role);
//inc allocated count -this may need to be dropped in a moment,
// but us needed to update the logic below
MutableGaugeInt containersRunning = role.getComponentMetrics().containersRunning;
final long allocated = containersRunning.value();
incRunningContainers(role);
final long allocated = containersRunning.value();
final long desired = role.getDesired();
final String roleName = role.getName();
@ -1778,7 +1890,8 @@ public class AppState {
if (role.getAAPending() > 0) {
// still an outstanding AA request: need to issue a new one.
log.info("Asking for next container for AA role {}", roleName);
if (!addContainerRequest(operations, createAAContainerRequest(role))) {
if (!addContainerRequest(operations, createAAContainerRequest(role),
role)) {
log.info("No capacity in cluster for new requests");
} else {
role.decAAPending();

View File

@ -29,7 +29,7 @@ public enum ContainerOutcome {
Completed,
Failed,
Failed_limits_exceeded,
Node_failure,
Disk_failure,
Preempted;
/**
@ -48,7 +48,7 @@ public enum ContainerOutcome {
// could either be a release or node failure. Treat as completion
return Completed;
case ContainerExitStatus.DISKS_FAILED:
return Node_failure;
return Disk_failure;
case ContainerExitStatus.PREEMPTED:
return Preempted;
case ContainerExitStatus.KILLED_EXCEEDED_PMEM:

View File

@ -222,7 +222,7 @@ public class NodeEntry implements Cloneable {
// general "any reason" app failure
case Failed:
// specific node failure
case Node_failure:
case Disk_failure:
++failed;
++failedRecently;

View File

@ -21,7 +21,6 @@ package org.apache.slider.server.appmaster.state;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricSet;
import com.google.common.base.Preconditions;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.slider.api.types.ComponentInformation;
import org.apache.slider.api.types.RoleStatistics;
@ -35,8 +34,6 @@ import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import static org.apache.hadoop.metrics2.lib.Interns.info;
/**
* Models the ongoing status of all nodes in an application.
*
@ -207,12 +204,16 @@ public final class RoleStatus implements MetricSet {
return componentMetrics.containersDesired.value();
}
long getRunning() {
public void setDesired(int desired) {
componentMetrics.containersDesired.set(desired);
}
public long getRunning() {
return componentMetrics.containersRunning.value();
}
public long getPending() {
return componentMetrics.containersPending.value();
public long getRequested() {
return componentMetrics.containersRequested.value();
}
public long getAAPending() {
@ -222,22 +223,35 @@ public final class RoleStatus implements MetricSet {
void decAAPending() {
componentMetrics.pendingAAContainers.decr();
}
void setAAPending(long n) {
componentMetrics.pendingAAContainers.set((int)n);
}
long getFailedRecently() {
public long getLimitsExceeded() {
return componentMetrics.containersLimitsExceeded.value();
}
public long getPreempted() {
return componentMetrics.containersPreempted.value();
}
public long getDiskFailed() {
return componentMetrics.containersDiskFailure.value();
}
public long getFailedRecently() {
return componentMetrics.failedSinceLastThreshold.value();
}
long resetFailedRecently() {
public long resetFailedRecently() {
long count =
componentMetrics.failedSinceLastThreshold.value();
componentMetrics.failedSinceLastThreshold.set(0);
return count;
}
long getFailed() {
public long getFailed() {
return componentMetrics.containersFailed.value();
}
@ -254,6 +268,8 @@ public final class RoleStatus implements MetricSet {
long inuse = getActualAndRequested();
long delta = getDesired() - inuse;
if (delta < 0) {
// TODO this doesn't do anything now that we're not tracking releasing
// containers -- maybe we need releasing
//if we are releasing, remove the number that are already released.
//but never switch to a positive
delta = Math.min(delta, 0);
@ -262,11 +278,11 @@ public final class RoleStatus implements MetricSet {
}
/**
* Get count of actual and requested containers. This includes pending ones
* Get count of actual and requested containers.
* @return the size of the application when outstanding requests are included.
*/
public long getActualAndRequested() {
return getRunning() + getPending();
return getRunning() + getRequested();
}
/**
@ -341,6 +357,14 @@ public final class RoleStatus implements MetricSet {
public synchronized RoleStatistics getStatistics() {
RoleStatistics stats = new RoleStatistics();
stats.activeAA = getOutstandingAARequestCount();
stats.actual = getRunning();
stats.desired = getDesired();
stats.failed = getFailed();
stats.limitsExceeded = getLimitsExceeded();
stats.nodeFailed = getDiskFailed();
stats.preempted = getPreempted();
stats.requested = getRequested();
stats.started = getRunning();
return stats;
}

View File

@ -156,7 +156,7 @@ public class IndexBlock extends SliderHamletBlock {
} else {
aatext = "";
}
if (status.getPending() > 0) {
if (status.getRequested() > 0) {
roleWithOpenRequest ++;
}
}
@ -165,7 +165,7 @@ public class IndexBlock extends SliderHamletBlock {
.td().a(nameUrl, roleName)._()
.td(String.format("%d", metrics.containersDesired.value()))
.td(String.format("%d", metrics.containersRunning.value()))
.td(String.format("%d", metrics.containersPending.value()))
.td(String.format("%d", metrics.containersRequested.value()))
.td(String.format("%d", metrics.containersFailed.value()))
.td(aatext)
._();

View File

@ -80,12 +80,16 @@ message UpgradeContainersRequestProto {
message UpgradeContainersResponseProto {
}
message FlexComponentRequestProto {
optional string name = 1;
optional int32 numberOfContainers = 2;
message FlexComponentsRequestProto {
repeated ComponentCountProto components = 1;
}
message FlexComponentResponseProto {
message ComponentCountProto {
optional string name = 1;
optional int64 numberOfContainers = 2;
}
message FlexComponentsResponseProto {
}
/**

View File

@ -61,7 +61,7 @@ service SliderClusterProtocolPB {
rpc upgradeContainers(UpgradeContainersRequestProto)
returns(UpgradeContainersResponseProto);
rpc flexComponent(FlexComponentRequestProto) returns (FlexComponentResponseProto);
rpc flexComponents(FlexComponentsRequestProto) returns (FlexComponentsResponseProto);
/**
* Get the current cluster status

View File

@ -0,0 +1,50 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.api;
import org.apache.hadoop.conf.Configuration;
import org.apache.slider.server.appmaster.rpc.RpcBinder;
import org.apache.slider.server.appmaster.rpc.SliderClusterProtocolPB;
import org.junit.Test;

import java.net.InetSocketAddress;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
 * Tests RPC work.
 */
public class TestRPCBinding {

  /** Verify the slider protobuf RPC engine registers for the protocol. */
  @Test
  public void testRegistration() throws Throwable {
    Configuration conf = new Configuration();
    RpcBinder.registerSliderAPI(conf);
    assertTrue(RpcBinder.verifyBondedToProtobuf(conf,
        SliderClusterProtocolPB.class));
  }

  /**
   * Verify a client proxy can be constructed. Proxy creation does not
   * contact the server, so no listener is needed at the address.
   */
  @Test
  public void testGetProxy() throws Throwable {
    Configuration conf = new Configuration();
    InetSocketAddress saddr = new InetSocketAddress("127.0.0.1", 9000);
    SliderClusterProtocol proxy =
        RpcBinder.connectToServer(saddr, null, conf, 1000);
    // Previously the proxy was created but never checked (unused local);
    // assert it exists so the test actually verifies something.
    assertNotNull(proxy);
  }
}

View File

@ -0,0 +1,229 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.slider.common.params.Arguments;
import org.apache.slider.common.params.SliderActions;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import org.apache.slider.core.exceptions.ErrorStrings;
import org.apache.slider.core.exceptions.UsageException;
import org.apache.slider.utils.SliderTestBase;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
/**
 * Test the argument parsing/validation logic: each case launches the client
 * with a bad command line and asserts on the expected error text (and, where
 * relevant, the expected exception type).
 */
public class TestClientBadArgs extends SliderTestBase {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestClientBadArgs.class);

  // No action at all: the top-level usage string is expected.
  @Test
  public void testNoAction() throws Throwable {
    launchExpectingException(SliderClient.class,
                             createTestConfig(),
                             "Usage: slider COMMAND",
                             EMPTY_LIST);
  }

  // An unrecognized action should be echoed back in the error message.
  @Test
  public void testUnknownAction() throws Throwable {
    launchExpectingException(SliderClient.class,
                             createTestConfig(),
                             "not-a-known-action",
                             Arrays.asList("not-a-known-action"));
  }

  // "build" without its required <application> argument prints its usage.
  @Test
  public void testActionWithoutOptions() throws Throwable {
    launchExpectingException(SliderClient.class,
                             createTestConfig(),
                             "Usage: slider build <application>",
                             Arrays.asList(SliderActions.ACTION_BUILD));
  }

  // "start" requires an argument; too few should be reported.
  @Test
  public void testActionWithoutEnoughArgs() throws Throwable {
    launchExpectingException(SliderClient.class,
                             createTestConfig(),
                             ErrorStrings.ERROR_NOT_ENOUGH_ARGUMENTS,
                             Arrays.asList(SliderActions.ACTION_START));
  }

  // "help" takes no arguments; extras should be reported.
  @Test
  public void testActionWithTooManyArgs() throws Throwable {
    launchExpectingException(SliderClient.class,
                             createTestConfig(),
                             ErrorStrings.ERROR_TOO_MANY_ARGUMENTS,
                             Arrays.asList(SliderActions.ACTION_HELP,
                                 "hello, world"));
  }

  // --image is not a valid option for "help".
  @Test
  public void testBadImageArg() throws Throwable {
    launchExpectingException(SliderClient.class,
                             createTestConfig(),
                             "Unknown option: --image",
                             Arrays.asList(SliderActions.ACTION_HELP,
                                 Arguments.ARG_IMAGE));
  }

  // "registry" without --name must raise a UsageException.
  @Test
  public void testRegistryUsage() throws Throwable {
    Throwable exception = launchExpectingException(SliderClient.class,
        createTestConfig(),
        "org.apache.slider.core.exceptions.UsageException: Argument --name " +
            "missing",
        Arrays.asList(SliderActions.ACTION_REGISTRY));
    assertTrue(exception instanceof UsageException);
    LOG.info(exception.toString());
  }

  // --getexp with no following value is a bad-arguments error.
  @Test
  public void testRegistryExportBadUsage1() throws Throwable {
    Throwable exception = launchExpectingException(SliderClient.class,
        createTestConfig(),
        "Expected a value after parameter --getexp",
        Arrays.asList(SliderActions.ACTION_REGISTRY,
            Arguments.ARG_NAME,
            "cl1",
            Arguments.ARG_GETEXP));
    assertTrue(exception instanceof BadCommandArgumentsException);
    LOG.info(exception.toString());
  }

  // --getexp still needs a value even when --listexp precedes it.
  @Test
  public void testRegistryExportBadUsage2() throws Throwable {
    Throwable exception = launchExpectingException(SliderClient.class,
        createTestConfig(),
        "Expected a value after parameter --getexp",
        Arrays.asList(SliderActions.ACTION_REGISTRY,
            Arguments.ARG_NAME,
            "cl1",
            Arguments.ARG_LISTEXP,
            Arguments.ARG_GETEXP));
    assertTrue(exception instanceof BadCommandArgumentsException);
    LOG.info(exception.toString());
  }

  // --listexp and --getexp together are mutually incompatible: usage error.
  @Test
  public void testRegistryExportBadUsage3() throws Throwable {
    Throwable exception = launchExpectingException(SliderClient.class,
        createTestConfig(),
        "Usage: registry",
        Arrays.asList(SliderActions.ACTION_REGISTRY,
            Arguments.ARG_NAME,
            "cl1",
            Arguments.ARG_LISTEXP,
            Arguments.ARG_GETEXP,
            "export1"));
    assertTrue(exception instanceof UsageException);
    LOG.info(exception.toString());
  }

  // "upgrade" needs at least the application name.
  @Test
  public void testUpgradeUsage() throws Throwable {
    Throwable exception = launchExpectingException(SliderClient.class,
        createTestConfig(),
        "org.apache.slider.core.exceptions.BadCommandArgumentsException: Not " +
            "enough arguments for action: upgrade Expected minimum 1 but got 0",
        Arrays.asList(SliderActions.ACTION_UPGRADE));
    assertTrue(exception instanceof BadCommandArgumentsException);
    LOG.info(exception.toString());
  }

  /**
   * Build a configuration pointing at a local RM address so that launches
   * fail fast on argument validation rather than cluster discovery.
   * @return a fresh test configuration
   */
  public Configuration createTestConfig() {
    Configuration configuration = new Configuration();
    configuration.set(YarnConfiguration.RM_ADDRESS,  "127.0.0.1:8032");
    return configuration;
  }

  // Disabled pending upgrade-argument support; see TODO below.
  @Ignore
  @Test
  public void testUpgradeWithTemplateResourcesAndContainersOption() throws
      Throwable {
    //TODO test upgrade args
    String appName = "test_hbase";
    Throwable exception = launchExpectingException(SliderClient.class,
        createTestConfig(),
        "BadCommandArgumentsException: Option --containers cannot be "
            + "specified with --appdef",
        Arrays.asList(SliderActions.ACTION_UPGRADE,
            appName,
            Arguments.ARG_APPDEF,
            "/tmp/app.json",
            Arguments.ARG_CONTAINERS,
            "container_1"
        ));
    assertTrue(exception instanceof BadCommandArgumentsException);
    LOG.info(exception.toString());
  }

  // Disabled pending upgrade-argument support; see TODO below.
  @Ignore
  @Test
  public void testUpgradeWithTemplateResourcesAndComponentsOption() throws
      Throwable {
    //TODO test upgrade args
    String appName = "test_hbase";
    Throwable exception = launchExpectingException(SliderClient.class,
        createTestConfig(),
        "BadCommandArgumentsException: Option --components cannot be "
            + "specified with --appdef",
        Arrays.asList(SliderActions.ACTION_UPGRADE,
            appName,
            Arguments.ARG_APPDEF,
            "/tmp/app.json",
            Arguments.ARG_COMPONENTS,
            "HBASE_MASTER"
        ));
    assertTrue(exception instanceof BadCommandArgumentsException);
    LOG.info(exception.toString());
  }

  // "nodes" with --out but no filename is a bad-arguments error.
  @Test
  public void testNodesMissingFile() throws Throwable {
    Throwable exception = launchExpectingException(SliderClient.class,
        createTestConfig(),
        "after parameter --out",
        Arrays.asList(SliderActions.ACTION_NODES, Arguments.ARG_OUTPUT));
    assertTrue(exception instanceof BadCommandArgumentsException);
  }

  // "flex" with no component flags should print the flex usage string.
  @Test
  public void testFlexWithNoComponents() throws Throwable {
    Throwable exception = launchExpectingException(SliderClient.class,
        new Configuration(),
        "Usage: slider flex <application>",
        Arrays.asList(
            SliderActions.ACTION_FLEX,
            "flex1",
            Arguments.ARG_DEFINE,
            YarnConfiguration.RM_ADDRESS + "=127.0.0.1:8032"
        ));
    assertTrue(exception instanceof UsageException);
    LOG.info(exception.toString());
  }
}

View File

@ -0,0 +1,81 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.client;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.slider.common.params.Arguments;
import org.apache.slider.common.params.ClientArgs;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.core.main.ServiceLauncher;
import org.apache.slider.utils.SliderTestBase;
import org.junit.Test;
import java.net.UnknownHostException;
import java.util.Arrays;
/**
 * Test bad argument handling: launches the client in-process and checks
 * exit codes / exceptions for trivial and malformed command lines.
 */
public class TestClientBasicArgs extends SliderTestBase {

  /**
   * Help should print out help string and then succeed.
   * @throws Throwable
   */
  @Test
  public void testHelp() throws Throwable {
    ServiceLauncher launcher = launch(SliderClient.class,
                                      SliderUtils.createConfiguration(),
                                      Arrays.asList(ClientArgs.ACTION_HELP));
    assertEquals(0, launcher.getServiceExitCode());
  }

  // No arguments at all: the top-level usage string is expected.
  @Test
  public void testNoArgs() throws Throwable {
    launchExpectingException(SliderClient.class,
                             SliderUtils.createConfiguration(),
                             "Usage: slider COMMAND",
                             EMPTY_LIST);
  }

  // Listing against an unresolvable RM host should surface
  // UnknownHostException; connect wait/retry intervals are shortened so the
  // test fails fast instead of waiting out the default RM retry policy.
  @Test
  public void testListUnknownRM() throws Throwable {
    try {
      YarnConfiguration conf = SliderUtils.createConfiguration();
      conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
          1000);
      conf.setLong(YarnConfiguration
          .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000);
      ServiceLauncher launcher = launch(SliderClient.class,
                                        conf,
                                        Arrays.asList(
                                        ClientArgs.ACTION_LIST,
                                        "cluster",
                                        Arguments.ARG_MANAGER,
                                        "badhost:8888"));
      fail("expected an exception, got a launcher with exit code " +
          launcher.getServiceExitCode());
    } catch (UnknownHostException expected) {
      //expected
    }
  }
}

View File

@ -0,0 +1,522 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.api.RoleKeys;
import org.apache.slider.common.SliderXmlConfKeys;
import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
import org.apache.slider.common.params.ActionBuildArgs;
import org.apache.slider.common.params.ActionCreateArgs;
import org.apache.slider.common.params.ActionDestroyArgs;
import org.apache.slider.common.params.ActionExistsArgs;
import org.apache.slider.common.params.ActionFlexArgs;
import org.apache.slider.common.params.ActionFreezeArgs;
import org.apache.slider.common.params.ActionListArgs;
import org.apache.slider.common.params.ActionStatusArgs;
import org.apache.slider.common.params.ActionThawArgs;
import org.apache.slider.common.params.ActionUpdateArgs;
import org.apache.slider.common.params.ArgOps;
import org.apache.slider.common.params.Arguments;
import org.apache.slider.common.params.ClientArgs;
import org.apache.slider.common.params.SliderActions;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import org.apache.slider.core.exceptions.ErrorStrings;
import org.apache.slider.core.exceptions.SliderException;
import org.junit.Assert;
import org.junit.Test;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
* Test handling of common arguments, specifically how things get split up.
*/
public class TestCommonArgParsing implements SliderActions, Arguments {

  public static final String CLUSTERNAME = "clustername";

  @Test
  public void testCreateActionArgs() throws Throwable {
    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_CREATE,
        "cluster1"));
    assertEquals("cluster1", clientArgs.getClusterName());
  }

  @Test
  public void testCreateFailsNoClustername() throws Throwable {
    assertParseFails(Arrays.asList(ACTION_CREATE));
  }

  @Test
  public void testCreateFailsTwoClusternames() throws Throwable {
    assertParseFails(Arrays.asList(
        ACTION_CREATE,
        "c1",
        "c2"
    ));
  }

  @Test
  public void testHelp() throws Throwable {
    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_HELP));
    assertNull(clientArgs.getClusterName());
  }

  @Test
  public void testSliderBasePath() throws Throwable {
    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST,
        ARG_BASE_PATH, "/projects/slider/clusters"));
    assertEquals(new Path("/projects/slider/clusters"),
        clientArgs.getBasePath());
  }

  @Test
  public void testNoSliderBasePath() throws Throwable {
    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST));
    assertNull(clientArgs.getBasePath());
  }

  @Test
  public void testListNoClusternames() throws Throwable {
    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST));
    assertNull(clientArgs.getClusterName());
  }

  @Test
  public void testListNoClusternamesDefinition() throws Throwable {
    ClientArgs clientArgs = createClientArgs(Arrays.asList(
        ACTION_LIST,
        ARG_DEFINE,
        "fs.default.FS=file://localhost"
    ));
    assertNull(clientArgs.getClusterName());
  }

  @Test
  public void testList1Clustername() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(ACTION_LIST, "cluster1"));
    assertEquals("cluster1", ca.getClusterName());
    assertTrue(ca.getCoreAction() instanceof ActionListArgs);
  }

  @Test
  public void testListFailsTwoClusternames() throws Throwable {
    assertParseFails(Arrays.asList(
        ACTION_LIST,
        "c1",
        "c2"
    ));
  }

  /**
   * Verify -D definitions are parsed and can be applied to a Configuration.
   */
  @Test
  public void testDefinitions() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_CREATE,
        CLUSTERNAME,
        "-D", "yarn.resourcemanager.principal=yarn/server@LOCAL",
        "-D", "dfs.datanode.kerberos.principal=hdfs/server@LOCAL"
    ));
    Configuration conf = new Configuration(false);
    ca.applyDefinitions(conf);
    assertEquals(CLUSTERNAME, ca.getClusterName());
    // no base path was supplied, so none must be set in the conf
    assertNull(conf.get(SliderXmlConfKeys.KEY_SLIDER_BASE_PATH));
    SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
    SliderUtils.verifyPrincipalSet(
        conf,
        SliderXmlConfKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
  }

  /**
   * Verify --basepath is propagated into the configuration alongside -D
   * definitions.
   */
  @Test
  public void testDefinitionsSettingBaseSliderDir() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_CREATE,
        CLUSTERNAME,
        "--basepath", "/projects/slider/clusters",
        "-D", "yarn.resourcemanager.principal=yarn/server@LOCAL",
        "-D", "dfs.datanode.kerberos.principal=hdfs/server@LOCAL"
    ));
    Configuration conf = new Configuration(false);
    ca.applyDefinitions(conf);
    assertEquals(CLUSTERNAME, ca.getClusterName());
    assertEquals("/projects/slider/clusters", conf.get(SliderXmlConfKeys
        .KEY_SLIDER_BASE_PATH));
    SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
    SliderUtils.verifyPrincipalSet(conf, SliderXmlConfKeys
        .DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
  }

  /**
   * Test a start command.
   * @throws Throwable
   */
  @Test
  public void testComplexThaw() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_START,
        "--manager", "rhel:8032",
        "--filesystem", "hdfs://rhel:9090",
        "-S", "java.security.krb5.realm=LOCAL",
        "-S", "java.security.krb5.kdc=rhel",
        "-D", "yarn.resourcemanager.principal=yarn/rhel@LOCAL",
        "-D", "namenode.resourcemanager.principal=hdfs/rhel@LOCAL",
        "cl1"
    ));
    assertEquals("cl1", ca.getClusterName());
    assertTrue(ca.getCoreAction() instanceof ActionThawArgs);
  }

  /**
   * Test a force kill command where the app comes at the end of the line.
   * @throws Throwable
   *
   */
  @Test
  public void testStatusSplit() throws Throwable {
    String appId = "application_1381252124398_0013";
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_STATUS,
        "--manager", "rhel:8032",
        "--filesystem", "hdfs://rhel:9090",
        "-S", "java.security.krb5.realm=LOCAL",
        "-S", "java.security.krb5.kdc=rhel",
        "-D", "yarn.resourcemanager.principal=yarn/rhel@LOCAL",
        "-D", "namenode.resourcemanager.principal=hdfs/rhel@LOCAL",
        appId
    ));
    assertEquals(appId, ca.getClusterName());
  }

  @Test
  public void testFreezeFailsNoArg() throws Throwable {
    assertParseFails(Arrays.asList(
        ACTION_STOP
    ));
  }

  @Test
  public void testFreezeWorks1Arg() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_STOP,
        CLUSTERNAME
    ));
    assertEquals(CLUSTERNAME, ca.getClusterName());
    assertTrue(ca.getCoreAction() instanceof ActionFreezeArgs);
  }

  @Test
  public void testFreezeFails2Arg() throws Throwable {
    assertParseFails(Arrays.asList(
        ACTION_STOP, "cluster", "cluster2"
    ));
  }

  @Test
  public void testFreezeForceWaitAndMessage() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_STOP, CLUSTERNAME,
        ARG_FORCE,
        ARG_WAIT, "0",
        ARG_MESSAGE, "explanation"
    ));
    assertEquals(CLUSTERNAME, ca.getClusterName());
    assertTrue(ca.getCoreAction() instanceof ActionFreezeArgs);
    ActionFreezeArgs freezeArgs = (ActionFreezeArgs) ca.getCoreAction();
    assertEquals("explanation", freezeArgs.message);
    assertTrue(freezeArgs.force);
  }

  @Test
  public void testGetStatusWorks1Arg() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_STATUS,
        CLUSTERNAME
    ));
    assertEquals(CLUSTERNAME, ca.getClusterName());
    assertTrue(ca.getCoreAction() instanceof ActionStatusArgs);
  }

  @Test
  public void testExistsWorks1Arg() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_EXISTS,
        CLUSTERNAME,
        ARG_LIVE
    ));
    assertEquals(CLUSTERNAME, ca.getClusterName());
    assertTrue(ca.getCoreAction() instanceof ActionExistsArgs);
    assertTrue(ca.getActionExistsArgs().live);
  }

  @Test
  public void testDestroy1Arg() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_DESTROY,
        CLUSTERNAME
    ));
    assertEquals(CLUSTERNAME, ca.getClusterName());
    assertTrue(ca.getCoreAction() instanceof ActionDestroyArgs);
  }

  /**
   * Assert that a parse fails with a BadCommandArgumentsException.
   * @param argsList command line to parse
   */
  private void assertParseFails(List<String> argsList) throws SliderException {
    try {
      ClientArgs clientArgs = createClientArgs(argsList);
      Assert.fail("expected an exception, got " + clientArgs);
    } catch (BadCommandArgumentsException ignored) {
      //expected
    }
  }

  /**
   * Build and parse client args, after adding the base args list.
   * @param argsList command line to parse
   */
  public ClientArgs createClientArgs(List<String> argsList)
      throws SliderException {
    ClientArgs serviceArgs = new ClientArgs(argsList);
    serviceArgs.parse();
    return serviceArgs;
  }

  /**
   * Parse a create command line and return its action arguments.
   * @param argsList command line to parse
   * @return the create action args, never null
   */
  public ActionCreateArgs createAction(List<String> argsList)
      throws SliderException {
    ClientArgs ca = createClientArgs(argsList);
    assertEquals(ACTION_CREATE, ca.getAction());
    ActionCreateArgs args = ca.getActionCreateArgs();
    assertNotNull(args);
    return args;
  }

  @Test
  public void testSingleRoleArg() throws Throwable {
    ActionCreateArgs createArgs = createAction(Arrays.asList(
        ACTION_CREATE, "cluster1",
        ARG_COMPONENT, "master", "5"
    ));
    List<String> tuples = createArgs.getComponentTuples();
    assertEquals(2, tuples.size());
    Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles", tuples);
    assertEquals("5", roleMap.get("master"));
  }

  @Test
  public void testNoRoleArg() throws Throwable {
    ActionCreateArgs createArgs = createAction(Arrays.asList(
        ACTION_CREATE, "cluster1"
    ));
    List<String> tuples = createArgs.getComponentTuples();
    Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles", tuples);
    assertNull(roleMap.get("master"));
  }

  @Test
  public void testMultiRoleArgBuild() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_BUILD, "cluster1",
        ARG_COMPONENT, "master", "1",
        ARG_COMPONENT, "worker", "2"
    ));
    assertEquals(ACTION_BUILD, ca.getAction());
    assertTrue(ca.getCoreAction() instanceof ActionBuildArgs);
    assertTrue(ca.getBuildingActionArgs() instanceof ActionBuildArgs);
    AbstractClusterBuildingActionArgs args = ca.getActionBuildArgs();
    List<String> tuples = args.getComponentTuples();
    assertEquals(4, tuples.size());
    Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles", tuples);
    assertEquals("1", roleMap.get("master"));
    assertEquals("2", roleMap.get("worker"));
  }

  @Test
  public void testArgUpdate() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_UPDATE, "cluster1",
        ARG_APPDEF, "app.json"
    ));
    assertEquals(ACTION_UPDATE, ca.getAction());
    assertTrue(ca.getCoreAction() instanceof ActionUpdateArgs);
    assertTrue(ca.getActionUpdateArgs() instanceof ActionUpdateArgs);
    AbstractClusterBuildingActionArgs args = ca.getActionUpdateArgs();
    assertNotNull(args.appDef);
  }

  @Test
  public void testFlexArgs() throws Throwable {
    ClientArgs ca = createClientArgs(Arrays.asList(
        ACTION_FLEX, "cluster1",
        ARG_COMPONENT, "master", "1",
        ARG_COMPONENT, "worker", "2"
    ));
    assertTrue(ca.getCoreAction() instanceof ActionFlexArgs);
    List<String> tuples = ca.getActionFlexArgs().getComponentTuples();
    assertEquals(4, tuples.size());
    Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles", tuples);
    assertEquals("1", roleMap.get("master"));
    assertEquals("2", roleMap.get("worker"));
  }

  /**
   * Duplicate component names must be rejected when building the role map.
   */
  @Test
  public void testDuplicateRole() throws Throwable {
    ActionCreateArgs createArgs = createAction(Arrays.asList(
        ACTION_CREATE, "cluster1",
        ARG_COMPONENT, "master", "1",
        ARG_COMPONENT, "master", "2"
    ));
    List<String> tuples = createArgs.getComponentTuples();
    assertEquals(4, tuples.size());
    try {
      Map<String, String> roleMap = ArgOps.convertTupleListToMap(
          "roles",
          tuples);
      Assert.fail("got a role map " + roleMap + " not a failure");
    } catch (BadCommandArgumentsException expected) {
      assertTrue(expected.getMessage().contains(ErrorStrings
          .ERROR_DUPLICATE_ENTRY));
    }
  }

  /**
   * An odd number of tuple entries cannot form (name, count) pairs and must
   * fail to parse.
   */
  @Test
  public void testOddRoleCount() throws Throwable {
    ActionCreateArgs createArgs = createAction(Arrays.asList(
        ACTION_CREATE, "cluster1",
        ARG_COMPONENT, "master", "1",
        ARG_COMPONENT, "master", "2"
    ));
    List<String> tuples = createArgs.getComponentTuples();
    tuples.add("loggers");
    assertEquals(5, tuples.size());
    try {
      Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles",
          tuples);
      Assert.fail("got a role map " + roleMap + " not a failure");
    } catch (BadCommandArgumentsException expected) {
      assertTrue(expected.getMessage().contains(ErrorStrings
          .ERROR_PARSE_FAILURE));
    }
  }

  /**
   * Create some role-opt client args, so that multiple tests can use it.
   * @return the args
   */
  public ActionCreateArgs createRoleOptClientArgs() throws SliderException {
    ActionCreateArgs createArgs = createAction(Arrays.asList(
        ACTION_CREATE, "cluster1",
        ARG_COMPONENT, "master", "1",
        ARG_COMP_OPT, "master", "cheese", "swiss",
        ARG_COMP_OPT, "master", "env.CHEESE", "cheddar",
        ARG_COMP_OPT, "master", ResourceKeys.YARN_CORES, "3",
        ARG_COMPONENT, "worker", "2",
        ARG_COMP_OPT, "worker", ResourceKeys.YARN_CORES, "2",
        ARG_COMP_OPT, "worker", RoleKeys.JVM_HEAP, "65536",
        ARG_COMP_OPT, "worker", "env.CHEESE", "stilton"
    ));
    return createArgs;
  }

  @Test
  public void testRoleOptionParse() throws Throwable {
    ActionCreateArgs createArgs = createRoleOptClientArgs();
    Map<String, Map<String, String>> tripleMaps = createArgs.getCompOptionMap();
    Map<String, String> workerOpts = tripleMaps.get("worker");
    assertEquals(3, workerOpts.size());
    assertEquals("2", workerOpts.get(ResourceKeys.YARN_CORES));
    assertEquals("65536", workerOpts.get(RoleKeys.JVM_HEAP));
    Map<String, String> masterOpts = tripleMaps.get("master");
    assertEquals(3, masterOpts.size());
    assertEquals("3", masterOpts.get(ResourceKeys.YARN_CORES));
  }

  @Test
  public void testRoleOptionsMerge() throws Throwable {
    ActionCreateArgs createArgs = createRoleOptClientArgs();
    Map<String, Map<String, String>> roleOpts = createArgs.getCompOptionMap();
    Map<String, Map<String, String>> clusterRoleMap = createEnvMap();
    SliderUtils.applyCommandLineRoleOptsToRoleMap(clusterRoleMap, roleOpts);
    // command-line options must override the pre-existing map entries
    Map<String, String> masterOpts = clusterRoleMap.get("master");
    assertEquals("swiss", masterOpts.get("cheese"));
    Map<String, String> workerOpts = clusterRoleMap.get("worker");
    assertEquals("stilton", workerOpts.get("env.CHEESE"));
  }

  @Test
  public void testEnvVariableApply() throws Throwable {
    ActionCreateArgs createArgs = createRoleOptClientArgs();
    Map<String, Map<String, String>> roleOpts = createArgs.getCompOptionMap();
    Map<String, Map<String, String>> clusterRoleMap = createEnvMap();
    SliderUtils.applyCommandLineRoleOptsToRoleMap(clusterRoleMap, roleOpts);
    Map<String, String> workerOpts = clusterRoleMap.get("worker");
    assertEquals("stilton", workerOpts.get("env.CHEESE"));
    // the "env." prefix is stripped when building the environment map
    Map<String, String> envmap = SliderUtils.buildEnvMap(workerOpts);
    assertEquals("stilton", envmap.get("CHEESE"));
  }

  /**
   * Static compiler complaining about matching LinkedHashMap with Map,
   * so some explicit creation here.
   * @return a map of maps
   */
  public Map<String, Map<String, String>> createEnvMap() {
    Map<String, String> cheese = new HashMap<>();
    cheese.put("cheese", "french");
    Map<String, String> envCheese = new HashMap<>();
    envCheese.put("env.CHEESE", "french");
    Map<String, Map<String, String>> envMap = new HashMap<>();
    envMap.put("master", cheese);
    envMap.put("worker", envCheese);
    return envMap;
  }
}

View File

@ -0,0 +1,405 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.client;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.slider.common.params.Arguments;
import org.apache.slider.common.params.ClientArgs;
import org.apache.slider.common.tools.SliderFileSystem;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.core.exceptions.BadCommandArgumentsException;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.core.main.ServiceLauncher;
import org.apache.slider.utils.SliderTestBase;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
/**
* Test a keytab installation.
*/
public class TestKeytabCommandOptions extends SliderTestBase {

  // local filesystem wrapper shared with TestSliderClient; reset per test
  private static SliderFileSystem testFileSystem;

  @Before
  public void setupFilesystem() throws IOException {
    org.apache.hadoop.fs.FileSystem fileSystem = new RawLocalFileSystem();
    YarnConfiguration configuration = SliderUtils.createConfiguration();
    fileSystem.setConf(configuration);
    testFileSystem = new SliderFileSystem(fileSystem, configuration);
    // wipe keytabs installed by any previous test run
    File testFolderDir = new File(testFileSystem
        .buildKeytabInstallationDirPath("").toUri().getPath());
    FileUtils.deleteDirectory(testFolderDir);
  }

  /**
   * Create a local temp file with random contents to stand in for a keytab.
   * @return the mock keytab file
   */
  private File createMockKeytab() throws IOException {
    File localKeytab =
        FileUtil.createLocalTempFile(getTempLocation(), "test", true);
    String contents = UUID.randomUUID().toString();
    FileUtils.write(localKeytab, contents);
    return localKeytab;
  }

  /**
   * Run the keytab install action for the given local keytab and folder.
   * @param conf configuration to launch with
   * @param localKeytab keytab file to install
   * @param folder destination folder name
   */
  private void installKeytab(YarnConfiguration conf, File localKeytab,
      String folder) throws Throwable {
    launch(TestSliderClient.class,
        conf,
        Arrays.asList(
            ClientArgs.ACTION_KEYTAB,
            ClientArgs.ARG_KEYTABINSTALL,
            ClientArgs.ARG_KEYTAB,
            localKeytab.getAbsolutePath(),
            Arguments.ARG_FOLDER,
            folder));
  }

  /**
   * Assert a keytab was copied into the expected installation folder with
   * identical contents.
   * @param localKeytab source keytab
   * @param folder folder it was installed into
   * @return the installed keytab file
   */
  private File assertKeytabInstalled(File localKeytab, String folder)
      throws IOException {
    Path installedPath = new Path(testFileSystem
        .buildKeytabInstallationDirPath(folder), localKeytab.getName());
    File installedKeytab = new File(installedPath.toUri().getPath());
    assertTrue(installedKeytab.exists());
    assertEquals(FileUtils.readFileToString(installedKeytab),
        FileUtils.readFileToString(localKeytab));
    return installedKeytab;
  }

  @Test
  public void testInstallKeytab() throws Throwable {
    File localKeytab = createMockKeytab();
    YarnConfiguration conf = SliderUtils.createConfiguration();
    installKeytab(conf, localKeytab, "testFolder");
    assertKeytabInstalled(localKeytab, "testFolder");
  }

  @Test
  public void testInstallThenDeleteKeytab() throws Throwable {
    File localKeytab = createMockKeytab();
    YarnConfiguration conf = SliderUtils.createConfiguration();
    installKeytab(conf, localKeytab, "testFolder");
    File installedKeytab = assertKeytabInstalled(localKeytab, "testFolder");
    launch(TestSliderClient.class,
        conf,
        Arrays.asList(
            ClientArgs.ACTION_KEYTAB,
            ClientArgs.ARG_KEYTABDELETE,
            ClientArgs.ARG_KEYTAB,
            localKeytab.getName(),
            Arguments.ARG_FOLDER,
            "testFolder"));
    assertFalse(installedKeytab.exists());
  }

  @Test
  public void testInstallThenListKeytab() throws Throwable {
    File localKeytab = createMockKeytab();
    YarnConfiguration conf = SliderUtils.createConfiguration();
    installKeytab(conf, localKeytab, "testFolder");
    File installedKeytab = assertKeytabInstalled(localKeytab, "testFolder");
    // install an additional copy into another folder to test listing
    installKeytab(conf, localKeytab, "testFolder2");
    TestAppender testAppender = new TestAppender();
    Logger.getLogger(SliderClient.class).addAppender(testAppender);
    try {
      launch(TestSliderClient.class,
          conf,
          Arrays.asList(
              ClientArgs.ACTION_KEYTAB,
              ClientArgs.ARG_KEYTABLIST)
      );
      // 3 events: presumably a header line plus one line per installed
      // keytab — NOTE(review): confirm against SliderClient's list output
      assertEquals(3, testAppender.events.size());
      String msg = (String) testAppender.events.get(1).getMessage();
      assertTrue(msg.contains("/.slider/keytabs/testFolder"));
      assertTrue(msg.endsWith(installedKeytab.getName()));
      msg = (String) testAppender.events.get(2).getMessage();
      assertTrue(msg.contains("/.slider/keytabs/testFolder"));
      assertTrue(msg.endsWith(installedKeytab.getName()));
    } finally {
      Logger.getLogger(SliderClient.class).removeAppender(testAppender);
    }
    // now listing while specifying the folder name
    testAppender = new TestAppender();
    Logger.getLogger(SliderClient.class).addAppender(testAppender);
    try {
      launch(TestSliderClient.class,
          conf,
          Arrays.asList(
              ClientArgs.ACTION_KEYTAB,
              ClientArgs.ARG_KEYTABLIST,
              Arguments.ARG_FOLDER,
              "testFolder"));
      assertEquals(2, testAppender.events.size());
      String msg = (String) testAppender.events.get(1).getMessage();
      assertTrue(msg.contains("/.slider/keytabs/testFolder/" +
          installedKeytab.getName()));
    } finally {
      Logger.getLogger(SliderClient.class).removeAppender(testAppender);
    }
  }

  @Test
  public void testDeleteNonExistentKeytab() throws Throwable {
    YarnConfiguration conf = SliderUtils.createConfiguration();
    try {
      launch(TestSliderClient.class,
          conf,
          Arrays.asList(
              ClientArgs.ACTION_KEYTAB,
              ClientArgs.ARG_KEYTABDELETE,
              ClientArgs.ARG_KEYTAB,
              "HeyIDontExist.keytab",
              Arguments.ARG_FOLDER,
              "testFolder"));
      fail("expected BadCommandArgumentsException from launch");
    } catch (BadCommandArgumentsException e) {
      // expected
    }
  }

  @Test
  public void testInstallKeytabWithNoFolder() throws Throwable {
    File localKeytab = createMockKeytab();
    YarnConfiguration conf = SliderUtils.createConfiguration();
    try {
      // --folder is deliberately omitted
      launch(TestSliderClient.class,
          conf,
          Arrays.asList(
              ClientArgs.ACTION_KEYTAB,
              ClientArgs.ARG_KEYTABINSTALL,
              ClientArgs.ARG_KEYTAB,
              localKeytab.getAbsolutePath()));
      fail("expected BadCommandArgumentsException from launch");
    } catch (BadCommandArgumentsException e) {
      // expected
    }
  }

  @Test
  public void testInstallKeytabWithNoKeytab() throws Throwable {
    YarnConfiguration conf = SliderUtils.createConfiguration();
    try {
      // --keytab is deliberately omitted
      launch(TestSliderClient.class,
          conf,
          Arrays.asList(
              ClientArgs.ACTION_KEYTAB,
              ClientArgs.ARG_KEYTABINSTALL,
              ClientArgs.ARG_FOLDER,
              "testFolder"));
      fail("expected BadCommandArgumentsException from launch");
    } catch (BadCommandArgumentsException e) {
      // expected
    }
  }

  @Test
  public void testInstallKeytabAllowingOverwrite() throws Throwable {
    File localKeytab = createMockKeytab();
    YarnConfiguration conf = SliderUtils.createConfiguration();
    installKeytab(conf, localKeytab, "testFolder");
    File installedKeytab = assertKeytabInstalled(localKeytab, "testFolder");
    // a second install with --overwrite must succeed
    launch(TestSliderClient.class,
        conf,
        Arrays.asList(
            ClientArgs.ACTION_KEYTAB,
            ClientArgs.ARG_KEYTABINSTALL,
            ClientArgs.ARG_KEYTAB,
            localKeytab.getAbsolutePath(),
            Arguments.ARG_FOLDER,
            "testFolder",
            Arguments.ARG_OVERWRITE)
    );
    assertTrue(installedKeytab.exists());
    assertEquals(FileUtils.readFileToString(installedKeytab),
        FileUtils.readFileToString(localKeytab));
  }

  @Test
  public void testInstallKeytabNotAllowingOverwrite() throws Throwable {
    File localKeytab = createMockKeytab();
    YarnConfiguration conf = SliderUtils.createConfiguration();
    installKeytab(conf, localKeytab, "testFolder");
    assertKeytabInstalled(localKeytab, "testFolder");
    try {
      // a second install without --overwrite must be rejected
      installKeytab(conf, localKeytab, "testFolder");
      fail("expected BadCommandArgumentsException from launch");
    } catch (BadCommandArgumentsException e) {
      // expected
    }
  }

  @Test
  public void testInstallKeytabWithMissingKeytab() throws Throwable {
    YarnConfiguration conf = SliderUtils.createConfiguration();
    try {
      // the named keytab file does not exist locally
      launch(TestSliderClient.class,
          conf,
          Arrays.asList(
              ClientArgs.ACTION_KEYTAB,
              ClientArgs.ARG_KEYTABINSTALL,
              ClientArgs.ARG_KEYTAB,
              "HeyIDontExist.keytab",
              Arguments.ARG_FOLDER,
              "testFolder"));
      fail("expected BadCommandArgumentsException from launch");
    } catch (BadCommandArgumentsException e) {
      // expected
    }
  }

  /**
   * @return the build output directory, used for temp file creation
   */
  private File getTempLocation() {
    return new File(System.getProperty("user.dir") + "/target");
  }

  /**
   * Test SliderClient with overridden filesystem.
   */
  public static class TestSliderClient extends SliderClient {
    public TestSliderClient() {
      super();
    }

    @Override
    protected void initHadoopBinding() throws IOException, SliderException {
      // bypass the real Hadoop binding and use the shared test filesystem
      sliderFileSystem = testFileSystem;
    }
  }

  /**
   * Appender that captures logging events.
   */
  public static class TestAppender extends AppenderSkeleton {
    private List<LoggingEvent> events = new ArrayList<>();

    public void close() {}

    public boolean requiresLayout() {
      return false;
    }

    @Override
    protected void append(LoggingEvent event) {
      events.add(event);
    }
  }
}

View File

@ -0,0 +1,142 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.client;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.slider.common.SliderXmlConfKeys;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.server.appmaster.model.mock.MockApplicationId;
import org.apache.slider.utils.SliderTestBase;
import org.easymock.EasyMock;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.easymock.PowerMock;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Map;
/**
* Test slider client methods.
*/
@RunWith(PowerMockRunner.class)
@PrepareForTest(SliderUtils.class)
public class TestSliderClientMethods extends SliderTestBase {
  protected static final Logger LOG =
      LoggerFactory.getLogger(TestSliderClientMethods.class);

  static final String AM_ENV = "LD_LIBRARY_PATH";
  static final String PLACEHOLDER_KEY = "${distro.version}";
  static final String PLACEHOLDER_SYSTEM_KEY = "DISTRO_VERSION";
  static final String PLACEHOLDER_VALUE = "1.0.0";
  static final String AM_ENV_2 = "PATH";
  static final String PLACEHOLDER_KEY_2 = "${native.version}";
  static final String PLACEHOLDER_SYSTEM_KEY_2 = "NATIVE_VERSION";
  static final String PLACEHOLDER_VALUE_2 = "2.0.0";

  @Test
  public void testGeneratePlaceholderKeyValueMap() throws Throwable {
    TestSliderClient testSliderClient = new TestSliderClient();
    // stub the env lookup so the test does not depend on the real environment
    PowerMock.mockStatic(System.class);
    EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY))
        .andReturn(PLACEHOLDER_VALUE).anyTimes();
    PowerMock.replayAll();

    Map<String, String> placeholders = testSliderClient
        .generatePlaceholderKeyValueMap(AM_ENV + "=/usr/lib/" +
            PLACEHOLDER_KEY);
    Assert.assertTrue(placeholders.containsKey(PLACEHOLDER_KEY));
    Assert.assertEquals("Should be equal", PLACEHOLDER_VALUE,
        placeholders.get(PLACEHOLDER_KEY));

    PowerMock.verifyAll();
    LOG.info("Placeholders = {}", placeholders);
  }

  @Test
  public void testSetAmLaunchEnv() throws Throwable {
    TestSliderClient testSliderClient = new TestSliderClient();
    YarnConfiguration conf = SliderUtils.createConfiguration();
    conf.set(SliderXmlConfKeys.KEY_AM_LAUNCH_ENV, AM_ENV + "=/usr/lib/"
        + PLACEHOLDER_KEY);
    PowerMock.mockStatic(System.class);
    EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY))
        .andReturn(PLACEHOLDER_VALUE);
    PowerMock.replayAll();

    Map<String, String> amLaunchEnv = testSliderClient.getAmLaunchEnv(conf);
    Assert.assertNotNull(amLaunchEnv);
    Assert.assertNotNull(amLaunchEnv.get(AM_ENV));
    // the placeholder must be resolved and appended to the existing env var
    // (platform-specific reference/separator syntax)
    Assert.assertEquals("Should be equal",
        (Shell.WINDOWS ? "%" + AM_ENV + "%;" : "$" + AM_ENV + ":") +
            "/usr/lib/" + PLACEHOLDER_VALUE,
        amLaunchEnv.get(AM_ENV));

    PowerMock.verifyAll();
    LOG.info("amLaunchEnv = {}", amLaunchEnv);
  }

  @Test
  public void testSetAmLaunchEnvMulti() throws Throwable {
    TestSliderClient testSliderClient = new TestSliderClient();
    YarnConfiguration conf = SliderUtils.createConfiguration();
    conf.set(SliderXmlConfKeys.KEY_AM_LAUNCH_ENV, AM_ENV + "=/usr/lib/"
        + PLACEHOLDER_KEY + "," + AM_ENV_2 + "=/usr/bin/" + PLACEHOLDER_KEY_2);
    PowerMock.mockStatic(System.class);
    EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY))
        .andReturn(PLACEHOLDER_VALUE);
    EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY_2))
        .andReturn(PLACEHOLDER_VALUE_2);
    PowerMock.replayAll();

    Map<String, String> amLaunchEnv = testSliderClient.getAmLaunchEnv(conf);
    Assert.assertNotNull(amLaunchEnv);
    Assert.assertEquals("Should have 2 envs", 2, amLaunchEnv.size());
    Assert.assertNotNull(amLaunchEnv.get(AM_ENV));
    Assert.assertEquals("Should be equal",
        (Shell.WINDOWS ? "%" + AM_ENV + "%;" : "$" + AM_ENV + ":") +
            "/usr/lib/" + PLACEHOLDER_VALUE,
        amLaunchEnv.get(AM_ENV));
    Assert.assertNotNull(amLaunchEnv.get(AM_ENV_2));
    Assert.assertEquals("Should be equal",
        (Shell.WINDOWS ? "%" + AM_ENV_2 + "%;" : "$" + AM_ENV_2 + ":") +
            "/usr/bin/" + PLACEHOLDER_VALUE_2,
        amLaunchEnv.get(AM_ENV_2));

    PowerMock.verifyAll();
    LOG.info("amLaunchEnv = {}", amLaunchEnv);
  }

  /**
   * SliderClient that skips the real YARN submission and returns a mock
   * application id.
   */
  static class TestSliderClient extends SliderClient {
    @Override
    public ApplicationId submitApplication(ApplicationSubmissionContext
        context)
        throws YarnException, IOException {
      return new MockApplicationId(1);
    }
  }
}

View File

@ -0,0 +1,124 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.client;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.slider.common.params.ActionTokensArgs;
import org.apache.slider.common.params.Arguments;
import org.apache.slider.common.params.SliderActions;
import org.apache.slider.core.exceptions.BadClusterStateException;
import org.apache.slider.core.exceptions.NotFoundException;
import org.apache.slider.utils.SliderTestBase;
import org.junit.Test;
import java.util.Arrays;
/**
 * Test the argument parsing/validation logic.
 */
public class TestSliderTokensCommand extends SliderTestBase {

  private static YarnConfiguration config = createTestConfig();

  public static YarnConfiguration createTestConfig() {
    YarnConfiguration yarnConf = new YarnConfiguration();
    yarnConf.set(YarnConfiguration.RM_ADDRESS, "127.0.0.1:8032");
    return yarnConf;
  }

  /**
   * Launch the tokens action expecting a failure whose text matches
   * {@code errorText}.
   *
   * @param errorText expected error text
   * @param args command-line arguments for the action
   * @return the caught exception, for further type checks by the caller
   */
  private Throwable expectTokensFailure(String errorText, String... args)
      throws Throwable {
    return launchExpectingException(SliderClient.class, config, errorText,
        Arrays.asList(args));
  }

  @Test
  public void testBadSourceArgs() throws Throwable {
    // --source and --out together are contradictory
    expectTokensFailure(ActionTokensArgs.DUPLICATE_ARGS,
        SliderActions.ACTION_TOKENS,
        Arguments.ARG_SOURCE, "target/tokens.bin",
        Arguments.ARG_OUTPUT, "target/tokens.bin");
  }

  @Test
  public void testKTNoPrincipal() throws Throwable {
    // a keytab without a principal is incomplete
    expectTokensFailure(ActionTokensArgs.MISSING_KT_PROVIDER,
        SliderActions.ACTION_TOKENS,
        Arguments.ARG_KEYTAB, "target/keytab");
  }

  @Test
  public void testPrincipalNoKT() throws Throwable {
    // a principal without a keytab is incomplete
    expectTokensFailure(ActionTokensArgs.MISSING_KT_PROVIDER,
        SliderActions.ACTION_TOKENS,
        Arguments.ARG_PRINCIPAL, "bob@REALM");
  }

  /**
   * A missing keytab is an error.
   * @throws Throwable
   */
  @Test
  public void testMissingKT() throws Throwable {
    Throwable ex = expectTokensFailure(TokensOperation.E_NO_KEYTAB,
        SliderActions.ACTION_TOKENS,
        Arguments.ARG_PRINCIPAL, "bob@REALM",
        Arguments.ARG_KEYTAB, "target/keytab");
    if (!(ex instanceof NotFoundException)) {
      throw ex;
    }
  }

  @Test
  public void testMissingSourceFile() throws Throwable {
    Throwable ex = expectTokensFailure(TokensOperation.E_MISSING_SOURCE_FILE,
        SliderActions.ACTION_TOKENS,
        Arguments.ARG_SOURCE, "target/tokens.bin");
    if (!(ex instanceof NotFoundException)) {
      throw ex;
    }
  }

  @Test
  public void testListHarmlessWhenInsecure() throws Throwable {
    // plain listing succeeds even on an insecure cluster
    execSliderCommand(0, config, Arrays.asList(SliderActions.ACTION_TOKENS));
  }

  @Test
  public void testCreateFailsWhenInsecure() throws Throwable {
    Throwable ex = expectTokensFailure(TokensOperation.E_INSECURE,
        SliderActions.ACTION_TOKENS,
        Arguments.ARG_OUTPUT, "target/tokens.bin");
    if (!(ex instanceof BadClusterStateException)) {
      throw ex;
    }
  }
}

View File

@ -0,0 +1,122 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.junit.Assert;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
/**
 * Test cluster name validation.
 */
public class TestClusterNames {

  /** Assert the validator accepts {@code name}. */
  void assertValidName(String name) {
    boolean valid = SliderUtils.isClusternameValid(name);
    Assert.assertTrue("Clustername '" + name + "' mistakenly declared invalid",
        valid);
  }

  /** Assert the validator rejects {@code name}. */
  void assertInvalidName(String name) {
    boolean valid = SliderUtils.isClusternameValid(name);
    // BUG FIX: the Groovy->Java port left escaped quotes in the message,
    // so it printed the literal text '\" + name + \"' instead of the
    // offending name; concatenate the name properly.
    Assert.assertFalse("Clustername '" + name + "' mistakenly declared valid",
        valid);
  }

  /** Assert every name in the list is rejected. */
  void assertInvalid(List<String> names) {
    for (String name : names) {
      assertInvalidName(name);
    }
  }

  /** Assert every name in the list is accepted. */
  void assertValid(List<String> names) {
    for (String name : names) {
      assertValidName(name);
    }
  }

  @Test
  public void testEmptyName() throws Throwable {
    assertInvalidName("");
  }

  @Test
  public void testSpaceName() throws Throwable {
    assertInvalidName(" ");
  }

  @Test
  public void testLeadingHyphen() throws Throwable {
    assertInvalidName("-hyphen");
  }

  @Test
  public void testTitleLetters() throws Throwable {
    assertInvalidName("Title");
  }

  @Test
  public void testCapitalLetters() throws Throwable {
    assertInvalidName("UPPER-CASE-CLUSTER");
  }

  @Test
  public void testInnerBraced() throws Throwable {
    assertInvalidName("a[a");
  }

  @Test
  public void testLeadingBrace() throws Throwable {
    assertInvalidName("[");
  }

  @Test
  public void testNonalphaLeadingChars() throws Throwable {
    assertInvalid(Arrays.asList(
        "[a", "#", "@", "=", "*", "."
    ));
  }

  @Test
  public void testNonalphaInnerChars() throws Throwable {
    assertInvalid(Arrays.asList(
        "a[a", "b#", "c@", "d=", "e*", "f.", "g ", "h i"
    ));
  }

  @Test
  public void testClusterValid() throws Throwable {
    assertValidName("cluster");
  }

  @Test
  public void testValidNames() throws Throwable {
    assertValid(Arrays.asList(
        "cluster",
        "cluster1",
        "very-very-very-long-cluster-name",
        "c1234567890"
    ));
  }
}

View File

@ -0,0 +1,57 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.slider.utils.YarnMiniClusterTestBase;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.Map;
/**
 * Test config helper.
 */
public class TestConfigHelper extends YarnMiniClusterTestBase {

  /**
   * A Configuration loaded from an XML resource must be iterable, and its
   * entries must be settable on a second Configuration.
   */
  @Test
  public void testConfigLoaderIteration() throws Throwable {
    String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\" " +
        "standalone=\"no\"?><configuration><property><name>key</name>" +
        "<value>value</value><source>programatically</source></property>" +
        "</configuration>";
    InputStream input = new ByteArrayInputStream(xml.getBytes("UTF8"));
    Configuration source = new Configuration(false);
    source.addResource(input);
    // copy every entry across, tagging the origin as "src"
    Configuration copy = new Configuration(false);
    for (Map.Entry<String, String> property : source) {
      copy.set(property.getKey(), property.getValue(), "src");
    }
  }

  /**
   * Registering deprecated config items must not fail.
   */
  @Test
  public void testConfigDeprecation() throws Throwable {
    ConfigHelper.registerDeprecatedConfigItems();
    Configuration conf = new Configuration(false);
    // test deprecated items here
  }
}

View File

@ -0,0 +1,57 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.slider.utils.YarnMiniClusterTestBase;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URI;
/**
 * Test config helper loading configuration from HDFS.
 */
public class TestConfigHelperHDFS extends YarnMiniClusterTestBase {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestConfigHelperHDFS.class);

  /**
   * Round-trip a one-entry configuration through a mini HDFS cluster:
   * save it, load it back, and verify the entry survived.
   */
  @Test
  public void testConfigHelperHDFS() throws Throwable {
    YarnConfiguration clusterConf = getConfiguration();
    createMiniHDFSCluster("testConfigHelperHDFS", clusterConf);

    // build the configuration to persist
    Configuration original = new Configuration(false);
    original.set("key", "value");

    // write it to the root of the mini cluster's filesystem
    URI fsUri = new URI(getFsDefaultName());
    Path confPath = new Path(new Path(fsUri), "conf.xml");
    FileSystem dfs = FileSystem.get(fsUri, clusterConf);
    ConfigHelper.saveConfig(dfs, confPath, original);

    //load time
    Configuration loaded = ConfigHelper.loadConfiguration(dfs, confPath);
    LOG.info(ConfigHelper.dumpConfigToString(loaded));
    assertEquals("value", loaded.get("key"));
  }
}

View File

@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.apache.slider.utils.SliderTestBase;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Test execution environment.
 *
 * Each test delegates to a {@code SliderUtils} validation routine and
 * passes if that routine does not throw.
 */
public class TestExecutionEnvironment extends SliderTestBase {
  protected static final Logger LOG =
      LoggerFactory.getLogger(TestExecutionEnvironment.class);

  /** Client-side environment validation must pass on the build host. */
  @Test
  public void testClientEnv() throws Throwable {
    SliderUtils.validateSliderClientEnvironment(LOG);
  }

  // NOTE(review): "maybe" suggests this is a no-op off Windows — confirm
  // against SliderUtils.maybeVerifyWinUtilsValid().
  @Test
  public void testWinutils() throws Throwable {
    SliderUtils.maybeVerifyWinUtilsValid();
  }

  /** Server-side environment validation, with dependency checks enabled. */
  @Test
  public void testServerEnv() throws Throwable {
    SliderUtils.validateSliderServerEnvironment(LOG, true);
  }

  /** Server-side environment validation, with dependency checks disabled. */
  @Test
  public void testServerEnvNoDependencies() throws Throwable {
    SliderUtils.validateSliderServerEnvironment(LOG, false);
  }

  /** OpenSSL environment validation must pass. */
  @Test
  public void testopenSSLEnv() throws Throwable {
    SliderUtils.validateOpenSSLEnv(LOG);
  }

  /** Python environment validation must pass. */
  @Test
  public void testValidatePythonEnv() throws Throwable {
    SliderUtils.validatePythonEnv(LOG);
  }

  /** The Hadoop native libraries must be loadable on this host. */
  @Test
  public void testNativeLibs() throws Throwable {
    assertNativeLibrariesPresent();
  }
}

View File

@ -0,0 +1,49 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.slider.utils.SliderTestBase;
import org.junit.Test;
import java.net.URI;
/**
 * Test slider utils.
 */
public class TestMiscSliderUtils extends SliderTestBase {

  public static final String CLUSTER1 = "cluster1";

  /**
   * Creating an app-instance temp path must materialize it on the local
   * filesystem, and purging the instance must remove it again.
   */
  @Test
  public void testPurgeTempDir() throws Throwable {
    Configuration conf = new Configuration();
    FileSystem localFs = FileSystem.get(new URI("file:///"), conf);
    SliderFileSystem sliderFs = new SliderFileSystem(localFs, conf);

    Path tempPath = sliderFs.createAppInstanceTempPath(CLUSTER1, "001");
    assertTrue(localFs.exists(tempPath));

    sliderFs.purgeAppInstanceTempFiles(CLUSTER1);
    assertFalse(localFs.exists(tempPath));
  }
}

View File

@ -0,0 +1,184 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.core.exceptions.SliderException;
import org.junit.Test;
import java.net.ServerSocket;
import java.util.Arrays;
import java.util.List;
import static org.junit.Assert.*;
/**
 * Test finding a port in a range.
 */
public class TestPortScan {

  /**
   * A port bound by a live server socket must report as unavailable,
   * and scanning upward from it must find a free port.
   */
  @Test
  public void testScanPorts() throws Throwable {
    ServerSocket server = new ServerSocket(0);
    try {
      int serverPort = server.getLocalPort();
      assertFalse(SliderUtils.isPortAvailable(serverPort));
      int port = SliderUtils.findFreePort(serverPort, 10);
      assertTrue(port > 0 && serverPort < port);
    } finally {
      server.close();
    }
  }

  /** Mixed single ports and ranges, with stray whitespace, parse in order. */
  @Test
  public void testRequestedPortsLogic() throws Throwable {
    PortScanner portScanner = new PortScanner();
    portScanner.setPortRange("5,6,8-10, 11,14 ,20 - 22");
    List<Integer> ports = portScanner.getRemainingPortsToCheck();
    List<Integer> expectedPorts =
        Arrays.asList(5, 6, 8, 9, 10, 11, 14, 20, 21, 22);
    assertEquals(expectedPorts, ports);
  }

  /** Out-of-order input yields the same sorted port list. */
  @Test
  public void testRequestedPortsOutOfOrder() throws Throwable {
    PortScanner portScanner = new PortScanner();
    portScanner.setPortRange("8-10,5,6, 11,20 - 22, 14 ");
    List<Integer> ports = portScanner.getRemainingPortsToCheck();
    List<Integer> expectedPorts =
        Arrays.asList(5, 6, 8, 9, 10, 11, 14, 20, 21, 22);
    assertEquals(expectedPorts, ports);
  }

  /** The scanner must skip the bound port and pick another in the range. */
  @Test
  public void testFindAvailablePortInRange() throws Throwable {
    ServerSocket server = new ServerSocket(0);
    try {
      int serverPort = server.getLocalPort();
      PortScanner portScanner = new PortScanner();
      portScanner.setPortRange("" + (serverPort-1) + "-" + (serverPort + 3));
      int port = portScanner.getAvailablePort();
      assertNotEquals(port, serverPort);
      assertTrue(port >= serverPort -1 && port <= serverPort + 3);
    } finally {
      server.close();
    }
  }

  /** The scanner must skip the bound port when given an explicit list. */
  @Test
  public void testFindAvailablePortInList() throws Throwable {
    ServerSocket server = new ServerSocket(0);
    try {
      int serverPort = server.getLocalPort();
      PortScanner portScanner = new PortScanner();
      portScanner.setPortRange("" + (serverPort-1) + ", " + (serverPort + 1));
      int port = portScanner.getAvailablePort();
      assertNotEquals(port, serverPort);
      assertTrue(port == serverPort -1 || port == serverPort + 1);
    } finally {
      server.close();
    }
  }

  /** When every candidate port is bound, the scanner must fail. */
  @Test
  public void testNoAvailablePorts() throws Throwable {
    ServerSocket server1 = new ServerSocket(0);
    ServerSocket server2 = new ServerSocket(0);
    try {
      int serverPort1 = server1.getLocalPort();
      int serverPort2 = server2.getLocalPort();
      PortScanner portScanner = new PortScanner();
      portScanner.setPortRange("" + serverPort1+ ", " + serverPort2);
      try {
        portScanner.getAvailablePort();
        fail("expected SliderException");
      } catch (SliderException e) {
        // expected
      }
    } finally {
      server1.close();
      server2.close();
    }
  }

  /** A port handed out must be removed from the remaining candidates. */
  @Test
  public void testPortRemovedFromRange() throws Throwable {
    ServerSocket server = new ServerSocket(0);
    try {
      int serverPort = server.getLocalPort();
      PortScanner portScanner = new PortScanner();
      portScanner.setPortRange("" + (serverPort-1) + "-" + (serverPort + 3));
      int port = portScanner.getAvailablePort();
      assertNotEquals(port, serverPort);
      assertTrue(port >= serverPort -1 && port <= serverPort + 3);
      assertFalse(portScanner.getRemainingPortsToCheck().contains(port));
    } finally {
      server.close();
    }
  }

  @Test(expected = BadConfigException.class)
  public void testBadRange() throws BadConfigException {
    PortScanner portScanner = new PortScanner();
    // note the em dash
    // BUG FIX: the em dash this test depends on was lost in the Groovy
    // port, leaving "20002010" (a single number) so the invalid separator
    // was never exercised; restore it.
    portScanner.setPortRange("2000—2010");
  }

  @Test(expected = BadConfigException.class)
  public void testEndBeforeStart() throws BadConfigException {
    PortScanner portScanner = new PortScanner();
    portScanner.setPortRange("2001-2000");
  }

  @Test(expected = BadConfigException.class)
  public void testEmptyRange() throws BadConfigException {
    PortScanner portScanner = new PortScanner();
    portScanner.setPortRange("");
  }

  @Test(expected = BadConfigException.class)
  public void testBlankRange() throws BadConfigException {
    PortScanner portScanner = new PortScanner();
    portScanner.setPortRange(" ");
  }

  /** A trailing comma is tolerated. */
  @Test
  public void testExtraComma() throws BadConfigException {
    PortScanner portScanner = new PortScanner();
    portScanner.setPortRange("2000-2001, ");
    List<Integer> ports = portScanner.getRemainingPortsToCheck();
    List<Integer> expectedPorts = Arrays.asList(2000, 2001);
    assertEquals(expectedPorts, ports);
  }

  /** Repeated and blank entries between commas are tolerated. */
  @Test
  public void testExtraCommas() throws BadConfigException {
    PortScanner portScanner = new PortScanner();
    portScanner.setPortRange("2000-2001,, ,2003,");
    List<Integer> ports = portScanner.getRemainingPortsToCheck();
    List<Integer> expectedPorts = Arrays.asList(2000, 2001, 2003);
    assertEquals(expectedPorts, ports);
  }
}

View File

@ -0,0 +1,62 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.slider.common.SliderXmlConfKeys;
import org.apache.slider.utils.SliderTestBase;
import org.junit.Test;
/**
 * Test slider file system.
 */
public class TestSliderFileSystem extends SliderTestBase {

  /** @return a vanilla configuration with no overrides */
  private static Configuration defaultConfiguration() {
    return new Configuration();
  }

  /** @return a default configuration with one extra key/value pair set */
  private static Configuration createConfigurationWithKV(String key, String
      value) {
    Configuration conf = defaultConfiguration();
    conf.set(key, value);
    return conf;
  }

  /**
   * Without a base-path override, the base application path defaults to
   * ".slider" under the filesystem home directory.
   */
  @Test
  public void testSliderBasePathDefaultValue() throws Throwable {
    Configuration configuration = defaultConfiguration();
    FileSystem fileSystem = FileSystem.get(configuration);
    SliderFileSystem fs2 = new SliderFileSystem(fileSystem, configuration);
    // FIX: expected value first, per the JUnit assertEquals(expected,
    // actual) contract, so failure messages read correctly
    assertEquals(new Path(fileSystem.getHomeDirectory(), ".slider"),
        fs2.getBaseApplicationPath());
  }

  /**
   * An explicit base-path setting overrides the default location.
   */
  @Test
  public void testSliderBasePathCustomValue() throws Throwable {
    Configuration configuration = createConfigurationWithKV(SliderXmlConfKeys
        .KEY_SLIDER_BASE_PATH, "/slider/cluster");
    FileSystem fileSystem = FileSystem.get(configuration);
    SliderFileSystem fs2 = new SliderFileSystem(fileSystem, configuration);
    assertEquals(new Path("/slider/cluster"), fs2.getBaseApplicationPath());
  }
}

View File

@ -0,0 +1,97 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.slider.utils.SliderTestUtils;
import org.junit.Test;
import org.junit.internal.AssumptionViolatedException;
/**
 * Test slider test utils.
 *
 * Verifies that {@code assume(...)} and {@code assumeBoolOption(...)}
 * raise {@link AssumptionViolatedException} exactly when the assumption
 * does not hold. The exact exception type matters here (it is what JUnit
 * uses to mark a test skipped), so the try/catch shapes are deliberate.
 */
public class TestSliderTestUtils extends SliderTestUtils {

  /** A true assumption must not raise. */
  @Test
  public void testAssumeTrue() throws Throwable {
    try {
      assume(true, "true");
    } catch (AssumptionViolatedException e) {
      // rewrap so the test FAILS rather than being skipped
      throw new Exception(e);
    }
  }

  /** A false assumption must raise AssumptionViolatedException. */
  @Test
  public void testAssumeFalse() throws Throwable {
    try {
      assume(false, "false");
      fail("expected an exception");
    } catch (AssumptionViolatedException ignored) {
      //expected
    }
  }

  /** Option set to "true" in the conf: assumption holds. */
  @Test
  public void testAssumeBoolOptionSetInConf() throws Throwable {
    Configuration conf = new Configuration(false);
    conf.set("key", "true");
    try {
      assumeBoolOption(conf, "key", false);
    } catch (AssumptionViolatedException e) {
      throw new Exception(e);
    }
  }

  /** Option unset, default true: assumption holds. */
  @Test
  public void testAssumeBoolOptionUnsetInConf() throws Throwable {
    Configuration conf = new Configuration(false);
    try {
      assumeBoolOption(conf, "key", true);
    } catch (AssumptionViolatedException e) {
      throw new Exception(e);
    }
  }

  /** Option explicitly "false": assumption must be violated. */
  @Test
  public void testAssumeBoolOptionFalseInConf() throws Throwable {
    Configuration conf = new Configuration(false);
    conf.set("key", "false");
    try {
      assumeBoolOption(conf, "key", true);
      fail("expected an exception");
    } catch (AssumptionViolatedException ignored) {
      //expected
    }
  }

  /** Option unset, default false: assumption must be violated. */
  @Test
  public void testAssumeBoolOptionFalseUnsetInConf() throws Throwable {
    Configuration conf = new Configuration(false);
    try {
      assumeBoolOption(conf, "key", false);
      fail("expected an exception");
    } catch (AssumptionViolatedException ignored) {
      //expected
    }
  }
}

View File

@ -17,13 +17,9 @@
package org.apache.slider.common.tools;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
import org.apache.slider.tools.TestUtility;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
@ -33,7 +29,6 @@ import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
@ -44,26 +39,6 @@ public class TestSliderUtils {
@Rule
public TemporaryFolder folder = new TemporaryFolder();
@Test
public void testGetMetaInfoStreamFromZip() throws Exception {
String zipFileName = TestUtility.createAppPackage(
folder,
"testpkg",
"test.zip",
"target/test-classes/org/apache/slider/common/tools/test");
Configuration configuration = new Configuration();
FileSystem fs = FileSystem.getLocal(configuration);
log.info("fs working dir is {}", fs.getWorkingDirectory().toString());
SliderFileSystem sliderFileSystem = new SliderFileSystem(fs, configuration);
InputStream stream = SliderUtils.getApplicationResourceInputStream(
sliderFileSystem.getFileSystem(),
new Path(zipFileName),
"metainfo.xml");
Assert.assertTrue(stream != null);
Assert.assertTrue(stream.available() > 0);
}
@Test
public void testTruncate() {
Assert.assertEquals(SliderUtils.truncate(null, 5), null);

View File

@ -0,0 +1,177 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.util.Shell;
import org.apache.slider.utils.YarnMiniClusterTestBase;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.net.URI;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;
/**
 * Test windows support.
 *
 * Most tests call {@code assumeWindows()} first and so are skipped on
 * other platforms.
 */
public class TestWindowsSupport extends YarnMiniClusterTestBase {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestWindowsSupport.class);

  /** Matches an optional leading slash followed by a drive letter + colon. */
  private static final Pattern HAS_DRIVE_LETTER_SPECIFIER =
      Pattern.compile("^/?[a-zA-Z]:");
  public static final String WINDOWS_FILE =
      "C:\\Users\\Administrator\\AppData\\Local\\Temp" +
      "\\junit3180177850133852404\\testpkg\\appdef_1.zip";

  private static boolean hasWindowsDrive(String path) {
    return HAS_DRIVE_LETTER_SPECIFIER.matcher(path).find();
  }

  /** @return index of the first path char after any drive specifier */
  private static int startPositionWithoutWindowsDrive(String path) {
    if (hasWindowsDrive(path)) {
      return path.charAt(0) == '/' ? 3 : 2;
    } else {
      return 0;
    }
  }

  @Test
  public void testHasWindowsDrive() throws Throwable {
    assertTrue(hasWindowsDrive(WINDOWS_FILE));
  }

  @Test
  public void testStartPosition() throws Throwable {
    assertEquals(2, startPositionWithoutWindowsDrive(WINDOWS_FILE));
  }

  /**
   * A windows-style path resolves to the local (checksum) filesystem, and
   * operations on the nonexistent file fail with FileNotFoundException.
   */
  @Test
  public void testPathHandling() throws Throwable {
    assumeWindows();

    Path path = new Path(WINDOWS_FILE);
    URI uri = path.toUri();
    // NOTE(review): the original Groovy test asserted
    // uri.getScheme() == "file"; confirm that holds before reinstating.
    assertNull(uri.getAuthority());

    Configuration conf = new Configuration();
    FileSystem localfs = FileSystem.get(uri, conf);
    assertTrue(localfs instanceof ChecksumFileSystem);
    try {
      FileStatus stat = localfs.getFileStatus(path);
      fail("expected an exception, got " + stat);
    } catch (FileNotFoundException fnfe) {
      // expected
    }
    try {
      // FIX: drop the unused local; only the thrown exception matters
      localfs.open(path);
    } catch (FileNotFoundException fnfe) {
      // expected
    }
  }

  /** Executing a nonexistent binary must surface FileNotFoundException. */
  @Test
  public void testExecNonexistentBinary() throws Throwable {
    assumeWindows();
    List<String> commands = Arrays.asList("undefined-application", "--version");
    try {
      exec(0, commands);
      fail("expected an exception");
    } catch (ServiceStateException e) {
      if (!(e.getCause() instanceof FileNotFoundException)) {
        throw e;
      }
    }
  }

  @Test
  public void testExecNonexistentBinary2() throws Throwable {
    assumeWindows();
    assertFalse(doesAppExist(Arrays.asList("undefined-application",
        "--version")));
  }

  @Test
  public void testEmitKillCommand() throws Throwable {
    int result = killJavaProcesses("regionserver", 9);
    // we know the exit code if there is no supported kill operation
    assertTrue(getKillSupported() || result == -1);
  }

  @Test
  public void testHadoopHomeDefined() throws Throwable {
    assumeWindows();
    String hadoopHome = Shell.getHadoopHome();
    LOG.info("HADOOP_HOME={}", hadoopHome);
  }

  @Test
  public void testHasWinutils() throws Throwable {
    assumeWindows();
    SliderUtils.maybeVerifyWinUtilsValid();
  }

  /** winutils must be locatable and executable. */
  @Test
  public void testExecWinutils() throws Throwable {
    assumeWindows();
    String winUtilsPath = Shell.getWinUtilsPath();
    assertTrue(SliderUtils.isSet(winUtilsPath));
    File winUtils = new File(winUtilsPath);
    LOG.debug("Winutils is at {}", winUtils);
    exec(0, Arrays.asList(winUtilsPath, "systeminfo"));
  }

  @Test
  public void testPath() throws Throwable {
    String path = extractPath();
    LOG.info("Path value = {}", path);
  }

  /** The JDK compiler must be on the PATH on all platforms. */
  @Test
  public void testFindJavac() throws Throwable {
    String name = Shell.WINDOWS ? "javac.exe" : "javac";
    assertNotNull(locateExecutable(name));
  }

  @Test
  public void testHadoopDLL() throws Throwable {
    assumeWindows();
    // split the path
    File exepath = locateExecutable("HADOOP.DLL");
    assertNotNull(exepath);
    LOG.info("Hadoop DLL at: {}", exepath);
  }
}

View File

@ -0,0 +1,187 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.common.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
import org.apache.slider.client.SliderClient;
import org.apache.slider.core.zk.ZKIntegration;
import org.apache.slider.utils.KeysForTests;
import org.apache.slider.utils.YarnZKMiniClusterTestBase;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.List;
/**
* Test ZK integration.
*/
public class TestZKIntegration extends YarnZKMiniClusterTestBase implements
KeysForTests {
private static final Logger LOG =
LoggerFactory.getLogger(TestZKIntegration.class);
public static final String USER = KeysForTests.USERNAME;
public static final int CONNECT_TIMEOUT = 5000;
private ZKIntegration zki;
@Before
public void createCluster() {
Configuration conf = getConfiguration();
String name = methodName.getMethodName();
File zkdir = new File("target/zk/${name}");
FileUtil.fullyDelete(zkdir);
conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR, zkdir
.getAbsolutePath());
createMicroZKCluster("-"+ name, conf);
}
@After
public void closeZKI() throws IOException {
if (zki != null) {
zki.close();
zki = null;
}
}
  /**
   * Create a ZK integration instance bound to this test's method name and
   * cache it in {@code zki} so the @After hook cleans it up.
   *
   * @return the created instance
   */
  public ZKIntegration initZKI() throws IOException, InterruptedException {
    zki = createZKIntegrationInstance(
        getZKBinding(), methodName.getMethodName(), true, false,
        CONNECT_TIMEOUT);
    return zki;
  }
  /** With no clusters registered, the user's cluster listing is empty. */
  @Test
  public void testListUserClustersWithoutAnyClusters() throws Throwable {
    assertHasZKCluster();
    initZKI();
    // computed but unused — presumably kept for parity with the other
    // listing tests; TODO confirm it can be removed
    String userPath = ZKIntegration.mkSliderUserPath(USER);
    List<String> clusters = this.zki.getClusters();
    assertTrue(SliderUtils.isEmpty(clusters));
  }
@Test
public void testListUserClustersWithOneCluster() throws Throwable {
assertHasZKCluster();
initZKI();
String userPath = ZKIntegration.mkSliderUserPath(USER);
String fullPath = zki.createPath(userPath, "/cluster-",
ZooDefs.Ids.OPEN_ACL_UNSAFE,
CreateMode.EPHEMERAL_SEQUENTIAL);
LOG.info("Ephemeral path {}", fullPath);
List<String> clusters = zki.getClusters();
assertEquals(1, clusters.size());
assertTrue(fullPath.endsWith(clusters.get(0)));
}
@Test
public void testListUserClustersWithTwoCluster() throws Throwable {
initZKI();
String userPath = ZKIntegration.mkSliderUserPath(USER);
String c1 = createEphemeralChild(zki, userPath);
LOG.info("Ephemeral path $c1");
String c2 = createEphemeralChild(zki, userPath);
LOG.info("Ephemeral path $c2");
List<String> clusters = zki.getClusters();
assertEquals(2, clusters.size());
assertTrue((c1.endsWith(clusters.get(0)) && c2.endsWith(clusters.get(1))) ||
(c1.endsWith(clusters.get(1)) && c2.endsWith(clusters.get(0))));
}
@Test
public void testCreateAndDeleteDefaultZKPath() throws Throwable {
MockSliderClient client = new MockSliderClient();
String path = client.createZookeeperNodeInner("cl1", true);
zki = client.getLastZKIntegration();
String zkPath = ZKIntegration.mkClusterPath(USER, "cl1");
assertEquals("zkPath must be as expected", zkPath,
"/services/slider/users/" + USER + "/cl1");
assertEquals(path, zkPath);
assertNull("ZKIntegration should be null.", zki);
zki = createZKIntegrationInstance(getZKBinding(), "cl1", true, false,
CONNECT_TIMEOUT);
assertFalse(zki.exists(zkPath));
path = client.createZookeeperNodeInner("cl1", false);
zki = client.getLastZKIntegration();
assertNotNull(zki);
assertEquals("zkPath must be as expected", zkPath,
"/services/slider/users/" + USER + "/cl1");
assertEquals(path, zkPath);
assertTrue(zki.exists(zkPath));
zki.createPath(zkPath, "/cn", ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode
.PERSISTENT);
assertTrue(zki.exists(zkPath + "/cn"));
client.deleteZookeeperNode("cl1");
assertFalse(zki.exists(zkPath));
}
public static String createEphemeralChild(ZKIntegration zki, String userPath)
throws KeeperException, InterruptedException {
return zki.createPath(userPath, "/cluster-",
ZooDefs.Ids.OPEN_ACL_UNSAFE,
CreateMode.EPHEMERAL_SEQUENTIAL);
}
/**
* Test slider client that overriddes ZK client.
*/
public class MockSliderClient extends SliderClient {
private ZKIntegration zki;
@Override
public String getUsername() {
return USER;
}
@Override
protected ZKIntegration getZkClient(String clusterName, String user) {
try {
zki = createZKIntegrationInstance(getZKBinding(), clusterName, true,
false, CONNECT_TIMEOUT);
} catch (Exception e) {
fail("creating ZKIntergration threw an exception");
}
return zki;
}
@Override
public Configuration getConfig() {
return new Configuration();
}
public ZKIntegration getLastZKIntegration() {
return zki;
}
}
}

View File

@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.core.conf;
import org.apache.slider.api.resource.Application;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.apache.slider.utils.SliderTestUtils.JSON_SER_DESER;
/**
 * Names of the example configs.
 */
public final class ExampleConfResources {

  public static final String APP_JSON = "app.json";
  public static final String APP_RES = "app-resolved.json";
  public static final String OVERRIDE_JSON = "app-override.json";
  public static final String OVERRIDE_RES = "app-override-resolved.json";

  /** Resource package holding all of the example files. */
  public static final String PACKAGE = "/org/apache/slider/core/conf/examples/";

  // bare filenames of every example config
  private static final String[] ALL_EXAMPLES = {APP_JSON, APP_RES,
      OVERRIDE_JSON, OVERRIDE_RES};

  /** Fully qualified resource paths of every example config. */
  public static final List<String> ALL_EXAMPLE_RESOURCES = new ArrayList<>();

  static {
    for (int i = 0; i < ALL_EXAMPLES.length; i++) {
      ALL_EXAMPLE_RESOURCES.add(PACKAGE + ALL_EXAMPLES[i]);
    }
  }

  private ExampleConfResources() {
    // utility class: no instances
  }

  /**
   * Load an example application resource.
   * @param name bare filename within the example package
   * @return the deserialized application
   * @throws IOException on any failure to load or parse the resource
   */
  static Application loadResource(String name) throws IOException {
    return JSON_SER_DESER.fromResource(PACKAGE + name);
  }
}

View File

@ -0,0 +1,64 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.core.conf;
import org.apache.slider.api.resource.Application;
import org.apache.slider.common.tools.SliderUtils;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.util.Arrays;
import java.util.Collection;
import static org.apache.slider.utils.SliderTestUtils.JSON_SER_DESER;
/**
 * Test loading example resources.
 */
@RunWith(value = Parameterized.class)
public class TestConfTreeLoadExamples extends Assert {

  // resource path of the example application JSON under test
  private final String resource;

  public TestConfTreeLoadExamples(String resource) {
    this.resource = resource;
  }

  /**
   * Parameter factory: one single-element array per example resource.
   * @return the parameter sets for the parameterized runner
   */
  @Parameterized.Parameters
  public static Collection<String[]> filenames() {
    String[][] stringArray = new String[ExampleConfResources
        .ALL_EXAMPLE_RESOURCES.size()][1];
    int i = 0;
    for (String s : ExampleConfResources.ALL_EXAMPLE_RESOURCES) {
      stringArray[i++][0] = s;
    }
    return Arrays.asList(stringArray);
  }

  /**
   * Load and resolve the example; any failure is rethrown with the
   * resource name attached so the failing example is identifiable.
   */
  @Test
  public void testLoadResource() throws Throwable {
    try {
      Application application = JSON_SER_DESER.fromResource(resource);
      SliderUtils.resolve(application);
    } catch (Exception e) {
      // Fix: pass the original exception as the cause instead of
      // discarding its stack trace.
      throw new Exception("exception loading " + resource + ":" + e.toString(),
          e);
    }
  }
}

View File

@ -0,0 +1,118 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.core.conf;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Configuration;
import org.apache.slider.common.tools.SliderUtils;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.slider.api.InternalKeys.CHAOS_MONKEY_INTERVAL;
import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS;
import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS;
import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES;
import static org.apache.slider.core.conf.ExampleConfResources.APP_JSON;
import static org.apache.slider.core.conf.ExampleConfResources.OVERRIDE_JSON;
/**
 * Test global configuration resolution: global properties must propagate
 * into each component's configuration unless the component overrides them.
 */
public class TestConfigurationResolve extends Assert {
  protected static final Logger LOG =
      LoggerFactory.getLogger(TestConfigurationResolve.class);

  /**
   * Load the app-override example, check the raw (unresolved) per-component
   * configurations, then run SliderUtils.resolve() and verify that every
   * global property appears in each component unless overridden there.
   */
  @Test
  public void testOverride() throws Throwable {
    Application orig = ExampleConfResources.loadResource(OVERRIDE_JSON);

    // before resolution: globals are only in the application config
    Configuration global = orig.getConfiguration();
    assertEquals("a", global.getProperty("g1"));
    assertEquals("b", global.getProperty("g2"));

    // "simple" starts with no properties of its own
    Configuration simple = orig.getComponent("simple").getConfiguration();
    assertEquals(0, simple.getProperties().size());

    // "master" overrides g1 and adds its own name
    Configuration master = orig.getComponent("master").getConfiguration();
    assertEquals("m", master.getProperty("name"));
    assertEquals("overridden", master.getProperty("g1"));

    // "worker" overrides g1, has no g2 yet, and adds name + timeout
    Configuration worker = orig.getComponent("worker").getConfiguration();
    LOG.info("worker = {}", worker);
    assertEquals(3, worker.getProperties().size());
    assertEquals("worker", worker.getProperty("name"));
    assertEquals("overridden-by-worker", worker.getProperty("g1"));
    assertNull(worker.getProperty("g2"));
    assertEquals("1000", worker.getProperty("timeout"));

    // here is the resolution
    SliderUtils.resolve(orig);

    // globals are unchanged by resolution
    global = orig.getConfiguration();
    LOG.info("global = {}", global);
    assertEquals("a", global.getProperty("g1"));
    assertEquals("b", global.getProperty("g2"));

    // "simple" inherits both globals
    simple = orig.getComponent("simple").getConfiguration();
    assertEquals(2, simple.getProperties().size());
    assertEquals("a", simple.getProperty("g1"));
    assertEquals("b", simple.getProperty("g2"));

    // "master" keeps its override of g1 and inherits g2
    master = orig.getComponent("master").getConfiguration();
    LOG.info("master = {}", master);
    assertEquals(3, master.getProperties().size());
    assertEquals("m", master.getProperty("name"));
    assertEquals("overridden", master.getProperty("g1"));
    assertEquals("b", master.getProperty("g2"));

    // "worker" keeps its override of g1 and timeout, and inherits g2
    worker = orig.getComponent("worker").getConfiguration();
    LOG.info("worker = {}", worker);
    assertEquals(4, worker.getProperties().size());
    assertEquals("worker", worker.getProperty("name"));
    assertEquals("overridden-by-worker", worker.getProperty("g1"));
    assertEquals("b", worker.getProperty("g2"));
    assertEquals("1000", worker.getProperty("timeout"));
  }

  /**
   * The app.json example declares the chaos monkey interval in seconds;
   * check both the raw ".seconds" property and the combined time-range
   * helper agree on 60 seconds.
   */
  @Test
  public void testTimeIntervalLoading() throws Throwable {
    Application orig = ExampleConfResources.loadResource(APP_JSON);

    Configuration conf = orig.getConfiguration();
    long s = conf.getPropertyLong(
        CHAOS_MONKEY_INTERVAL + SliderUtils.SECONDS,
        0);
    assertEquals(60, s);
    long monkeyInterval = SliderUtils.getTimeRange(conf,
        CHAOS_MONKEY_INTERVAL,
        DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS,
        DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS,
        DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES,
        0);
    assertEquals(60L, monkeyInterval);
  }
}

View File

@ -0,0 +1,263 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.other;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
import org.apache.slider.utils.YarnMiniClusterTestBase;
import org.junit.After;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * This test class exists to look at permissions of the filesystem, especially
 * that created by Mini YARN clusters. On some windows jenkins machines,
 * YARN actions were failing as the directories had the wrong permissions
 * (i.e. too lax)
 */
public class TestFilesystemPermissions extends YarnMiniClusterTestBase {
  private static final Logger LOG = LoggerFactory.getLogger(
      TestFilesystemPermissions.class);

  // directories created by tests; deleted in teardown
  private List<File> filesToDelete = new ArrayList<>();

  @After
  public void deleteFiles() {
    for (File f : filesToDelete) {
      FileUtil.fullyDelete(f, true);
    }
  }

  /** Basic java.io/FileUtil access checks on a freshly created directory. */
  @Test
  public void testJavaFSOperations() throws Throwable {
    assertNativeLibrariesPresent();
    File subdir = testDir();
    subdir.mkdir();
    assertTrue(subdir.isDirectory());
    assertTrue(FileUtil.canRead(subdir));
    assertTrue(FileUtil.canWrite(subdir));
    assertTrue(FileUtil.canExecute(subdir));
  }

  /** DiskChecker must accept a directory created with mkdir(). */
  @Test
  public void testDiskCheckerOperations() throws Throwable {
    assertNativeLibrariesPresent();
    File subdir = testDir();
    subdir.mkdir();
    DiskChecker checker = new DiskChecker();
    checker.checkDir(subdir);
  }

  /** DiskChecker must accept a directory created with mkdirs(). */
  @Test
  public void testDiskCheckerMkdir() throws Throwable {
    assertNativeLibrariesPresent();
    File subdir = testDir();
    subdir.mkdirs();
    DiskChecker checker = new DiskChecker();
    checker.checkDir(subdir);
  }

  /**
   * Get a test dir for this method; one that will be deleted on teardown.
   * @return a filename unique to this test method
   */
  File testDir() {
    File parent = new File("target/testfspermissions");
    parent.mkdir();
    File testdir = new File(parent, methodName.getMethodName());
    filesToDelete.add(testdir);
    return testdir;
  }

  /** Building the expected-permissions map must not fail. */
  @Test
  public void testPermsMap() throws Throwable {
    File dir = testDir();
    String diruri = dir.toURI().toString();
    FileContext lfs = createLocalFS(dir, getConfiguration());
    getLocalDirsPathPermissionsMap(lfs, diruri);
  }

  /** A freshly initialized local dir must pass validation. */
  @Test
  public void testInitLocaldir() throws Throwable {
    File dir = testDir();
    String diruri = dir.toURI().toString();
    FileContext lfs = createLocalFS(dir, getConfiguration());
    initializeLocalDir(lfs, diruri);
    List<String> localDirs = getInitializedLocalDirs(lfs, Arrays.asList(
        diruri));
    assertEquals(1, localDirs.size());
  }

  /**
   * Spin up a mini cluster and verify the permissions of every local dir
   * it created.
   */
  @Test
  public void testValidateMiniclusterPerms() throws Throwable {
    int numLocal = 1;
    // Fix: the returned cluster name was stored in an unused local;
    // only the side effect of starting the cluster is needed here.
    createMiniCluster("", getConfiguration(), 1, numLocal, 1, false);
    File workDir = getMiniCluster().getTestWorkDir();
    List<File> localdirs = new ArrayList<>();
    for (File file : workDir.listFiles()) {
      if (file.isDirectory() && file.getAbsolutePath().contains("-local")) {
        // local dir
        localdirs.add(file);
      }
    }
    assertEquals(numLocal, localdirs.size());
    FileContext lfs = createLocalFS(workDir, getConfiguration());
    for (File file : localdirs) {
      checkLocalDir(lfs, file.toURI().toString());
    }
  }

  /** Create a local FileContext rooted at the given directory. */
  FileContext createLocalFS(File dir, Configuration conf)
      throws UnsupportedFileSystemException {
    return FileContext.getFileContext(dir.toURI(), conf);
  }

  /**
   * Extracted from ResourceLocalizationService: the permissions each
   * localizer subdirectory is required to have (after applying the umask).
   * @param lfs file context
   * @param localDir local directory URI
   * @return map of path to required permission
   * @see ResourceLocalizationService
   */
  private Map<Path, FsPermission> getLocalDirsPathPermissionsMap(
      FileContext lfs,
      String localDir) {
    Map<Path, FsPermission> localDirPathFsPermissionsMap = new HashMap<>();
    FsPermission defaultPermission =
        FsPermission.getDirDefault().applyUMask(lfs.getUMask());
    FsPermission nmPrivatePermission =
        ResourceLocalizationService.NM_PRIVATE_PERM.applyUMask(lfs.getUMask());
    Path userDir = new Path(localDir, ContainerLocalizer.USERCACHE);
    Path fileDir = new Path(localDir, ContainerLocalizer.FILECACHE);
    Path sysDir = new Path(
        localDir,
        ResourceLocalizationService.NM_PRIVATE_DIR);
    localDirPathFsPermissionsMap.put(userDir, defaultPermission);
    localDirPathFsPermissionsMap.put(fileDir, defaultPermission);
    localDirPathFsPermissionsMap.put(sysDir, nmPrivatePermission);
    return localDirPathFsPermissionsMap;
  }

  /**
   * Verify every expected path exists with exactly the expected permission.
   * @return true if all paths validate
   * @throws YarnRuntimeException on any permission mismatch
   * @throws IOException if a path is missing or unreadable
   */
  private boolean checkLocalDir(FileContext lfs, String localDir)
      throws IOException {
    Map<Path, FsPermission> pathPermissionMap =
        getLocalDirsPathPermissionsMap(lfs, localDir);
    for (Map.Entry<Path, FsPermission> entry : pathPermissionMap.entrySet()) {
      FileStatus status;
      status = lfs.getFileStatus(entry.getKey());
      if (!status.getPermission().equals(entry.getValue())) {
        String msg =
            "Permissions incorrectly set for dir " + entry.getKey() +
                ", should be " + entry.getValue() + ", actual value = " +
                status.getPermission();
        throw new YarnRuntimeException(msg);
      }
    }
    return true;
  }

  /**
   * Create each expected path (if absent) with the expected permissions,
   * and correct the permissions of those that already exist.
   */
  private void initializeLocalDir(FileContext lfs, String localDir)
      throws IOException {
    Map<Path, FsPermission> pathPermissionMap =
        getLocalDirsPathPermissionsMap(lfs, localDir);
    for (Map.Entry<Path, FsPermission> entry : pathPermissionMap.entrySet()) {
      FileStatus status;
      try {
        status = lfs.getFileStatus(entry.getKey());
      } catch (FileNotFoundException fs) {
        // absent: will be created below
        status = null;
      }
      if (status == null) {
        lfs.mkdir(entry.getKey(), entry.getValue(), true);
        status = lfs.getFileStatus(entry.getKey());
      }
      FsPermission perms = status.getPermission();
      if (!perms.equals(entry.getValue())) {
        lfs.setPermission(entry.getKey(), entry.getValue());
      }
    }
  }

  /**
   * Check every dir; any that fail validation are initialized and then
   * re-checked.
   * @return the input list
   */
  private synchronized List<String> getInitializedLocalDirs(FileContext lfs,
      List<String> dirs) throws IOException {
    List<String> checkFailedDirs = new ArrayList<String>();
    for (String dir : dirs) {
      try {
        checkLocalDir(lfs, dir);
      } catch (YarnRuntimeException e) {
        checkFailedDirs.add(dir);
      }
    }
    for (String dir : checkFailedDirs) {
      // parameterized SLF4J logging instead of string concatenation
      LOG.info("Attempting to initialize {}", dir);
      initializeLocalDir(lfs, dir);
      checkLocalDir(lfs, dir);
    }
    return dirs;
  }
}

View File

@ -0,0 +1,166 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.other;
import org.apache.slider.utils.SliderTestUtils;
import org.apache.slider.utils.TestUtility;
import org.junit.Test;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
/**
 * This test exists to diagnose local FS permissions.
 */
public class TestLocalDirStatus extends SliderTestUtils {

  /** Size of the test payload: 2 MB. */
  public static final int SIZE = 0x200000;

  @Test
  public void testTempDir() throws Throwable {
    File tmpf = null;
    try {
      tmpf = File.createTempFile("testl", ".bin");
      createAndReadFile(tmpf, SIZE);
      tmpf.delete();
      assertFalse(tmpf.exists());
    } finally {
      if (tmpf != null) {
        tmpf.delete();
      }
    }
  }

  @Test
  public void testTargetDir() throws Throwable {
    File target = target();
    File tmpf = null;
    try {
      tmpf = File.createTempFile("testl", ".bin", target);
      createAndReadFile(tmpf, SIZE);
      tmpf.delete();
      assertFalse(tmpf.exists());
    } finally {
      if (tmpf != null) {
        tmpf.delete();
      }
    }
  }

  /**
   * Get the absolute path of the build's target directory.
   * @return the target directory, which must already exist
   */
  public File target() {
    File target = new File("target").getAbsoluteFile();
    assertTrue(target.exists());
    return target;
  }

  @Test
  public void testRenameInTargetDir() throws Throwable {
    File target = target();
    File tmpf = null;
    File dst = null;
    try {
      tmpf = File.createTempFile("testl", ".bin", target);
      dst = File.createTempFile("test-dest", ".bin", target);
      createRenameAndReadFile(tmpf, dst, SIZE);
      assertFalse(tmpf.exists());
      dst.delete();
    } finally {
      if (tmpf != null) {
        tmpf.delete();
      }
      if (dst != null) {
        dst.delete();
      }
    }
  }

  @Test
  public void testRenameInTmpDir() throws Throwable {
    File tmpf = null;
    File dst = null;
    try {
      tmpf = File.createTempFile("testl", ".bin");
      dst = File.createTempFile("test-dest", ".bin");
      createRenameAndReadFile(tmpf, dst, SIZE);
      assertFalse(tmpf.exists());
      dst.delete();
    } finally {
      if (tmpf != null) {
        tmpf.delete();
      }
      if (dst != null) {
        dst.delete();
      }
    }
  }

  /** Write a generated dataset to the file, then read it back and compare. */
  protected void createAndReadFile(File path, int len) throws IOException {
    byte[] dataset = TestUtility.dataset(len, 32, 128);
    writeFile(path, dataset);
    assertTrue(path.exists());
    assertEquals(len, path.length());
    byte[] persisted = readFile(path);
    TestUtility.compareByteArrays(dataset, persisted, len);
  }

  /** Write to src, rename src onto dst, then read dst back and compare. */
  protected void createRenameAndReadFile(File src, File dst, int len)
      throws IOException {
    byte[] dataset = TestUtility.dataset(len, 32, 128);
    writeFile(src, dataset);
    assertTrue(src.exists());
    assertEquals(len, src.length());
    // clear the destination first; File.renameTo is not guaranteed to
    // overwrite an existing file on all platforms
    dst.delete();
    assertFalse(dst.exists());
    assertTrue(src.renameTo(dst));
    assertEquals(len, dst.length());
    byte[] persisted = readFile(dst);
    TestUtility.compareByteArrays(dataset, persisted, len);
  }

  /** Write the byte array to the file, closing the stream on all paths. */
  protected void writeFile(File path, byte[] dataset)
      throws IOException {
    // try-with-resources closes the stream even if write() fails
    try (FileOutputStream out = new FileOutputStream(path)) {
      out.write(dataset);
      out.flush();
    }
  }

  /**
   * Read the whole file into a byte array.
   * @param path file to read
   * @return the file contents
   * @throws IOException if the file ends before path.length() bytes
   */
  protected byte[] readFile(File path) throws IOException {
    assertTrue(path.getAbsoluteFile().exists());
    assertTrue(path.getAbsoluteFile().isFile());
    int len = (int) path.length();
    byte[] dataset = new byte[len];
    try (FileInputStream ins = new FileInputStream(path)) {
      int offset = 0;
      // Fix: a single read() may return fewer bytes than requested, which
      // silently truncated the comparison buffer; loop until full or EOF.
      while (offset < len) {
        int count = ins.read(dataset, offset, len - offset);
        if (count < 0) {
          throw new IOException("Premature end of file " + path
              + " after " + offset + " of " + len + " bytes");
        }
        offset += count;
      }
    }
    return dataset;
  }
}

View File

@ -0,0 +1,54 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.providers;
import org.apache.slider.providers.docker.DockerKeys;
import org.apache.slider.providers.docker.DockerProviderFactory;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
 * Test provider factories.
 */
public class TestProviderFactory {

  /** Create the factory under test for the docker provider. */
  private SliderProviderFactory dockerFactory() {
    return SliderProviderFactory
        .createSliderProviderFactory(DockerKeys.PROVIDER_DOCKER);
  }

  @Test
  public void testLoadAgentProvider() throws Throwable {
    // the docker provider name must map to the docker factory class
    assertTrue(dockerFactory() instanceof DockerProviderFactory);
  }

  @Test
  public void testCreateClientProvider() throws Throwable {
    assertNotNull(dockerFactory().createClientProvider());
  }

  @Test
  public void testCreateProviderByClassname() throws Throwable {
    SliderProviderFactory factory = dockerFactory();
    assertNotNull(factory.createServerProvider());
    assertTrue(factory instanceof DockerProviderFactory);
  }
}

View File

@ -0,0 +1,85 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.registry;
import org.apache.slider.core.registry.docstore.PublishedConfigSet;
import org.junit.Assert;
import org.junit.Test;
import java.util.Arrays;
/**
 * Test config set name validation.
 */
public class TestConfigSetNaming {

  /** Assert that a name passes validation. */
  void assertValid(String name) {
    PublishedConfigSet.validateName(name);
  }

  /** Assert that validation rejects the given name. */
  void assertInvalid(String name) {
    try {
      PublishedConfigSet.validateName(name);
    } catch (IllegalArgumentException expected) {
      // rejection is the expected outcome
      return;
    }
    Assert.fail("Invalid name was unexpectedly parsed: " + name);
  }

  @Test
  public void testLowerCase() throws Throwable {
    assertValid("abcdefghijklmnopqrstuvwxyz");
  }

  @Test
  public void testUpperCaseInvalid() throws Throwable {
    assertInvalid("ABCDEFGHIJKLMNOPQRSTUVWXYZ");
  }

  @Test
  public void testNumbers() throws Throwable {
    assertValid("01234567890");
  }

  @Test
  public void testChars() throws Throwable {
    assertValid("a-_+");
  }

  @Test
  public void testInvalids() throws Throwable {
    String[] badNames = {
        "",
        " ",
        "*",
        "a/b",
        "b\\a",
        "\"",
        "'",
        "\u0000",
        "\u0f00",
        "key.value",
        "-",
        "+",
        "_",
        "?",
    };
    for (String badName : badNames) {
      assertInvalid(badName);
    }
  }
}

View File

@ -0,0 +1,74 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.registry;
import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.slider.core.registry.SliderRegistryUtils;
import org.apache.slider.utils.SliderTestUtils;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* Test registry paths.
*/
public class TestRegistryPaths {
@Test
public void testHomedirKerberos() throws Throwable {
String home = RegistryUtils.homePathForUser("hbase@HADOOP.APACHE.ORG");
try {
assertEquals("/users/hbase", home);
} catch (AssertionError e) {
SliderTestUtils.skip("homedir filtering not yet in hadoop registry " +
"module");
}
}
@Test
public void testHomedirKerberosHost() throws Throwable {
String home = RegistryUtils.homePathForUser("hbase/localhost@HADOOP" +
".APACHE.ORG");
try {
assertEquals("/users/hbase", home);
} catch (AssertionError e) {
SliderTestUtils.skip("homedir filtering not yet in hadoop registry " +
"module");
}
}
@Test
public void testRegistryPathForInstance() throws Throwable {
String path = SliderRegistryUtils.registryPathForInstance("instance");
assertTrue(path.endsWith("/instance"));
}
@Test
public void testPathResolution() throws Throwable {
String home = RegistryUtils.homePathForCurrentUser();
assertEquals(home, SliderRegistryUtils.resolvePath("~"));
assertEquals(home +"/", SliderRegistryUtils.resolvePath("~/"));
assertEquals(home +"/something", SliderRegistryUtils.resolvePath(
"~/something"));
assertEquals("~unresolved", SliderRegistryUtils.resolvePath(
"~unresolved"));
}
}

View File

@ -0,0 +1,246 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.actions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.ServiceOperations;
import org.apache.slider.server.appmaster.SliderAppMaster;
import org.apache.slider.server.appmaster.state.AppState;
import org.apache.slider.server.services.workflow.ServiceThreadFactory;
import org.apache.slider.server.services.workflow.WorkflowExecutorService;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
* Test AM actions.
*/
public class TestActions {
protected static final Logger LOG =
LoggerFactory.getLogger(TestActions.class);
private QueueService queues;
private WorkflowExecutorService<ExecutorService> executorService;
  /**
   * Build and start the services under test: a queue service plus a
   * workflow executor wrapping a daemon cached thread pool.
   */
  @Before
  public void createService() {
    queues = new QueueService();
    Configuration conf = new Configuration();
    // Hadoop service lifecycle: init() then start()
    queues.init(conf);
    queues.start();
    executorService = new WorkflowExecutorService<>("AmExecutor",
        Executors.newCachedThreadPool(
            new ServiceThreadFactory("AmExecutor", true)));
    executorService.init(conf);
    executorService.start();
  }
  /** Stop both services; ServiceOperations.stop is a no-op on null. */
  @After
  public void destroyService() {
    // stop the executor before the queue service it drains
    ServiceOperations.stop(executorService);
    ServiceOperations.stop(queues);
  }
  /**
   * Calling start() again on the queue service already started in
   * {@link #createService()} must not fail.
   */
  @Test
  public void testBasicService() throws Throwable {
    queues.start();
  }
@Test
public void testDelayLogic() throws Throwable {
ActionNoteExecuted action = new ActionNoteExecuted("", 1000);
long now = System.currentTimeMillis();
long delay = action.getDelay(TimeUnit.MILLISECONDS);
assertTrue(delay >= 800);
assertTrue(delay <= 1800);
ActionNoteExecuted a2 = new ActionNoteExecuted("a2", 10000);
assertTrue(action.compareTo(a2) < 0);
assertTrue(a2.compareTo(action) > 0);
assertEquals(0, action.compareTo(action));
}
  /**
   * A delayed stop action must surface on the action queue roughly when
   * its delay expires, terminating the scheduler run loop.
   */
  @Test
  public void testActionDelayedExecutorTermination() throws Throwable {
    long start = System.currentTimeMillis();

    ActionStopQueue stopAction = new ActionStopQueue(1000);
    queues.scheduledActions.add(stopAction);
    queues.run();
    AsyncAction take = queues.actionQueue.take();
    assertEquals(take, stopAction);
    long stop = System.currentTimeMillis();
    // 1000ms delay with +/-500ms slack for scheduling jitter
    assertTrue(stop - start > 500);
    assertTrue(stop - start < 1500);
  }
@Test
public void testImmediateQueue() throws Throwable {
ActionNoteExecuted noteExecuted = new ActionNoteExecuted("executed", 0);
queues.put(noteExecuted);
queues.put(new ActionStopQueue(0));
QueueExecutor ex = new QueueExecutor(queues);
ex.run();
assertTrue(queues.actionQueue.isEmpty());
assertTrue(noteExecuted.executed.get());
}
@Test
public void testActionOrdering() throws Throwable {
ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
ActionStopQueue stop = new ActionStopQueue(1500);
ActionNoteExecuted note2 = new ActionNoteExecuted("note2", 800);
List<AsyncAction> actions = Arrays.asList(note1, stop, note2);
Collections.sort(actions);
assertEquals(actions.get(0), note1);
assertEquals(actions.get(1), note2);
assertEquals(actions.get(2), stop);
}
@Test
public void testDelayedQueueWithReschedule() throws Throwable {
ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
ActionStopQueue stop = new ActionStopQueue(1500);
ActionNoteExecuted note2 = new ActionNoteExecuted("note2", 800);
assertTrue(note2.compareTo(stop) < 0);
assertTrue(note1.getNanos() < note2.getNanos());
assertTrue(note2.getNanos() < stop.getNanos());
queues.schedule(note1);
queues.schedule(note2);
queues.schedule(stop);
// async to sync expected to run in order
runQueuesToCompletion();
assertTrue(note1.executed.get());
assertTrue(note2.executed.get());
}
public void runQueuesToCompletion() {
queues.run();
assertTrue(queues.scheduledActions.isEmpty());
assertFalse(queues.actionQueue.isEmpty());
QueueExecutor ex = new QueueExecutor(queues);
ex.run();
// flush all stop commands from the queue
queues.flushActionQueue(ActionStopQueue.class);
assertTrue(queues.actionQueue.isEmpty());
}
@Test
public void testRenewedActionFiresOnceAtLeast() throws Throwable {
ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
RenewingAction renewer = new RenewingAction(
note1,
500,
100,
TimeUnit.MILLISECONDS,
3);
queues.schedule(renewer);
ActionStopQueue stop = new ActionStopQueue(4, TimeUnit.SECONDS);
queues.schedule(stop);
// this runs all the delayed actions FIRST, so can't be used
// to play tricks of renewing actions ahead of the stop action
runQueuesToCompletion();
assertEquals(1, renewer.executionCount.intValue());
assertEquals(1, note1.executionCount.intValue());
// assert the renewed item is back in
assertTrue(queues.scheduledActions.contains(renewer));
}
@Test
public void testRenewingActionOperations() throws Throwable {
ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
RenewingAction renewer = new RenewingAction(
note1,
100,
100,
TimeUnit.MILLISECONDS,
3);
queues.renewing("note", renewer);
assertTrue(queues.removeRenewingAction("note"));
queues.stop();
assertTrue(queues.waitForServiceToStop(10000));
}
/**
* Test action.
*/
public class ActionNoteExecuted extends AsyncAction {
private final AtomicBoolean executed = new AtomicBoolean(false);
private final AtomicLong executionTimeNanos = new AtomicLong();
private final AtomicLong executionCount = new AtomicLong();
public ActionNoteExecuted(String text, int delay) {
super(text, delay);
}
@Override
public void execute(
SliderAppMaster appMaster,
QueueAccess queueService,
AppState appState) throws Exception {
LOG.info("Executing {}", name);
executed.set(true);
executionTimeNanos.set(System.nanoTime());
executionCount.incrementAndGet();
LOG.info(this.toString());
synchronized (this) {
this.notify();
}
}
@Override
public String toString() {
return super.toString() + " executed=" + executed.get() + "; count=" +
executionCount.get() + ";";
}
public long getExecutionCount() {
return executionCount.get();
}
}
}

View File

@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.api.resource.Application;
import org.apache.slider.providers.PlacementPolicy;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.state.RoleStatus;
import static org.apache.slider.api.ResourceKeys.COMPONENT_PLACEMENT_POLICY;
/**
* Class for basis of Anti-affine placement tests; sets up role2
* for anti-affinity.
*/
public class BaseMockAppStateAATest extends BaseMockAppStateTest
    implements MockRoles {

  /** Status of the plain anti-affine role (role 2). */
  private RoleStatus aaRole;

  /** Status of the anti-affine role restricted to gpu-labelled nodes. */
  private RoleStatus gpuRole;

  /**
   * Build an application in which role 1 is anti-affine and pinned to
   * the gpu label, while role 2 is anti-affine with no label constraint.
   * @return the application definition
   */
  @Override
  public Application buildApplication() {
    String antiAffinity =
        Integer.toString(PlacementPolicy.ANTI_AFFINITY_REQUIRED);
    Application application =
        factory.newApplication(0, 0, 0).name(getTestName());
    // role 1: anti-affine, and only on nodes carrying the gpu label
    application.getComponent(ROLE1).getConfiguration()
        .setProperty(COMPONENT_PLACEMENT_POLICY, antiAffinity);
    application.getComponent(ROLE1).getConfiguration()
        .setProperty(ResourceKeys.YARN_LABEL_EXPRESSION, LABEL_GPU);
    // role 2: plain anti-affinity
    application.getComponent(ROLE2).getConfiguration()
        .setProperty(COMPONENT_PLACEMENT_POLICY, antiAffinity);
    return application;
  }

  /** Resolve the two role statuses once the app state is live. */
  @Override
  public void setup() throws Exception {
    super.setup();
    gpuRole = lookupRole(ROLE1);
    aaRole = lookupRole(ROLE2);
  }

  /** @return status of the label-free AA role */
  protected RoleStatus getAaRole() {
    return aaRole;
  }

  /** @return status of the gpu-labelled AA role */
  protected RoleStatus getGpuRole() {
    return gpuRole;
  }
}

View File

@ -0,0 +1,112 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.slider.core.main.LauncherExitCodes;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.state.AppState;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.NodeMap;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
/**
* Test Anti-affine placement with a cluster of size 1.
*/
public class TestMockAppStateAAOvercapacity extends BaseMockAppStateAATest
    implements MockRoles {

  // single-node cluster: AA can place at most one container
  private static final int NODES = 1;

  @Override
  public MockYarnEngine createYarnEngine() {
    // one node with a single container slot
    return new MockYarnEngine(NODES, 1);
  }

  /** Assert that all live containers of the AA role are anti-affine. */
  void assertAllContainersAA() {
    assertAllContainersAA(getAaRole().getKey());
  }

  /**
   * Ask for more instances than the cluster can hold, verify the pending
   * AA request state, then kill the allocated container and check the
   * freed node becomes available for a new AA placement.
   * @throws Throwable on failure
   */
  @Test
  public void testOvercapacityRecovery() throws Throwable {
    RoleStatus aaRole = getAaRole();

    describe("Ask for 1 more than the no of available nodes;" +
        "verify the state. kill the allocated container and review");
    //more than expected
    int desired = 3;
    aaRole.setDesired(desired);
    assertTrue(appState.getRoleHistory().canPlaceAANodes());

    //first request
    // AA requests go out one at a time; the remainder stays pending
    List<AbstractRMOperation> operations =
        appState.reviewRequestAndReleaseNodes();
    assertTrue(aaRole.isAARequestOutstanding());
    assertEquals(1, aaRole.getRequested());
    assertEquals(desired - 1, aaRole.getAAPending());
    List<AbstractRMOperation> operationsOut = new ArrayList<>();
    // allocate and re-submit
    List<RoleInstance> instances = submitOperations(operations,
        EMPTY_ID_LIST, operationsOut);
    assertEquals(1, instances.size());
    assertAllContainersAA();

    // the only node is used, so no further request can be issued:
    // nothing requested or outstanding, the remainder stays AA-pending
    assertTrue(aaRole.getRunning() < aaRole.getDesired());
    assertEquals(0, aaRole.getRequested());
    assertFalse(aaRole.isAARequestOutstanding());
    assertEquals(desired - 1, aaRole.getAAPending());
    List<Container> allocatedContainers = engine.execute(operations,
        EMPTY_ID_LIST);
    assertEquals(0, allocatedContainers.size());

    // now let's trigger a failure
    NodeMap nodemap = cloneNodemap();
    assertEquals(1, nodemap.size());

    RoleInstance instance = instances.get(0);
    ContainerId cid = instance.getContainerId();

    // complete the container with a launch-failure exit code
    AppState.NodeCompletionResult result = appState.onCompletedContainer(
        containerStatus(cid, LauncherExitCodes.EXIT_TASK_LAUNCH_FAILURE));
    assertTrue(result.containerFailed);

    assertEquals(1, aaRole.getFailed());
    assertEquals(0, aaRole.getRunning());
    // the node freed by the failure is placeable again
    List<NodeInstance> availablePlacements = appState.getRoleHistory()
        .findNodeForNewAAInstance(aaRole);
    assertEquals(1, availablePlacements.size());
    describe("expecting a successful review with available placements of " +
        availablePlacements);
    operations = appState.reviewRequestAndReleaseNodes();
    assertEquals(1, operations.size());
  }
}

View File

@ -0,0 +1,380 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.slider.api.types.NodeInformation;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.providers.PlacementPolicy;
import org.apache.slider.server.appmaster.model.mock.MockAppState;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.state.AppState;
import org.apache.slider.server.appmaster.state.AppState.NodeUpdatedOutcome;
import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
import org.apache.slider.server.appmaster.state.ContainerAssignment;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import static org.apache.slider.api.ResourceKeys.COMPONENT_PLACEMENT_POLICY;
import static org.apache.slider.server.appmaster.model.mock.MockFactory.AAROLE_2;
/**
* Test Anti-affine placement.
*/
public class TestMockAppStateAAPlacement extends BaseMockAppStateAATest
    implements MockRoles {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestMockAppStateAAPlacement.class);

  // cluster size; small so faulty (affine) placement would surface
  private static final int NODES = 3;

  /**
   * The YARN engine has a cluster with very few nodes (3) and lots of
   * containers, so if AA placement isn't working, there will be affine
   * placements surfacing.
   * @return the mock YARN engine for these tests
   */
  @Override
  public MockYarnEngine createYarnEngine() {
    return new MockYarnEngine(NODES, 8);
  }

  /**
   * This is the simplest AA allocation: no labels, so allocate anywhere.
   * @throws Throwable on failure
   */
  @Test
  public void testAllocateAANoLabel() throws Throwable {
    RoleStatus aaRole = getAaRole();

    assertTrue(cloneNodemap().size() > 0);

    // want multiple instances, so there will be iterations
    aaRole.setDesired(2);
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    AMRMClient.ContainerRequest request = getSingleRequest(ops);
    // an AA request is strict: locality is not relaxed and every cluster
    // node is listed explicitly
    assertFalse(request.getRelaxLocality());
    assertEquals(request.getNodes().size(), engine.getCluster()
        .getClusterSize());
    assertNull(request.getRacks());
    assertNotNull(request.getCapability());

    Container allocated = engine.allocateContainer(request);

    // notify the container and expect an assignment plus operations
    List<ContainerAssignment> assignments = new ArrayList<>();
    List<AbstractRMOperation> operations = new ArrayList<>();
    appState.onContainersAllocated(Arrays.asList(allocated), assignments,
        operations);

    String host = allocated.getNodeId().getHost();
    NodeInstance hostInstance = cloneNodemap().get(host);
    assertEquals(1, hostInstance.get(aaRole.getKey()).getStarting());
    // this host can no longer take another instance of the AA role
    assertFalse(hostInstance.canHost(aaRole.getKey(), ""));
    assertFalse(hostInstance.canHost(aaRole.getKey(), null));

    // assignment
    assertEquals(1, assignments.size());

    // verify the release matches the allocation.
    // BUG FIX: the original used assertNotNull(a.equals(b)), which
    // asserts on a boxed Boolean and so could never fail; assertEquals
    // actually checks the cancelled request's capability
    assertEquals(2, operations.size());
    assertEquals(allocated.getResource(),
        getCancel(operations, 0).getCapability());
    // we also expect a new allocation request to have been issued,
    // excluding the host already in use
    ContainerRequest req2 = getRequest(operations, 1);
    assertEquals(req2.getNodes().size(), engine.getCluster()
        .getClusterSize() - 1);
    assertFalse(req2.getNodes().contains(host));
    assertFalse(request.getRelaxLocality());

    // verify the pending counter is down
    assertEquals(0L, aaRole.getAAPending());
    Container allocated2 = engine.allocateContainer(req2);

    // placement must be on a different host
    assertNotEquals(allocated2.getNodeId(), allocated.getNodeId());

    ContainerAssignment assigned = assignments.get(0);
    Container container = assigned.container;
    RoleInstance ri = roleInstance(assigned);
    //tell the app it arrived
    appState.containerStartSubmitted(container, ri);
    assertNotNull(appState.onNodeManagerContainerStarted(container.getId()));
    ops = appState.reviewRequestAndReleaseNodes();
    assertEquals(0, ops.size());
    assertAllContainersAA();

    // identify those hosts with an aa role on
    Map<Integer, String> naming = appState.buildNamingMap();
    assertEquals(3, naming.size());

    String name = aaRole.getName();
    assertEquals(name, naming.get(aaRole.getKey()));
    Map<String, NodeInformation> info =
        appState.getRoleHistory().getNodeInformationSnapshot(naming);
    assertTrue(SliderUtils.isNotEmpty(info));

    NodeInformation nodeInformation = info.get(host);
    assertNotNull(nodeInformation);
    assertTrue(SliderUtils.isNotEmpty(nodeInformation.entries));
    assertNotNull(nodeInformation.entries.get(name));
    assertEquals(1, nodeInformation.entries.get(name).live);
  }

  /**
   * Flex the desired count up while an AA request is in flight: no new
   * request may be issued, only the pending count grows.
   * @throws Throwable on failure
   */
  @Test
  public void testAllocateFlexUp() throws Throwable {
    RoleStatus aaRole = getAaRole();

    // want multiple instances, so there will be iterations
    aaRole.setDesired(2);
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    getSingleRequest(ops);
    assertEquals(1, aaRole.getRequested());
    assertEquals(1, aaRole.getAAPending());
    // invariant: actual + requested + pending == desired
    assertEquals(aaRole.getActualAndRequested() + aaRole
        .getAAPending(), aaRole.getDesired());

    // now trigger that flex up
    aaRole.setDesired(3);

    // expect: no new requests, pending count ++
    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
    assertTrue(ops2.isEmpty());
    assertEquals(aaRole.getRunning() + aaRole.getAAPending() +
        aaRole.getOutstandingAARequestCount(), aaRole.getDesired());

    // 1 outstanding
    assertEquals(0, aaRole.getRunning());
    assertTrue(aaRole.isAARequestOutstanding());
    // and one AA
    assertEquals(2, aaRole.getAAPending());
    assertAllContainersAA();

    // next iter
    assertEquals(1, submitOperations(ops, EMPTY_ID_LIST, ops2).size());
    assertEquals(2, ops2.size());
    assertEquals(1, aaRole.getAAPending());
    assertAllContainersAA();

    assertEquals(0, appState.reviewRequestAndReleaseNodes().size());

    // now trigger the next execution cycle
    List<AbstractRMOperation> ops3 = new ArrayList<>();
    assertEquals(1, submitOperations(ops2, EMPTY_ID_LIST, ops3).size());
    assertEquals(2, ops3.size());
    assertEquals(0, aaRole.getAAPending());
    assertAllContainersAA();
  }

  /**
   * Flex down while requests are pending: the pending count must drop
   * without any cancel operation being needed.
   * @throws Throwable on failure
   */
  @Test
  public void testAllocateFlexDownDecrementsPending() throws Throwable {
    RoleStatus aaRole = getAaRole();

    // want multiple instances, so there will be iterations
    aaRole.setDesired(2);
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    getSingleRequest(ops);
    assertEquals(1, aaRole.getAAPending());
    assertTrue(aaRole.isAARequestOutstanding());

    // flex down so that the next request should be cancelled
    aaRole.setDesired(1);

    // expect: no new requests, pending count --
    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
    assertTrue(ops2.isEmpty());
    assertTrue(aaRole.isAARequestOutstanding());
    assertEquals(0, aaRole.getAAPending());
    assertAllContainersAA();

    // next iter
    submitOperations(ops, EMPTY_ID_LIST, ops2).size();
    assertEquals(1, ops2.size());
    assertAllContainersAA();
  }

  /**
   * Here flex down while there is only one outstanding request.
   * The outstanding flex should be cancelled
   * @throws Throwable on failure
   */
  @Test
  public void testAllocateFlexDownForcesCancel() throws Throwable {
    RoleStatus aaRole = getAaRole();

    // want multiple instances, so there will be iterations
    aaRole.setDesired(1);
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    getSingleRequest(ops);
    assertEquals(1, aaRole.getRequested());
    assertEquals(0, aaRole.getAAPending());
    assertTrue(aaRole.isAARequestOutstanding());

    // flex down so that the next request should be cancelled
    aaRole.setDesired(0);
    // expect: a cancel operation, nothing requested or pending
    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
    assertEquals(0, aaRole.getRequested());
    assertEquals(0, aaRole.getAAPending());
    assertFalse(aaRole.isAARequestOutstanding());
    assertEquals(1, ops2.size());
    getSingleCancel(ops2);

    // next iter
    submitOperations(ops, EMPTY_ID_LIST, ops2).size();
    getSingleRelease(ops2);
  }

  /** Assert that all live containers of the AA role are anti-affine. */
  void assertAllContainersAA() {
    assertAllContainersAA(getAaRole().getKey());
  }

  /**
   * Ask for one more instance than there are nodes; the final request
   * stays unsatisfied until the cluster grows.
   * @throws Throwable on failure
   */
  @Test
  public void testAskForTooMany() throws Throwable {
    RoleStatus aaRole = getAaRole();

    describe("Ask for 1 more than the no of available nodes;" +
        " expect the final request to be unsatisfied until the cluster " +
        "changes size");
    //more than expected
    aaRole.setDesired(NODES + 1);
    List<AbstractRMOperation> operations = appState
        .reviewRequestAndReleaseNodes();
    assertTrue(aaRole.isAARequestOutstanding());
    assertEquals(NODES, aaRole.getAAPending());
    for (int i = 0; i < NODES; i++) {
      String iter = "Iteration " + i + " role = " + aaRole;
      LOG.info(iter);
      List<AbstractRMOperation> operationsOut = new ArrayList<>();
      assertEquals(1, submitOperations(operations, EMPTY_ID_LIST,
          operationsOut).size());
      operations = operationsOut;
      // all but the final iteration emit a cancel + a new request
      if (i + 1 < NODES) {
        assertEquals(2, operations.size());
      } else {
        assertEquals(1, operations.size());
      }
      assertAllContainersAA();
    }
    // expect an outstanding AA request to be unsatisfied
    assertTrue(aaRole.getRunning() < aaRole.getDesired());
    assertEquals(0, aaRole.getRequested());
    assertFalse(aaRole.isAARequestOutstanding());
    List<Container> allocatedContainers = engine.execute(operations,
        EMPTY_ID_LIST);
    assertEquals(0, allocatedContainers.size());
    // in a review now, no more requests can be generated, as there is no
    // space for AA placements, even though there is cluster capacity
    assertEquals(0, appState.reviewRequestAndReleaseNodes().size());

    // now do a node update (this doesn't touch the YARN engine; the node
    // isn't really there)
    NodeUpdatedOutcome outcome = addNewNode();
    assertEquals(cloneNodemap().size(), NODES + 1);
    assertTrue(outcome.clusterChanged);
    // no active calls to empty
    assertTrue(outcome.operations.isEmpty());
    assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
  }

  /** Report a new (gpu-labelled) node "4" as running to the app state. */
  protected AppState.NodeUpdatedOutcome addNewNode() {
    return updateNodes(MockFactory.INSTANCE.newNodeReport("4", NodeState
        .RUNNING, "gpu"));
  }

  /**
   * Change the cluster size in the middle of a request sequence; the
   * outstanding request must be cancelled and rebuilt.
   * @throws Throwable on failure
   */
  @Test
  public void testClusterSizeChangesDuringRequestSequence() throws Throwable {
    RoleStatus aaRole = getAaRole();
    describe("Change the cluster size where the cluster size changes during " +
        "a test sequence.");
    aaRole.setDesired(NODES + 1);
    appState.reviewRequestAndReleaseNodes();
    assertTrue(aaRole.isAARequestOutstanding());
    assertEquals(NODES, aaRole.getAAPending());
    NodeUpdatedOutcome outcome = addNewNode();
    assertTrue(outcome.clusterChanged);
    // one call to cancel
    assertEquals(1, outcome.operations.size());
    // and on a review, one more to rebuild
    assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
  }

  /**
   * Constructing an app state without node reports must be rejected.
   * @throws Throwable on failure
   */
  @Test
  public void testBindingInfoMustHaveNodeMap() throws Throwable {
    AppStateBindingInfo bindingInfo = buildBindingInfo();
    bindingInfo.nodeReports = null;
    try {
      MockAppState state = new MockAppState(bindingInfo);
      fail("Expected an exception, got " + state);
    } catch (IllegalArgumentException expected) {
      // expected: null node reports are rejected up front
    }
  }

  /**
   * Rebuild the app state from live containers, as on AM restart, and
   * verify the AA role is restored with an outstanding request.
   * @throws Throwable on failure
   */
  @Test
  public void testAMRestart() throws Throwable {
    int desiredAA = 3;
    getAaRole().setDesired(desiredAA);
    List<RoleInstance> instances = createAndStartNodes();
    List<Container> containers = new ArrayList<>();
    for (RoleInstance instance : instances) {
      containers.add(instance.container);
    }

    // now destroy the app state and rebuild it from the containers
    AppStateBindingInfo bindingInfo = buildBindingInfo();
    bindingInfo.application = factory.newApplication(0, 0, desiredAA).name(
        getTestName());
    bindingInfo.application.getComponent(ROLE2)
        .getConfiguration().setProperty(COMPONENT_PLACEMENT_POLICY,
        Integer.toString(PlacementPolicy.ANTI_AFFINITY_REQUIRED));
    bindingInfo.liveContainers = containers;
    appState = new MockAppState(bindingInfo);

    RoleStatus aaRole = lookupRole(AAROLE_2.name);
    // the gpu-labelled role must also be resolvable after the rebuild
    // (the original bound it to an unused local variable)
    lookupRole(MockFactory.AAROLE_1_GPU.name);
    appState.reviewRequestAndReleaseNodes();
    assertTrue(aaRole.isAntiAffinePlacement());
    assertTrue(aaRole.isAARequestOutstanding());
  }
}

View File

@ -0,0 +1,387 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.api.resource.Application;
import org.apache.slider.core.exceptions.SliderException;
import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
import org.apache.slider.server.appmaster.actions.ResetFailureWindow;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockAM;
import org.apache.slider.server.appmaster.model.mock.MockAppState;
import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.state.AppState;
import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
import org.apache.slider.server.appmaster.state.ContainerOutcome;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
/**
* Test that if you have >1 role, the right roles are chosen for release.
*/
public class TestMockAppStateContainerFailure extends BaseMockAppStateTest
implements MockRoles {
private static final Logger LOG =
LoggerFactory.getLogger(TestMockAppStateContainerFailure.class);
private MockRMOperationHandler operationHandler = new
MockRMOperationHandler();
private MockAM mockAM = new MockAM();
  @Override
  public String getTestName() {
    return "TestMockAppStateContainerFailure";
  }

  /**
   * Small cluster with multiple containers per node,
   * to guarantee many container allocations on each node.
   * @return the mock YARN engine
   */
  @Override
  public MockYarnEngine createYarnEngine() {
    return new MockYarnEngine(4, 8000);
  }

  /** Build an application with a container failure threshold of 10. */
  @Override
  public Application buildApplication() {
    Application application = super.buildApplication();
    application.getConfiguration().setProperty(
        ResourceKeys.CONTAINER_FAILURE_THRESHOLD, "10");
    return application;
  }
  /**
   * A container that fails while still "short lived" is counted as a
   * failure and its node is dropped from the recent-node list.
   * @throws Throwable on failure
   */
  @Test
  public void testShortLivedFail() throws Throwable {
    getRole0Status().setDesired(1);
    List<RoleInstance> instances = createAndStartNodes();
    assertEquals(1, instances.size());

    RoleInstance instance = instances.get(0);
    long created = instance.createTime;
    long started = instance.startTime;
    assertTrue(created > 0);
    assertTrue(started >= created);
    List<ContainerId> ids = extractContainerIds(instances, ROLE0);

    ContainerId cid = ids.get(0);
    assertTrue(appState.isShortLived(instance));
    // complete the container with a nonzero (failure) exit code
    AppState.NodeCompletionResult result = appState.onCompletedContainer(
        containerStatus(cid, 1));
    assertNotNull(result.roleInstance);
    assertTrue(result.containerFailed);
    RoleStatus status = getRole0Status();
    assertEquals(1, status.getFailed());
//    assertEquals(1, status.getStartFailed());

    //view the world
    appState.getRoleHistory().dump();
    // short-lived failure: node removed from the recent-node list
    List<NodeInstance> queue = appState.getRoleHistory().cloneRecentNodeList(
        getRole0Status().getKey());
    assertEquals(0, queue.size());
  }

  /**
   * A container that fails after the short-lived window still counts as
   * a failure, but its node stays in the recent-node list for reuse.
   * @throws Throwable on failure
   */
  @Test
  public void testLongLivedFail() throws Throwable {
    getRole0Status().setDesired(1);
    List<RoleInstance> instances = createAndStartNodes();
    assertEquals(1, instances.size());

    RoleInstance instance = instances.get(0);
    // back-date the start time an hour so it is no longer "short lived"
    instance.startTime = System.currentTimeMillis() - 60 * 60 * 1000;
    assertFalse(appState.isShortLived(instance));
    List<ContainerId> ids = extractContainerIds(instances, ROLE0);

    ContainerId cid = ids.get(0);
    AppState.NodeCompletionResult result = appState.onCompletedContainer(
        containerStatus(cid, 1));
    assertNotNull(result.roleInstance);
    assertTrue(result.containerFailed);
    RoleStatus status = getRole0Status();
    assertEquals(1, status.getFailed());
//    assertEquals(0, status.getStartFailed());

    //view the world
    appState.getRoleHistory().dump();
    // long-lived failure: node remains in the recent-node list
    List<NodeInstance> queue = appState.getRoleHistory().cloneRecentNodeList(
        getRole0Status().getKey());
    assertEquals(1, queue.size());
  }
  /**
   * A reported NM start failure must increment the role's failure count
   * and record both failed and start-failed against the node entry,
   * while keeping the node out of the recent-node list.
   * @throws Throwable on failure
   */
  @Test
  public void testNodeStartFailure() throws Throwable {
    getRole0Status().setDesired(1);
    List<RoleInstance> instances = createAndSubmitNodes();
    assertEquals(1, instances.size());

    RoleInstance instance = instances.get(0);
    List<ContainerId> ids = extractContainerIds(instances, ROLE0);

    ContainerId cid = ids.get(0);
    appState.onNodeManagerContainerStartFailed(cid, new SliderException(
        "oops"));
    RoleStatus status = getRole0Status();
    assertEquals(1, status.getFailed());
//    assertEquals(1, status.getStartFailed());

    RoleHistory history = appState.getRoleHistory();
    history.dump();
    List<NodeInstance> queue = history.cloneRecentNodeList(getRole0Status()
        .getKey());
    assertEquals(0, queue.size());

    // the node entry itself records both failure counters
    NodeInstance ni = history.getOrCreateNodeInstance(instance.container);
    NodeEntry re = ni.get(getRole0Status().getKey());
    assertEquals(1, re.getFailed());
    assertEquals(1, re.getStartFailed());
  }
  /**
   * Repeated startup failures must eventually exceed the container
   * failure threshold (10, set in buildApplication) and trigger cluster
   * teardown.
   * @throws Throwable on failure
   */
  @Test
  public void testRecurrentStartupFailure() throws Throwable {
    getRole0Status().setDesired(1);
    try {
      for (int i = 0; i < 100; i++) {
        List<RoleInstance> instances = createAndSubmitNodes();
        assertEquals(1, instances.size());

        List<ContainerId> ids = extractContainerIds(instances, ROLE0);

        ContainerId cid = ids.get(0);
        LOG.info("{} instance {} {}", i, instances.get(0), cid);
        assertNotNull(cid);
        appState.onNodeManagerContainerStartFailed(cid,
            new SliderException("failure #" + i));
        AppState.NodeCompletionResult result = appState.onCompletedContainer(
            containerStatus(cid));
        assertTrue(result.containerFailed);
      }
      // reaching here means the threshold never tripped
      fail("Cluster did not fail from too many startup failures");
    } catch (TriggerClusterTeardownException teardown) {
      // expected: the failure threshold forces teardown
      LOG.info("Exception {} : {}", teardown.getExitCode(), teardown);
    }
  }
  /**
   * With the container failure threshold set to 0 (unlimited), repeated
   * startup failures must NOT trigger cluster teardown.
   * @throws Throwable on failure
   */
  @Test
  public void testRecurrentStartupFailureWithUnlimitedFailures() throws
      Throwable {
    // Update instance definition to allow containers to fail any number of
    // times
    AppStateBindingInfo bindingInfo = buildBindingInfo();
    bindingInfo.application.getConfiguration().setProperty(
        ResourceKeys.CONTAINER_FAILURE_THRESHOLD, "0");
    appState = new MockAppState(bindingInfo);

    getRole0Status().setDesired(1);
    try {
      for (int i = 0; i < 100; i++) {
        List<RoleInstance> instances = createAndSubmitNodes();
        assertEquals(1, instances.size());

        List<ContainerId> ids = extractContainerIds(instances, ROLE0);

        ContainerId cid = ids.get(0);
        LOG.info("{} instance {} {}", i, instances.get(0), cid);
        assertNotNull(cid);
        appState.onNodeManagerContainerStartFailed(cid,
            new SliderException("failure #" + i));
        AppState.NodeCompletionResult result = appState.onCompletedContainer(
            containerStatus(cid));
        assertTrue(result.containerFailed);
      }
    } catch (TriggerClusterTeardownException teardown) {
      // a teardown here means threshold 0 was wrongly treated as a limit
      LOG.info("Exception {} : {}", teardown.getExitCode(), teardown);
      fail("Cluster failed despite " + ResourceKeys
          .CONTAINER_FAILURE_THRESHOLD + " = 0");
    }
  }
  /**
   * Resetting the failure window before each iteration keeps the
   * recent-failure count below the threshold, so 100 startup failures
   * never trigger teardown.
   * @throws Throwable on failure
   */
  @Test
  public void testRoleStatusFailureWindow() throws Throwable {
    ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);

    // initial reset
    resetter.execute(mockAM, null, appState);

    getRole0Status().setDesired(1);
    for (int i = 0; i < 100; i++) {
      // reset the failure window every iteration so the recent-failure
      // count never accumulates
      resetter.execute(mockAM, null, appState);
      List<RoleInstance> instances = createAndSubmitNodes();
      assertEquals(1, instances.size());

      List<ContainerId> ids = extractContainerIds(instances, ROLE0);

      ContainerId cid = ids.get(0);
      LOG.info("{} instance {} {}", i, instances.get(0), cid);
      assertNotNull(cid);
      appState.onNodeManagerContainerStartFailed(
          cid,
          new SliderException("failure #" + i));
      AppState.NodeCompletionResult result = appState.onCompletedContainer(
          containerStatus(cid));
      assertTrue(result.containerFailed);
    }
  }
@Test
public void testRoleStatusFailed() throws Throwable {
RoleStatus status = getRole0Status();
// limits exceeded
appState.incFailedContainers(status, ContainerOutcome.Failed);
assertEquals(1, status.getFailed());
assertEquals(1L, status.getFailedRecently());
assertEquals(0L, status.getLimitsExceeded());
assertEquals(0L, status.getPreempted());
assertEquals(0L, status.getDiskFailed());
ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
resetter.execute(mockAM, null, appState);
assertEquals(1, status.getFailed());
assertEquals(0L, status.getFailedRecently());
}
/**
 * A {@code Failed_limits_exceeded} outcome increments the failed,
 * recent-failure and limits-exceeded counters; a failure-window reset
 * clears only the recent count, leaving the limits-exceeded total intact.
 */
@Test
public void testRoleStatusFailedLimitsExceeded() throws Throwable {
  RoleStatus status = getRole0Status();
  // record a limits-exceeded failure
  appState.incFailedContainers(status, ContainerOutcome
      .Failed_limits_exceeded);
  assertEquals(1, status.getFailed());
  assertEquals(1L, status.getFailedRecently());
  assertEquals(1L, status.getLimitsExceeded());
  assertEquals(0L, status.getPreempted());
  assertEquals(0L, status.getDiskFailed());
  // reset the window: only the recent-failure count returns to zero
  ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
  resetter.execute(mockAM, null, appState);
  assertEquals(1, status.getFailed());
  assertEquals(0L, status.getFailedRecently());
  assertEquals(1L, status.getLimitsExceeded());
}
/**
 * Preemption is not counted as a failure: only the preempted counter
 * moves, and it survives a failure-window reset.
 * (The "Prempted" spelling is in the original test name.)
 */
@Test
public void testRoleStatusFailedPrempted() throws Throwable {
  RoleStatus status = getRole0Status();
  // record a preemption, not a failure
  appState.incFailedContainers(status, ContainerOutcome.Preempted);
  assertEquals(0, status.getFailed());
  assertEquals(1L, status.getPreempted());
  assertEquals(0L, status.getFailedRecently());
  assertEquals(0L, status.getDiskFailed());
  // the preempted counter is unaffected by a window reset
  ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
  resetter.execute(mockAM, null, appState);
  assertEquals(1L, status.getPreempted());
}
/**
 * A disk failure counts towards the failed and disk-failed totals but not
 * towards the recent-failure window, limits or preemption counters.
 */
@Test
public void testRoleStatusFailedNode() throws Throwable {
  RoleStatus status = getRole0Status();
  // record a disk failure
  appState.incFailedContainers(status, ContainerOutcome.Disk_failure);
  assertEquals(1, status.getFailed());
  assertEquals(0L, status.getFailedRecently());
  assertEquals(0L, status.getLimitsExceeded());
  assertEquals(0L, status.getPreempted());
  assertEquals(1L, status.getDiskFailed());
}
/**
 * A successful completion leaves every failure counter at zero and the
 * node entry available for new requests.
 */
@Test
public void testNodeEntryCompleted() throws Throwable {
  NodeEntry entry = new NodeEntry(1);
  entry.containerCompleted(true, ContainerOutcome.Completed);
  assertTrue(entry.isAvailable());
  assertEquals(0, entry.getActive());
  assertEquals(0, entry.getFailed());
  assertEquals(0, entry.getFailedRecently());
  assertEquals(0, entry.getStartFailed());
  assertEquals(0, entry.getPreempted());
}
/**
 * A failed container increments both the total and recent failure counts
 * of the node entry; clearing the recent count keeps the total.
 */
@Test
public void testNodeEntryFailed() throws Throwable {
  NodeEntry entry = new NodeEntry(1);
  entry.containerCompleted(false, ContainerOutcome.Failed);
  // only the failure counters move, and the entry stays available
  assertEquals(1, entry.getFailed());
  assertEquals(1, entry.getFailedRecently());
  assertEquals(0, entry.getPreempted());
  assertEquals(0, entry.getStartFailed());
  assertEquals(0, entry.getActive());
  assertTrue(entry.isAvailable());
  // clearing the recent count leaves the total untouched
  entry.resetFailedRecently();
  assertEquals(0, entry.getFailedRecently());
  assertEquals(1, entry.getFailed());
}
/**
 * A limits-exceeded completion on a node entry is not charged against the
 * node: no failure, start-failure or preemption counter moves.
 */
@Test
public void testNodeEntryLimitsExceeded() throws Throwable {
  NodeEntry nodeEntry = new NodeEntry(1);
  // record a limits-exceeded completion
  nodeEntry.containerCompleted(false, ContainerOutcome
      .Failed_limits_exceeded);
  assertEquals(0, nodeEntry.getFailed());
  assertEquals(0, nodeEntry.getFailedRecently());
  assertEquals(0, nodeEntry.getStartFailed());
  assertEquals(0, nodeEntry.getPreempted());
}
/**
 * Preemption increments only the preempted counter on a node entry;
 * it is not recorded as any kind of failure.
 */
@Test
public void testNodeEntryPreempted() throws Throwable {
  NodeEntry entry = new NodeEntry(1);
  entry.containerCompleted(false, ContainerOutcome.Preempted);
  assertEquals(1, entry.getPreempted());
  assertEquals(0, entry.getFailed());
  assertEquals(0, entry.getFailedRecently());
  assertEquals(0, entry.getStartFailed());
}
/**
 * A disk failure on a node entry is recorded as a failure (total and
 * recent), but not as a start failure or preemption.
 */
@Test
public void testNodeEntryNodeFailure() throws Throwable {
  NodeEntry nodeEntry = new NodeEntry(1);
  // record a disk failure
  nodeEntry.containerCompleted(false, ContainerOutcome.Disk_failure);
  assertEquals(1, nodeEntry.getFailed());
  assertEquals(1, nodeEntry.getFailedRecently());
  assertEquals(0, nodeEntry.getStartFailed());
  assertEquals(0, nodeEntry.getPreempted());
}
}

View File

@ -0,0 +1,212 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.providers.PlacementPolicy;
import org.apache.slider.providers.ProviderRole;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
import org.apache.slider.server.appmaster.state.AppState;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
* Test that if you have >1 role, the right roles are chosen for release.
*/
public class TestMockAppStateDynamicHistory extends BaseMockAppStateTest
implements MockRoles {
private static final Logger LOG =
LoggerFactory.getLogger(TestMockAppStateDynamicHistory.class);
/**
* Small cluster with multiple containers per node,
* to guarantee many container allocations on each node.
* @return
*/
@Override
public MockYarnEngine createYarnEngine() {
return new MockYarnEngine(8, 1);
}
@Test
public void testDynamicRoleHistory() throws Throwable {
String dynamic = "dynamicRole";
long desired = 1;
int placementPolicy = PlacementPolicy.DEFAULT;
// snapshot and patch existing spec
Application application = appState.getClusterStatus();
Component component = new Component().name(dynamic).numberOfContainers(
desired);
component.getConfiguration().setProperty(ResourceKeys
.COMPONENT_PLACEMENT_POLICY, "" + placementPolicy);
application.getComponents().add(component);
// write the definitions
List<ProviderRole> updates = appState.updateComponents(
Collections.singletonMap(dynamic, desired));
assertEquals(1, updates.size());
ProviderRole updatedRole = updates.get(0);
assertEquals(updatedRole.placementPolicy, placementPolicy);
// now look at the role map
assertNotNull(appState.getRoleMap().get(dynamic));
ProviderRole mappedRole = appState.getRoleMap().get(dynamic);
int rolePriority = mappedRole.id;
Map<Integer, ProviderRole> priorityMap = appState.getRolePriorityMap();
assertEquals(priorityMap.size(), 4);
ProviderRole dynamicProviderRole = priorityMap.get(rolePriority);
assertNotNull(dynamicProviderRole);
assertEquals(dynamicProviderRole.id, rolePriority);
assertNotNull(appState.getRoleStatusMap().get(rolePriority));
RoleStatus dynamicRoleStatus =
appState.getRoleStatusMap().get(rolePriority);
assertEquals(dynamicRoleStatus.getDesired(), desired);
// before allocating the nodes, fill up the capacity of some of the
// hosts
engine.getAllocator().nextIndex();
int targetNode = 2;
assertEquals(targetNode, engine.getAllocator().nextIndex());
String targetHostname = engine.getCluster().nodeAt(targetNode)
.getHostname();
// clock is set to a small value
appState.setTime(100000);
// allocate the nodes
List<AbstractRMOperation> actions = appState.reviewRequestAndReleaseNodes();
assertEquals(1, actions.size());
ContainerRequestOperation action0 = (ContainerRequestOperation)actions
.get(0);
ContainerRequest request = action0.getRequest();
assertTrue(SliderUtils.isEmpty(request.getNodes()));
List<ContainerId> released = new ArrayList<>();
List<RoleInstance> allocations = submitOperations(actions, released);
processSubmissionOperations(allocations, new ArrayList<>(), released);
assertEquals(1, allocations.size());
RoleInstance ri = allocations.get(0);
assertEquals(ri.role, dynamic);
assertEquals(ri.roleId, rolePriority);
assertEquals(ri.host, targetHostname);
// now look at the role history
RoleHistory roleHistory = appState.getRoleHistory();
List<NodeInstance> activeNodes = roleHistory.listActiveNodes(
rolePriority);
assertEquals(activeNodes.size(), 1);
NodeInstance activeNode = activeNodes.get(0);
assertNotNull(activeNode.get(rolePriority));
NodeEntry entry8 = activeNode.get(rolePriority);
assertEquals(entry8.getActive(), 1);
assertEquals(activeNode.hostname, targetHostname);
NodeInstance activeNodeInstance =
roleHistory.getOrCreateNodeInstance(ri.container);
assertEquals(activeNode, activeNodeInstance);
NodeEntry entry = activeNodeInstance.get(rolePriority);
assertNotNull(entry);
assertTrue(entry.getActive() > 0);
assertTrue(entry.getLive() > 0);
// now trigger a termination event on that role
// increment time for a long-lived failure event
appState.incTime(100000);
LOG.debug("Triggering failure");
ContainerId cid = ri.getContainerId();
AppState.NodeCompletionResult result = appState.onCompletedContainer(
containerStatus(cid, 1));
assertEquals(result.roleInstance, ri);
assertTrue(result.containerFailed);
roleHistory.dump();
// values should have changed
assertEquals(1, entry.getFailed());
assertEquals(0, entry.getStartFailed());
assertEquals(0, entry.getActive());
assertEquals(0, entry.getLive());
List<NodeInstance> nodesForRoleId =
roleHistory.getRecentNodesForRoleId(rolePriority);
assertNotNull(nodesForRoleId);
// make sure new nodes will default to a different host in the engine
assertTrue(targetNode < engine.getAllocator().nextIndex());
actions = appState.reviewRequestAndReleaseNodes();
assertEquals(1, actions.size());
ContainerRequestOperation action1 = (ContainerRequestOperation) actions
.get(0);
ContainerRequest request1 = action1.getRequest();
assertTrue(SliderUtils.isNotEmpty(request1.getNodes()));
}
@Test(expected = BadConfigException.class)
public void testRoleHistoryRoleAdditions() throws Throwable {
MockRoleHistory roleHistory = new MockRoleHistory(new ArrayList<>());
roleHistory.addNewRole(new RoleStatus(new ProviderRole("one", 1)));
roleHistory.addNewRole(new RoleStatus(new ProviderRole("two", 1)));
roleHistory.dump();
}
@Test(expected = BadConfigException.class)
public void testRoleHistoryRoleStartupConflict() throws Throwable {
MockRoleHistory roleHistory = new MockRoleHistory(Arrays.asList(
new ProviderRole("one", 1), new ProviderRole("two", 1)
));
roleHistory.dump();
}
}

View File

@ -0,0 +1,243 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.providers.PlacementPolicy;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
import org.apache.slider.server.appmaster.state.AppState.NodeCompletionResult;
import org.apache.slider.server.appmaster.state.ContainerPriority;
import org.apache.slider.server.appmaster.state.RoleHistoryUtils;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import static org.apache.slider.server.appmaster.model.mock.MockFactory.NODE_FAILURE_THRESHOLD;
/**
 * Test dynamically added roles: propagation of placement policy and node
 * failure threshold from the component spec, and lax vs strict placement
 * behaviour across shrink/expand cycles.
 */
public class TestMockAppStateDynamicRoles extends BaseMockAppStateTest
    implements MockRoles {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestMockAppStateDynamicRoles.class);
  private static final String ROLE4 = "4";
  private static final String ROLE5 = "5";

  @Override
  public String getTestName() {
    return "TestMockAppStateDynamicRoles";
  }

  /**
   * Small cluster with multiple containers per node,
   * to guarantee many container allocations on each node.
   * @return a mock YARN engine
   */
  @Override
  public MockYarnEngine createYarnEngine() {
    return new MockYarnEngine(8, 2);
  }

  /**
   * Add two extra components: ROLE4 with a custom node failure threshold
   * and ROLE5 with strict placement.
   */
  @Override
  public Application buildApplication() {
    Application application = super.buildApplication();

    Component component = new Component().name(ROLE4).numberOfContainers(1L);
    component.getConfiguration().setProperty(ResourceKeys
        .NODE_FAILURE_THRESHOLD, Integer.toString(3));
    application.getComponents().add(component);

    component = new Component().name(ROLE5).numberOfContainers(1L);
    component.getConfiguration().setProperty(ResourceKeys
        .COMPONENT_PLACEMENT_POLICY, Integer.toString(PlacementPolicy.STRICT));
    application.getComponents().add(component);
    return application;
  }

  /**
   * Smoke test: bring the cluster up, review, and dump the history.
   */
  @Test
  public void testAllocateReleaseRealloc() throws Throwable {
    createAndStartNodes();
    appState.reviewRequestAndReleaseNodes();
    appState.getRoleHistory().dump();
  }

  /**
   * Find all allocations for a specific role.
   * @param role role Id/priority
   * @param actions source list
   * @return found list
   */
  List<ContainerRequestOperation> findAllocationsForRole(int role,
      List<AbstractRMOperation> actions) {
    List<ContainerRequestOperation> ops = new ArrayList<>();
    for (AbstractRMOperation op : actions) {
      if (op instanceof ContainerRequestOperation && role ==
          ContainerPriority.extractRole(((ContainerRequestOperation) op)
          .getRequest().getPriority())) {
        ops.add((ContainerRequestOperation) op);
      }
    }
    return ops;
  }

  @Test
  public void testStrictPlacementInitialRequest() throws Throwable {
    LOG.info("Initial engine state = {}", engine);
    List<AbstractRMOperation> actions = appState.reviewRequestAndReleaseNodes();
    assertEquals(2, actions.size());

    // neither role has placement history yet, so both requests relax locality
    assertRelaxLocalityFlag(appState.lookupRoleStatus(ROLE4).getKey(), null,
        true, actions);
    assertRelaxLocalityFlag(appState.lookupRoleStatus(ROLE5).getKey(), null,
        true, actions);
  }

  @Test
  public void testPolicyPropagation() throws Throwable {
    // ROLE4 keeps the default policy; ROLE5 was configured strict
    assertEquals(0, (appState.lookupRoleStatus(ROLE4).getPlacementPolicy() &
        PlacementPolicy.STRICT));
    assertNotEquals(0, (appState.lookupRoleStatus(ROLE5).getPlacementPolicy() &
        PlacementPolicy.STRICT));
  }

  @Test
  public void testNodeFailureThresholdPropagation() throws Throwable {
    // ROLE4 carries its custom threshold; ROLE5 inherits the default
    assertEquals(3, appState.lookupRoleStatus(ROLE4).getNodeFailureThreshold());
    assertEquals(NODE_FAILURE_THRESHOLD, appState.lookupRoleStatus(ROLE5)
        .getNodeFailureThreshold());
  }

  /**
   * Lax placement: after a ROLE4 container has run and been released,
   * the next request names the previous host.
   */
  @Test
  public void testLaxPlacementSecondRequestRole4() throws Throwable {
    LOG.info("Initial engine state = {}", engine);
    RoleStatus role4 = appState.lookupRoleStatus(ROLE4);
    RoleStatus role5 = appState.lookupRoleStatus(ROLE5);
    role4.setDesired(1);
    role5.setDesired(0);

    List<RoleInstance> instances = createStartAndStopNodes(new ArrayList<>());
    assertEquals(1, instances.size());

    int id = role4.getKey();
    RoleInstance instanceA = null;
    for (RoleInstance instance : instances) {
      if (instance.roleId == id) {
        instanceA = instance;
      }
    }
    assertNotNull(instanceA);
    String hostname = RoleHistoryUtils.hostnameOf(instanceA.container);

    LOG.info("Allocated engine state = {}", engine);
    assertEquals(1, engine.containerCount());
    assertEquals(1, role4.getRunning());

    // shrink the cluster to zero ROLE4 instances: one node released
    // (the original also re-set the desired count through
    // appState.lookupRoleStatus(ROLE4), which is the same object — the
    // redundant call has been dropped)
    role4.setDesired(0);
    List<NodeCompletionResult> completionResults = new ArrayList<>();
    createStartAndStopNodes(completionResults);
    assertEquals(0, engine.containerCount());
    assertEquals(1, completionResults.size());

    // expanding: expect hostnames now
    role4.setDesired(1);
    List<AbstractRMOperation> actions = appState.reviewRequestAndReleaseNodes();
    assertEquals(1, actions.size());

    ContainerRequestOperation cro = (ContainerRequestOperation) actions.get(0);
    List<String> nodes = cro.getRequest().getNodes();
    assertEquals(1, nodes.size());
    assertEquals(hostname, nodes.get(0));
  }

  /**
   * Strict placement: after a ROLE5 container has run and been released,
   * the next request names the previous host and does not relax locality.
   */
  @Test
  public void testStrictPlacementSecondRequestRole5() throws Throwable {
    LOG.info("Initial engine state = {}", engine);
    RoleStatus role4 = appState.lookupRoleStatus(ROLE4);
    RoleStatus role5 = appState.lookupRoleStatus(ROLE5);
    role4.setDesired(0);
    role5.setDesired(1);

    List<RoleInstance> instances = createStartAndStopNodes(new ArrayList<>());
    assertEquals(1, instances.size());

    int id = role5.getKey();
    RoleInstance instanceA = null;
    for (RoleInstance instance : instances) {
      if (instance.roleId == id) {
        instanceA = instance;
      }
    }
    assertNotNull(instanceA);
    String hostname = RoleHistoryUtils.hostnameOf(instanceA.container);

    LOG.info("Allocated engine state = {}", engine);
    assertEquals(1, engine.containerCount());
    assertEquals(1, role5.getRunning());

    // shrink the cluster to zero ROLE5 instances: one node released
    role5.setDesired(0);
    List<NodeCompletionResult> completionResults = new ArrayList<>();
    createStartAndStopNodes(completionResults);
    assertEquals(0, engine.containerCount());
    assertEquals(1, completionResults.size());
    assertEquals(0, role5.getRunning());

    // expanding: expect an unrelaxed request targeting the original host
    role5.setDesired(1);
    List<AbstractRMOperation> actions = appState.reviewRequestAndReleaseNodes();
    assertEquals(1, actions.size());
    assertRelaxLocalityFlag(id, "", false, actions);
    ContainerRequestOperation cro = (ContainerRequestOperation) actions.get(0);
    List<String> nodes = cro.getRequest().getNodes();
    assertEquals(1, nodes.size());
    assertEquals(hostname, nodes.get(0));
  }

  /**
   * Assert on the relax-locality flag of the single request for a role.
   * @param role role Id/priority
   * @param expectedHost currently unused; retained for signature
   *     compatibility (TODO: assert the request host once needed)
   * @param expectedRelaxFlag expected relax-locality setting
   * @param actions operations to scan
   */
  public void assertRelaxLocalityFlag(
      int role,
      String expectedHost,
      boolean expectedRelaxFlag,
      List<AbstractRMOperation> actions) {
    List<ContainerRequestOperation> requests = findAllocationsForRole(
        role, actions);
    assertEquals(1, requests.size());
    ContainerRequestOperation req = requests.get(0);
    assertEquals(expectedRelaxFlag, req.getRequest().getRelaxLocality());
  }
}

View File

@ -0,0 +1,160 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.fs.Path;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.core.exceptions.SliderInternalStateException;
import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockAppState;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.avro.LoadedRoleHistory;
import org.apache.slider.server.avro.RoleHistoryWriter;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.Collections;
/**
 * Test dynamic flexing of roles: adding and dropping components at
 * runtime, and saving/loading role history across flex operations.
 */
public class TestMockAppStateFlexDynamicRoles extends BaseMockAppStateTest
    implements MockRoles {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestMockAppStateFlexDynamicRoles.class);

  @Override
  public String getTestName() {
    return "TestMockAppStateFlexDynamicRoles";
  }

  /**
   * Small cluster with multiple containers per node,
   * to guarantee many container allocations on each node.
   * @return a mock YARN engine
   */
  @Override
  public MockYarnEngine createYarnEngine() {
    return new MockYarnEngine(4, 4);
  }

  // release the most recently started containers first, for determinism
  @Override
  public AppStateBindingInfo buildBindingInfo() {
    AppStateBindingInfo bindingInfo = super.buildBindingInfo();
    bindingInfo.releaseSelector = new MostRecentContainerReleaseSelector();
    return bindingInfo;
  }

  // add an extra dynamic component "dynamic-6" to the base application
  @Override
  public Application buildApplication() {
    Application application = super.buildApplication();
    Component component = new Component().name("dynamic-6")
        .numberOfContainers(1L);
    application.getComponents().add(component);
    return application;
  }

  // bring the initial cluster up before each test
  @Before
  public void init()
      throws TriggerClusterTeardownException, SliderInternalStateException {
    createAndStartNodes();
  }

  /**
   * Flexing in a brand-new component must create its role.
   */
  @Test
  public void testDynamicFlexAddRole() throws Throwable {
    Application application = appState.getClusterStatus();
    Component component = new Component().name("dynamicAdd7")
        .numberOfContainers(1L);
    application.getComponents().add(component);
    appState.updateComponents(Collections.singletonMap(component.getName(),
        component.getNumberOfContainers()));
    createAndStartNodes();
    dumpClusterDescription("updated CD", appState.getClusterStatus());
    // lookup must succeed — it throws if the role was not created
    appState.lookupRoleStatus("dynamicAdd7");
  }

  /**
   * Flexing a component down to zero keeps its role status registered.
   */
  @Test
  public void testDynamicFlexDropRole() throws Throwable {
    appState.updateComponents(Collections.singletonMap("dynamic-6", 0L));
    Application getCD = appState.getClusterStatus();
    dumpClusterDescription("updated CD", getCD);
    //status is retained for future
    appState.lookupRoleStatus("dynamic-6");
  }

  /**
   * A history saved before a flex must still load afterwards.
   */
  @Test
  public void testHistorySaveFlexLoad() throws Throwable {
    Application application = appState.getClusterStatus();
    RoleHistory roleHistory = appState.getRoleHistory();
    // save the history before flexing in the new component
    Path history = roleHistory.saveHistory(0x0001);
    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
    Component component = new Component().name("HistorySaveFlexLoad")
        .numberOfContainers(1L);
    application.getComponents().add(component);
    appState.updateComponents(Collections.singletonMap(component.getName(),
        component.getNumberOfContainers()));
    createAndStartNodes();
    LoadedRoleHistory loadedRoleHistory =
        historyWriter.read(fs, history);
    // rebuild must find no unresolvable entries in the saved history
    assertEquals(0, appState.getRoleHistory().rebuild(loadedRoleHistory));
  }

  /**
   * Save history after a flex, then rebuild a fresh app state from it.
   */
  @Test
  public void testHistoryFlexSaveResetLoad() throws Throwable {
    Application application = appState.getClusterStatus();
    Component component = new Component().name("HistoryFlexSaveLoad")
        .numberOfContainers(1L);
    application.getComponents().add(component);
    appState.updateComponents(Collections.singletonMap(component.getName(),
        component.getNumberOfContainers()));
    createAndStartNodes();
    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
    RoleHistory roleHistory = appState.getRoleHistory();
    // save the history after the flex
    Path history = roleHistory.saveHistory(0x0002);

    //now reset the app state
    File historyWorkDir2 = new File("target/history" + getTestName() +
        "-0002");
    Path historyPath2 = new Path(historyWorkDir2.toURI());
    appState = new MockAppState();
    AppStateBindingInfo binding2 = buildBindingInfo();
    binding2.application = factory.newApplication(0, 0, 0)
        .name(getTestName());
    binding2.historyPath = historyPath2;
    appState.buildInstance(binding2);
    // on this read there won't be the right number of roles
    LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history);
    assertEquals(0, appState.getRoleHistory().rebuild(loadedRoleHistory));
  }
}

View File

@ -0,0 +1,201 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.types.ApplicationLivenessInformation;
import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
import org.apache.slider.server.appmaster.state.AppState;
import org.apache.slider.server.appmaster.state.ContainerAssignment;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
 * Test app state flexing: growing and shrinking role counts, rejection of
 * negative counts, and cancellation of outstanding requests.
 */
public class TestMockAppStateFlexing extends BaseMockAppStateTest implements
    MockRoles {
  // log against this class, not the base class it extends
  private static final Logger LOG =
      LoggerFactory.getLogger(TestMockAppStateFlexing.class);

  @Override
  public String getTestName() {
    return "TestMockAppStateFlexing";
  }

  /**
   * Flex while a container is in its launch phase: the outstanding request
   * shows up in the liveness information and is satisfied by allocation.
   */
  @Test
  public void testFlexDuringLaunchPhase() throws Throwable {
    // ask for one instance of role0
    getRole0Status().setDesired(1);
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    // at this point there's now one request in the list
    assertEquals(1, ops.size());
    // and in a liveness check, one outstanding
    ApplicationLivenessInformation liveness =
        appState.getApplicationLivenessInformation();
    assertEquals(1, liveness.requestsOutstanding);
    assertFalse(liveness.allRequestsSatisfied);

    List<Container> allocations = engine.execute(ops);
    List<ContainerAssignment> assignments = new ArrayList<>();
    List<AbstractRMOperation> releases = new ArrayList<>();
    appState.onContainersAllocated(allocations, assignments, releases);
    assertEquals(1, assignments.size());
    ContainerAssignment assigned = assignments.get(0);
    Container target = assigned.container;
    RoleInstance ri = roleInstance(assigned);

    // once the request is satisfied, no further operations are produced
    ops = appState.reviewRequestAndReleaseNodes();
    assertTrue(ops.isEmpty());
    liveness = appState.getApplicationLivenessInformation();
    assertEquals(0, liveness.requestsOutstanding);
    assertTrue(liveness.allRequestsSatisfied);

    //now this is the start point.
    appState.containerStartSubmitted(target, ri);
    ops = appState.reviewRequestAndReleaseNodes();
    assertTrue(ops.isEmpty());

    appState.innerOnNodeManagerContainerStarted(target.getId());
  }

  /**
   * An outstanding request must not be re-issued on a second review, and
   * must be reported as outstanding in the liveness information.
   */
  @Test
  public void testFlexBeforeAllocationPhase() throws Throwable {
    getRole0Status().setDesired(1);
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    assertFalse(ops.isEmpty());

    // second scan will find the first run outstanding, so not re-issue
    // any more container requests
    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
    assertTrue(ops2.isEmpty());

    // and in a liveness check, one outstanding
    ApplicationLivenessInformation liveness = appState
        .getApplicationLivenessInformation();
    assertEquals(1, liveness.requestsOutstanding);
    assertFalse(liveness.allRequestsSatisfied);

    appState.refreshClusterStatus();
    Application application = appState.getClusterStatus();
    // TODO cluster status returns liveness info
    // assertEquals(1, cd.liveness.requestsOutstanding);
  }

  /**
   * Two successive shrink operations release the expected node counts.
   */
  @Test
  public void testFlexDownTwice() throws Throwable {
    int r0 = 6;
    int r1 = 0;
    int r2 = 0;
    getRole0Status().setDesired(r0);
    getRole1Status().setDesired(r1);
    getRole2Status().setDesired(r2);
    List<RoleInstance> instances = createAndStartNodes();

    int clusterSize = r0 + r1 + r2;
    assertEquals(clusterSize, instances.size());
    LOG.info("shrinking cluster");

    // first shrink: 6 -> 4, two nodes released
    r0 = 4;
    getRole0Status().setDesired(r0);
    List<AppState.NodeCompletionResult> completionResults = new ArrayList<>();
    instances = createStartAndStopNodes(completionResults);
    assertEquals(0, instances.size());
    assertEquals(2, completionResults.size());

    // no-op review: nothing more is released
    completionResults = new ArrayList<>();
    instances = createStartAndStopNodes(completionResults);
    assertEquals(0, instances.size());
    assertEquals(0, completionResults.size());

    // second shrink: 4 -> 1, three more nodes released
    getRole0Status().setDesired(1);
    completionResults = new ArrayList<>();
    instances = createStartAndStopNodes(completionResults);
    assertEquals(0, instances.size());
    assertEquals(3, completionResults.size());
  }

  /**
   * A negative desired count is invalid and must trigger a teardown.
   */
  @Test
  public void testFlexNegative() throws Throwable {
    int r0 = 6;
    int r1 = 0;
    int r2 = 0;
    getRole0Status().setDesired(r0);
    getRole1Status().setDesired(r1);
    getRole2Status().setDesired(r2);
    List<RoleInstance> instances = createAndStartNodes();

    int clusterSize = r0 + r1 + r2;
    assertEquals(clusterSize, instances.size());
    LOG.info("shrinking cluster");

    getRole0Status().setDesired(-2);
    List<AppState.NodeCompletionResult> completionResults = new ArrayList<>();
    try {
      createStartAndStopNodes(completionResults);
      fail("expected an exception");
    } catch (TriggerClusterTeardownException e) {
      // expected: a negative desired count is rejected with a teardown
    }
  }

  /**
   * Flexing down before requests are satisfied must cancel the surplus
   * requests rather than release containers.
   */
  @Test
  public void testCancelWithRequestsOutstanding() throws Throwable {
    // flex cluster size before the original set were allocated
    getRole0Status().setDesired(6);
    // build the ops
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    // here the data structures exist

    // go down: 6 -> 3, so three cancellations are expected
    getRole0Status().setDesired(3);
    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
    assertEquals(3, ops2.size());
    for (AbstractRMOperation op : ops2) {
      assertTrue(op instanceof CancelSingleRequest);
    }
  }
}

View File

@ -0,0 +1,382 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
import org.apache.slider.server.appmaster.state.AppState;
import org.apache.slider.server.appmaster.state.ContainerAssignment;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import static org.apache.slider.server.appmaster.state.ContainerPriority.buildPriority;
import static org.apache.slider.server.appmaster.state.ContainerPriority.extractRole;
/**
* Test app state RM operations.
*/
public class TestMockAppStateRMOperations extends BaseMockAppStateTest
implements MockRoles {
private static final Logger LOG =
LoggerFactory.getLogger(BaseMockAppStateTest.class);
@Override
public String getTestName() {
return "TestMockAppStateRMOperations";
}
@Test
public void testPriorityOnly() throws Throwable {
assertEquals(5, extractRole(buildPriority(5, false)));
}
@Test
public void testPriorityRoundTrip() throws Throwable {
assertEquals(5, extractRole(buildPriority(5, false)));
}
@Test
public void testPriorityRoundTripWithRequest() throws Throwable {
int priority = buildPriority(5, false);
assertEquals(5, extractRole(priority));
}
@Test
public void testMockAddOp() throws Throwable {
getRole0Status().setDesired(1);
List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
assertListLength(ops, 1);
ContainerRequestOperation operation = (ContainerRequestOperation)ops.get(0);
int priority = operation.getRequest().getPriority().getPriority();
assertEquals(extractRole(priority), getRole0Status().getKey());
MockRMOperationHandler handler = new MockRMOperationHandler();
handler.execute(ops);
AbstractRMOperation op = handler.getFirstOp();
assertTrue(op instanceof ContainerRequestOperation);
}
/**
* Test of a flex up and down op which verifies that outstanding
* requests are cancelled first.
* <ol>
* <li>request 5 nodes, assert 5 request made</li>
* <li>allocate 1 of them</li>
* <li>flex cluster size to 3</li>
* <li>assert this generates 2 cancel requests</li>
* </ol>
*/
@Test
public void testRequestThenCancelOps() throws Throwable {
RoleStatus role0 = getRole0Status();
role0.setDesired(5);
List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
assertListLength(ops, 5);
// now 5 outstanding requests.
assertEquals(5, role0.getRequested());
// allocate one
List<AbstractRMOperation> processed = new ArrayList<>();
processed.add(ops.get(0));
List<ContainerId> released = new ArrayList<>();
List<AppState.NodeCompletionResult> completionResults = new ArrayList<>();
submitOperations(processed, released);
List<RoleInstance> instances = createAndSubmitNodes(released);
processSubmissionOperations(instances, completionResults, released);
// four outstanding
assertEquals(4, role0.getRequested());
// flex cluster to 3
role0.setDesired(3);
ops = appState.reviewRequestAndReleaseNodes();
// expect two cancel operation from review
assertListLength(ops, 2);
for (AbstractRMOperation op : ops) {
assertTrue(op instanceof CancelSingleRequest);
}
MockRMOperationHandler handler = new MockRMOperationHandler();
handler.setAvailableToCancel(4);
handler.execute(ops);
assertEquals(2, handler.getAvailableToCancel());
assertEquals(2, role0.getRequested());
// flex down one more
role0.setDesired(2);
ops = appState.reviewRequestAndReleaseNodes();
assertListLength(ops, 1);
for (AbstractRMOperation op : ops) {
assertTrue(op instanceof CancelSingleRequest);
}
handler.execute(ops);
assertEquals(1, handler.getAvailableToCancel());
assertEquals(1, role0.getRequested());
}
@Test
public void testCancelNoActualContainers() throws Throwable {
RoleStatus role0 = getRole0Status();
role0.setDesired(5);
List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
assertListLength(ops, 5);
// now 5 outstanding requests.
assertEquals(5, role0.getRequested());
role0.setDesired(0);
ops = appState.reviewRequestAndReleaseNodes();
assertListLength(ops, 5);
}
@Test
public void testFlexDownOutstandingRequests() throws Throwable {
// engine only has two nodes, so > 2 will be outstanding
engine = new MockYarnEngine(1, 2);
List<AbstractRMOperation> ops;
// role: desired = 2, requested = 1, actual=1
RoleStatus role0 = getRole0Status();
role0.setDesired(4);
createAndSubmitNodes();
assertEquals(2, role0.getRequested());
assertEquals(2, role0.getRunning());
// there are now two outstanding, two actual
// Release 3 and verify that the two
// cancellations were combined with a release
role0.setDesired(1);
assertEquals(-3, role0.getDelta());
ops = appState.reviewRequestAndReleaseNodes();
assertListLength(ops, 3);
int numCancel = 0;
int numRelease = 0;
for (AbstractRMOperation op : ops) {
if (op instanceof CancelSingleRequest) {
numCancel++;
}
if (op instanceof ContainerReleaseOperation) {
numRelease++;
}
}
assertEquals(2, numCancel);
assertEquals(1, numRelease);
assertEquals(0, role0.getRequested());
// TODO releasing?
// assertEquals(1, role0.getReleasing());
}
@Test
public void testCancelAllOutstandingRequests() throws Throwable {
// role: desired = 2, requested = 1, actual=1
RoleStatus role0 = getRole0Status();
role0.setDesired(2);
List<AbstractRMOperation> ops;
ops = appState.reviewRequestAndReleaseNodes();
int count = 0;
for (AbstractRMOperation op : ops) {
if (op instanceof ContainerRequestOperation) {
count++;
}
}
assertEquals(2, count);
// there are now two outstanding, two actual
// Release 3 and verify that the two
// cancellations were combined with a release
role0.setDesired(0);
ops = appState.reviewRequestAndReleaseNodes();
assertEquals(2, ops.size());
for (AbstractRMOperation op : ops) {
assertTrue(op instanceof CancelSingleRequest);
}
}
@Test
public void testFlexUpOutstandingRequests() throws Throwable {
List<AbstractRMOperation> ops;
// role: desired = 2, requested = 1, actual=1
RoleStatus role0 = getRole0Status();
role0.setDesired(2);
appState.incRunningContainers(role0);
appState.incRequestedContainers(role0);
// flex up 2 nodes, yet expect only one node to be requested,
// as the outstanding request is taken into account
role0.setDesired(4);
appState.incRequestedContainers(role0);
assertEquals(1, role0.getRunning());
assertEquals(2, role0.getRequested());
assertEquals(3, role0.getActualAndRequested());
assertEquals(1, role0.getDelta());
ops = appState.reviewRequestAndReleaseNodes();
assertListLength(ops, 1);
assertTrue(ops.get(0) instanceof ContainerRequestOperation);
assertEquals(3, role0.getRequested());
}
@Test
public void testFlexUpNoSpace() throws Throwable {
// engine only has two nodes, so > 2 will be outstanding
engine = new MockYarnEngine(1, 2);
// role: desired = 2, requested = 1, actual=1
RoleStatus role0 = getRole0Status();
role0.setDesired(4);
createAndSubmitNodes();
assertEquals(2, role0.getRequested());
assertEquals(2, role0.getRunning());
role0.setDesired(8);
assertEquals(4, role0.getDelta());
createAndSubmitNodes();
assertEquals(6, role0.getRequested());
}
@Test
public void testAllocateReleaseOp() throws Throwable {
getRole0Status().setDesired(1);
List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
ContainerRequestOperation operation = (ContainerRequestOperation)ops.get(0);
AMRMClient.ContainerRequest request = operation.getRequest();
Container cont = engine.allocateContainer(request);
List<Container> allocated = new ArrayList<>();
allocated.add(cont);
List<ContainerAssignment> assignments = new ArrayList<>();
List<AbstractRMOperation> operations = new ArrayList<>();
appState.onContainersAllocated(allocated, assignments, operations);
assertListLength(ops, 1);
assertListLength(assignments, 1);
ContainerAssignment assigned = assignments.get(0);
Container target = assigned.container;
assertEquals(target.getId(), cont.getId());
int roleId = assigned.role.getPriority();
assertEquals(roleId, extractRole(request.getPriority()));
assertEquals(assigned.role.getName(), ROLE0);
RoleInstance ri = roleInstance(assigned);
//tell the app it arrived
appState.containerStartSubmitted(target, ri);
appState.innerOnNodeManagerContainerStarted(target.getId());
assertEquals(1, getRole0Status().getRunning());
//now release it by changing the role status
getRole0Status().setDesired(0);
ops = appState.reviewRequestAndReleaseNodes();
assertListLength(ops, 1);
assertTrue(ops.get(0) instanceof ContainerReleaseOperation);
ContainerReleaseOperation release = (ContainerReleaseOperation) ops.get(0);
assertEquals(release.getContainerId(), cont.getId());
}
@Test
public void testComplexAllocation() throws Throwable {
getRole0Status().setDesired(1);
getRole1Status().setDesired(3);
List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
List<Container> allocations = engine.execute(ops);
List<ContainerAssignment> assignments = new ArrayList<>();
List<AbstractRMOperation> releases = new ArrayList<>();
appState.onContainersAllocated(allocations, assignments, releases);
// we expect four release requests here for all the allocated containers
assertListLength(releases, 4);
for (AbstractRMOperation op : releases) {
assertTrue(op instanceof CancelSingleRequest);
}
assertListLength(assignments, 4);
for (ContainerAssignment assigned : assignments) {
Container target = assigned.container;
RoleInstance ri = roleInstance(assigned);
appState.containerStartSubmitted(target, ri);
}
//insert some async operation here
for (ContainerAssignment assigned : assignments) {
Container target = assigned.container;
appState.innerOnNodeManagerContainerStarted(target.getId());
}
assertEquals(4, engine.containerCount());
getRole1Status().setDesired(0);
ops = appState.reviewRequestAndReleaseNodes();
assertListLength(ops, 3);
allocations = engine.execute(ops);
assertEquals(1, engine.containerCount());
appState.onContainersAllocated(allocations, assignments, releases);
assertTrue(assignments.isEmpty());
assertTrue(releases.isEmpty());
}
@Test
public void testDoubleNodeManagerStartEvent() throws Throwable {
getRole0Status().setDesired(1);
List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
List<Container> allocations = engine.execute(ops);
List<ContainerAssignment> assignments = new ArrayList<>();
List<AbstractRMOperation> releases = new ArrayList<>();
appState.onContainersAllocated(allocations, assignments, releases);
assertListLength(assignments, 1);
ContainerAssignment assigned = assignments.get(0);
Container target = assigned.container;
RoleInstance ri = roleInstance(assigned);
appState.containerStartSubmitted(target, ri);
RoleInstance ri2 = appState.innerOnNodeManagerContainerStarted(target
.getId());
assertEquals(ri2, ri);
//try a second time, expect an error
try {
appState.innerOnNodeManagerContainerStarted(target.getId());
fail("Expected an exception");
} catch (RuntimeException expected) {
// expected
}
//and non-faulter should not downgrade to a null
LOG.warn("Ignore any exception/stack trace that appears below");
LOG.warn("===============================================================");
RoleInstance ri3 = appState.onNodeManagerContainerStarted(target.getId());
LOG.warn("===============================================================");
LOG.warn("Ignore any exception/stack trace that appeared above");
assertNull(ri3);
}
}

View File

@ -0,0 +1,117 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.slider.api.resource.Application;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockAppState;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.NodeMap;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Verify that an AM restart rebuilds identical application state from the
 * set of still-live containers handed back by YARN.
 */
public class TestMockAppStateRebuildOnAMRestart extends BaseMockAppStateTest
    implements MockRoles {

  @Override
  public String getTestName() {
    return "TestMockAppStateRebuildOnAMRestart";
  }

  @Test
  public void testRebuild() throws Throwable {
    int desired0 = 1;
    int desired1 = 2;
    int desired2 = 3;
    int expectedSize = desired0 + desired1 + desired2;
    getRole0Status().setDesired(desired0);
    getRole1Status().setDesired(desired1);
    getRole2Status().setDesired(desired2);
    List<RoleInstance> started = createAndStartNodes();
    assertEquals(expectedSize, started.size());

    // snapshot the live containers and the pre-restart role history
    List<Container> liveContainers = new ArrayList<>();
    for (RoleInstance instance : started) {
      liveContainers.add(instance.container);
    }
    NodeMap nodemapBefore = appState.getRoleHistory().cloneNodemap();

    // rebuild a fresh app state seeded with those containers
    AppStateBindingInfo binding = buildBindingInfo();
    binding.application = factory.newApplication(desired0, desired1, desired2)
        .name(getTestName());
    binding.liveContainers = liveContainers;
    appState = new MockAppState(binding);

    assertEquals(expectedSize, appState.getLiveContainers().size());
    appState.getRoleHistory().dump();

    // per-role live node lists must match the original sizes
    assertEquals(desired0, appState.enumLiveNodesInRole(ROLE0).size());
    assertEquals(desired1, appState.enumLiveNodesInRole(ROLE1).size());
    assertEquals(desired2, appState.enumLiveNodesInRole(ROLE2).size());

    // the rebuilt role history must agree with the pre-restart snapshot
    List<Integer> roleKeys = Arrays.asList(getRole0Status().getKey(),
        getRole1Status().getKey(), getRole2Status().getKey());
    NodeMap nodemapAfter = appState.getRoleHistory().cloneNodemap();
    for (NodeInstance rebuilt : nodemapAfter.values()) {
      NodeInstance original = nodemapBefore.get(rebuilt.hostname);
      assertNotNull("Null entry in original nodemap for " + rebuilt.hostname,
          original);
      for (int key : roleKeys) {
        assertEquals(rebuilt.getActiveRoleInstances(key),
            original.getActiveRoleInstances(key));
        NodeEntry originalEntry = original.getOrCreate(key);
        NodeEntry rebuiltEntry = rebuilt.getOrCreate(key);
        assertEquals(originalEntry.getLive(), rebuiltEntry.getLive());
        assertEquals(0, rebuiltEntry.getStarting());
      }
    }

    // a review pass over the rebuilt state finds nothing to do
    assertEquals(0, appState.reviewRequestAndReleaseNodes().size());

    // and the AM-restart running-container count covers every container
    Application application = appState.getClusterStatus();
    Long running = application.getNumberOfRunningContainers();
    assertNotNull(running);
    assertEquals((long) liveContainers.size(), running.longValue());
  }
}

View File

@ -0,0 +1,122 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
import org.apache.slider.server.appmaster.state.ContainerAssignment;
import org.apache.slider.server.appmaster.state.RoleHistoryUtils;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.apache.slider.server.appmaster.state.ContainerPriority.extractRole;
/**
 * Test that the app state lets you ask for nodes, get a specific host,
 * release it and then get that one back again.
 */
public class TestMockAppStateRolePlacement extends BaseMockAppStateTest
    implements MockRoles {

  @Override
  public String getTestName() {
    return "TestMockAppStateRolePlacement";
  }

  @Test
  public void testAllocateReleaseRealloc() throws Throwable {
    // ask for a single role0 container
    getRole0Status().setDesired(1);
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    ContainerRequestOperation requestOp =
        (ContainerRequestOperation) ops.get(0);
    AMRMClient.ContainerRequest request = requestOp.getRequest();
    // an unplaced request: relaxed locality, no host or rack hints
    assertTrue(request.getRelaxLocality());
    assertNull(request.getNodes());
    assertNull(request.getRacks());
    assertNotNull(request.getCapability());

    Container allocated = engine.allocateContainer(request);
    List<ContainerAssignment> assignments = new ArrayList<>();
    List<AbstractRMOperation> releaseOperations = new ArrayList<>();
    appState.onContainersAllocated(Arrays.asList(allocated), assignments,
        releaseOperations);

    // the outstanding request gets cancelled to match the allocation
    assertEquals(1, releaseOperations.size());
    CancelSingleRequest cancel = (CancelSingleRequest) releaseOperations.get(0);
    assertNotNull(cancel.getRequest());
    assertNotNull(cancel.getRequest().getCapability());
    assertEquals(allocated.getResource(), cancel.getRequest().getCapability());

    // exactly one assignment, bound to role0 and the allocated container
    assertEquals(1, assignments.size());
    ContainerAssignment assigned = assignments.get(0);
    Container container = assigned.container;
    assertEquals(allocated.getId(), container.getId());
    assertEquals(extractRole(request.getPriority()),
        assigned.role.getPriority());
    assertEquals(ROLE0, assigned.role.getName());
    String containerHostname = RoleHistoryUtils.hostnameOf(container);
    RoleInstance instance = roleInstance(assigned);

    //tell the app it arrived
    appState.containerStartSubmitted(container, instance);
    assertNotNull(appState.onNodeManagerContainerStarted(container.getId()));
    assertEquals(1, getRole0Status().getRunning());
    ops = appState.reviewRequestAndReleaseNodes();
    assertEquals(0, ops.size());

    // flex down: the container becomes surplus and is released
    getRole0Status().setDesired(0);
    ops = appState.reviewRequestAndReleaseNodes();
    ContainerReleaseOperation release = (ContainerReleaseOperation) ops.get(0);
    assertEquals(container.getId(), release.getContainerId());
    engine.execute(ops);
    assertNotNull(appState.onCompletedContainer(
        containerStatus(container.getId())).roleInstance);

    //view the world
    appState.getRoleHistory().dump();

    // ask again: role history pins the request to the old host, strictly
    getRole0Status().setDesired(1);
    ops = appState.reviewRequestAndReleaseNodes();
    assertEquals(1, ops.size());
    requestOp = (ContainerRequestOperation) ops.get(0);
    AMRMClient.ContainerRequest secondRequest = requestOp.getRequest();
    assertNotNull(secondRequest);
    assertEquals(containerHostname, secondRequest.getNodes().get(0));
    assertFalse(secondRequest.getRelaxLocality());
    engine.execute(ops);
  }
}

View File

@ -0,0 +1,82 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
/**
 * Test that if you have &gt;1 role, the right roles are chosen for release.
 */
public class TestMockAppStateRoleRelease extends BaseMockAppStateTest
    implements MockRoles {

  @Override
  public String getTestName() {
    return "TestMockAppStateRoleRelease";
  }

  /**
   * Small cluster with multiple containers per node, to guarantee many
   * container allocations on each node.
   * @return a 4-node, 4-containers-per-node mock YARN engine
   */
  @Override
  public MockYarnEngine createYarnEngine() {
    return new MockYarnEngine(4, 4);
  }

  @Test
  public void testAllocateReleaseRealloc() throws Throwable {
    // fill the cluster across all three roles
    getRole0Status().setDesired(6);
    getRole1Status().setDesired(5);
    getRole2Status().setDesired(4);
    List<RoleInstance> instances = createAndStartNodes();
    assertEquals(15, instances.size());

    // flex role0 to zero: every one of its containers is now surplus
    getRole0Status().setDesired(0);
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    List<ContainerId> released = new ArrayList<>();
    engine.execute(ops, released);

    // every released container must have belonged to role0
    List<ContainerId> role0Ids = extractContainerIds(instances, ROLE0);
    for (ContainerId releasedId : released) {
      assertNotNull(appState.onCompletedContainer(containerStatus(releasedId))
          .roleInstance);
      assertTrue(role0Ids.contains(releasedId));
    }
    //view the world
    appState.getRoleHistory().dump();
  }
}

View File

@ -0,0 +1,111 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.api.resource.Resource;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import java.util.Collections;
/**
 * Test dynamic flex up/down of a component that has unique component
 * name support enabled: flexing creates/retains uniquely named members
 * ("group11", "group12", ...) that inherit the group's resource profile.
 */
public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
    implements MockRoles {

  @Override
  public String getTestName() {
    return "TestMockAppStateUniqueNames";
  }

  /**
   * Small cluster with multiple containers per node, to guarantee many
   * container allocations on each node.
   * @return a 4-node, 4-containers-per-node mock YARN engine
   */
  @Override
  public MockYarnEngine createYarnEngine() {
    return new MockYarnEngine(4, 4);
  }

  @Override
  public AppStateBindingInfo buildBindingInfo() {
    AppStateBindingInfo binding = super.buildBindingInfo();
    // release the most recently started containers first, so flex-down
    // behaviour is deterministic
    binding.releaseSelector = new MostRecentContainerReleaseSelector();
    return binding;
  }

  @Override
  public Application buildApplication() {
    Application application = super.buildApplication();
    // a two-container component with unique component names enabled
    Component group1 = new Component().name("group1").numberOfContainers(2L)
        .resource(new Resource().memory("1024").cpus(2))
        .uniqueComponentSupport(true);
    application.getComponents().add(group1);
    return application;
  }

  @Test
  public void testDynamicFlexDown() throws Throwable {
    createAndStartNodes();
    appState.updateComponents(Collections.singletonMap("group1", 0L));
    createAndStartNodes();
    // the uniquely named member "group11" remains, flexed down to zero,
    // still carrying the group's resource profile
    RoleStatus member = appState.lookupRoleStatus("group11");
    assertEquals(0, member.getDesired());
    assertEquals(1024L, member.getResourceRequirements().getMemorySize());
    assertEquals(2, member.getResourceRequirements().getVirtualCores());
    assertEquals("group1", member.getGroup());
  }

  @Test
  public void testDynamicFlexUp() throws Throwable {
    createAndStartNodes();
    appState.updateComponents(Collections.singletonMap("group1", 3L));
    createAndStartNodes();
    // flexing to three yields three uniquely named members of group1,
    // each desiring one container with the group's resource profile
    for (String memberName : new String[]{"group11", "group12", "group13"}) {
      RoleStatus member = appState.lookupRoleStatus(memberName);
      assertEquals(1, member.getDesired());
      assertEquals(1024L, member.getResourceRequirements().getMemorySize());
      assertEquals(2, member.getResourceRequirements().getVirtualCores());
      assertEquals("group1", member.getGroup());
    }
    appState.refreshClusterStatus();
  }
}

View File

@ -0,0 +1,89 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockAppState;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
import org.junit.Test;
import java.util.Collections;
import java.util.List;
/**
 * Test the container resource allocation logic: explicit values, the
 * "max" placeholder, and the defaults.
 */
public class TestMockContainerResourceAllocations extends BaseMockAppStateTest {

  @Override
  public Application buildApplication() {
    // a single role0 container is all these tests need
    return factory.newApplication(1, 0, 0).name(getTestName());
  }

  /** Run a review pass and return the capability of its single request. */
  private Resource requestedCapability() {
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    assertEquals(1, ops.size());
    ContainerRequestOperation request = (ContainerRequestOperation) ops.get(0);
    return request.getRequest().getCapability();
  }

  @Test
  public void testNormalAllocations() throws Throwable {
    Component role0 = appState.getClusterStatus().getComponent(MockRoles.ROLE0);
    role0.resource(new org.apache.slider.api.resource.Resource().memory("512")
        .cpus(2));
    appState.updateComponents(Collections.singletonMap(role0.getName(),
        role0.getNumberOfContainers()));
    Resource requirements = requestedCapability();
    assertEquals(512L, requirements.getMemorySize());
    assertEquals(2, requirements.getVirtualCores());
  }

  @Test
  public void testMaxMemAllocations() throws Throwable {
    // max core allocations no longer supported
    Component role0 = appState.getClusterStatus().getComponent(MockRoles.ROLE0);
    role0.resource(new org.apache.slider.api.resource.Resource()
        .memory(ResourceKeys.YARN_RESOURCE_MAX).cpus(2));
    appState.updateComponents(Collections.singletonMap(role0.getName(),
        role0.getNumberOfContainers()));
    Resource requirements = requestedCapability();
    // "max" resolves to the RM's advertised memory ceiling
    assertEquals(MockAppState.RM_MAX_RAM, requirements.getMemorySize());
    assertEquals(2, requirements.getVirtualCores());
  }

  @Test
  public void testMaxDefaultAllocations() throws Throwable {
    // no explicit resource set: the YARN defaults apply
    Resource requirements = requestedCapability();
    assertEquals(ResourceKeys.DEF_YARN_MEMORY, requirements.getMemorySize());
    assertEquals(ResourceKeys.DEF_YARN_CORES, requirements.getVirtualCores());
  }
}

View File

@ -0,0 +1,156 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRoles;
import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.state.AppState;
import org.apache.slider.server.appmaster.state.AppState.NodeUpdatedOutcome;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
* Test Anti-affine placement.
*/
public class TestMockLabelledAAPlacement extends BaseMockAppStateAATest
    implements MockRoles {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestMockLabelledAAPlacement.class);

  // Cluster geometry: NODES hosts in total, of which the first GPU_NODES
  // are given the GPU label in setup(); the remainder stay unlabelled.
  private static final int NODES = 3;
  private static final int GPU_NODES = 2;
  private static final String HOST0 = "00000000";
  private static final String HOST1 = "00000001";

  @Override
  public void setup() throws Exception {
    super.setup();
    // label two of the three hosts as GPU nodes
    updateNodes(MockFactory.INSTANCE.newNodeReport(HOST0, NodeState.RUNNING,
        LABEL_GPU));
    updateNodes(MockFactory.INSTANCE.newNodeReport(HOST1, NodeState.RUNNING,
        LABEL_GPU));
  }

  @Override
  public MockYarnEngine createYarnEngine() {
    // NODES hosts, 8 containers of capacity on each
    return new MockYarnEngine(NODES, 8);
  }

  // Assert that every live container of the GPU role obeys anti-affinity.
  void assertAllContainersAA() {
    assertAllContainersAA(getGpuRole().getKey());
  }

  /**
   * Ask for one more anti-affine instance than there are GPU-labelled
   * nodes: the final request must stay unsatisfied until the cluster gains
   * another labelled node.
   * @throws Throwable on any failure
   */
  @Test
  public void testAskForTooMany() throws Throwable {
    RoleStatus gpuRole = getGpuRole();
    describe("Ask for 1 more than the no of available nodes;" +
        " expect the final request to be unsatisfied until the cluster " +
        "changes size");
    //more than expected
    int size = GPU_NODES;
    gpuRole.setDesired(size + 1);
    List<AbstractRMOperation > operations = appState
        .reviewRequestAndReleaseNodes();
    assertTrue(gpuRole.isAARequestOutstanding());
    assertEquals(gpuRole.getAAPending(), size);
    // satisfy the AA requests one at a time: each accepted container
    // triggers the next single outstanding request
    for (int i = 0; i < size; i++) {
      // NOTE(review): logs getAaRole() although this test exercises the GPU
      // role — confirm this isn't meant to be getGpuRole()
      String iter = "Iteration " + i + " role = " + getAaRole();
      describe(iter);
      List<AbstractRMOperation > operationsOut = new ArrayList<>();
      List<RoleInstance> roleInstances = submitOperations(operations,
          EMPTY_ID_LIST, operationsOut);
      // one instance per request
      assertEquals(1, roleInstances.size());
      appState.onNodeManagerContainerStarted(roleInstances.get(0)
          .getContainerId());
      assertAllContainersAA();
      // there should be none left
      LOG.debug(nodeInformationSnapshotAsString());
      operations = operationsOut;
      // mid-sequence reviews produce a cancel + a new request; the final
      // one only a new request
      if (i + 1 < size) {
        assertEquals(2, operations.size());
      } else {
        assertEquals(1, operations.size());
      }
    }
    // expect an outstanding AA request to be unsatisfied
    assertTrue(gpuRole.getRunning() < gpuRole.getDesired());
    assertEquals(0, gpuRole.getRequested());
    assertFalse(gpuRole.isAARequestOutstanding());
    List<Container> allocatedContainers = engine.execute(operations,
        EMPTY_ID_LIST);
    assertEquals(0, allocatedContainers.size());
    // in a review now, no more requests can be generated, as there is no
    // space for AA placements, even though there is cluster capacity
    assertEquals(0, appState.reviewRequestAndReleaseNodes().size());
    // switch node 2 into being labelled
    NodeUpdatedOutcome outcome = updateNodes(MockFactory.INSTANCE.
        newNodeReport("00000002", NodeState.RUNNING, "gpu"));
    assertEquals(NODES, cloneNodemap().size());
    assertTrue(outcome.clusterChanged);
    // no active calls to empty
    assertTrue(outcome.operations.isEmpty());
    // the newly labelled node lets one more request be generated
    assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
  }

  // Add a brand-new GPU-labelled node to the cluster.
  protected AppState.NodeUpdatedOutcome addNewNode() {
    return updateNodes(MockFactory.INSTANCE.newNodeReport("00000004",
        NodeState.RUNNING, "gpu"));
  }

  /**
   * A node added while an AA request sequence is in flight must trigger a
   * cancel of the outstanding request and, on review, its re-issue.
   */
  @Test
  public void testClusterSizeChangesDuringRequestSequence() throws Throwable {
    RoleStatus gpuRole = getGpuRole();
    describe("Change the cluster size where the cluster size changes during " +
        "a test sequence.");
    gpuRole.setDesired(GPU_NODES + 1);
    List<AbstractRMOperation> operations = appState
        .reviewRequestAndReleaseNodes();
    assertTrue(gpuRole.isAARequestOutstanding());
    assertEquals(GPU_NODES, gpuRole.getAAPending());
    NodeUpdatedOutcome outcome = addNewNode();
    assertTrue(outcome.clusterChanged);
    // one call to cancel
    assertEquals(1, outcome.operations.size());
    // and on a review, one more to rebuild
    assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
  }
}

View File

@ -0,0 +1,110 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.appstate;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.slider.server.appmaster.state.ContainerPriority;
import org.apache.slider.server.appmaster.state.OutstandingRequest;
import org.apache.slider.utils.SliderTestBase;
import org.junit.Test;
/**
* Test outstanding request validation.
*/
public class TestOutstandingRequestValidation extends SliderTestBase {

  /** Single-entry host list used by the host-constrained test cases. */
  private static final String[] H1 = hosts("one");

  /** No hosts and no labels: relaxed locality must validate. */
  @Test
  public void testRelaxedNohostsOrLabels() throws Throwable {
    createAndValidate(null, null, true);
  }

  /** A label with relaxed locality must validate. */
  @Test
  public void testRelaxedLabels() throws Throwable {
    createAndValidate(null, "gpu", true);
  }

  /** A label without host constraints must not turn off relaxation. */
  @Test
  public void testNonRelaxedLabels() throws Throwable {
    expectCreationFailure(null, "gpu", false);
  }

  /** A host plus an empty label with relaxed locality must validate. */
  @Test
  public void testRelaxedHostNoLabel() throws Throwable {
    createAndValidate(H1, "", true);
  }

  /**
   * Use varargs for simple list to array conversion.
   * @param hostnames host names
   * @return the hostnames as an array
   */
  public static String[] hosts(String...hostnames) {
    return hostnames;
  }

  /**
   * Expect request creation/validation to fail with the "no location
   * constraints" message.
   * @param hosts optional hostnames
   * @param labels optional label expression
   * @param relaxLocality whether locality relaxation is requested
   */
  void expectCreationFailure(
      String[] hosts,
      String labels,
      boolean relaxLocality) {
    try {
      ContainerRequest result = createAndValidate(hosts, labels, relaxLocality);
      fail("Expected an exception, got " + result);
    } catch (IllegalArgumentException expected) {
      // verify this is the expected validation failure, not some other IAE
      assertTrue(expected.toString()
          .contains("Can't turn off locality relaxation on a request with no " +
              "location constraints"));
    }
  }

  /**
   * Create a container request and run it through the outstanding-request
   * validation.
   * @param hosts optional hostnames
   * @param labels optional label expression
   * @param relaxLocality whether locality relaxation is requested
   * @return the validated request
   */
  AMRMClient.ContainerRequest createAndValidate(
      String[] hosts,
      String labels,
      boolean relaxLocality) {
    // the original declared unused cores/memory locals here; the actual
    // resource values are set in newRequest()
    int p = 1;
    // the priority encodes both the role key and the locality flag
    Priority pri = ContainerPriority.createPriority(p, !relaxLocality);
    ContainerRequest issuedRequest =
        newRequest(pri, hosts, labels, relaxLocality);
    OutstandingRequest.validateContainerRequest(issuedRequest, p, "");
    return issuedRequest;
  }

  /**
   * Build a container request with a small fixed allocation.
   * @param pri request priority
   * @param hosts optional hostnames
   * @param labels optional label expression
   * @param relaxLocality whether locality relaxation is requested
   * @return the new request
   */
  AMRMClient.ContainerRequest newRequest(
      Priority pri,
      String[] hosts,
      String labels,
      boolean relaxLocality) {
    int cores = 1;
    int memory = 64;
    Resource resource = Resource.newInstance(memory, cores);
    return new AMRMClient.ContainerRequest(resource,
        hosts,
        null,  // no rack constraints
        pri,
        relaxLocality,
        labels);
  }
}

View File

@ -0,0 +1,269 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.history;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.slider.api.proto.Messages;
import org.apache.slider.api.types.NodeInformation;
import org.apache.slider.api.types.NodeInformationList;
import org.apache.slider.api.types.RestTypeMarshalling;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.NodeMap;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.utils.SliderTestBase;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
* Test anti-affine placement.
*/
public class TestRoleHistoryAA extends SliderTestBase {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestRoleHistoryAA.class);

  // Three-host cluster; node maps are rebuilt per-test in setup().
  private List<String> hostnames = Arrays.asList("1", "2", "3");
  // nodeMap: unlabelled nodes; gpuNodeMap: the same hosts labelled "GPU"
  private NodeMap nodeMap, gpuNodeMap;
  // NOTE(review): this field is never referenced by the visible tests —
  // confirm whether it can be removed
  private RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);

  // MockRoleHistory construction in the field initializer can throw, so the
  // constructor must declare BadConfigException.
  public TestRoleHistoryAA() throws BadConfigException {
  }

  @Override
  public void setup() throws Exception {
    super.setup();
    nodeMap = createNodeMap(hostnames, NodeState.RUNNING, "");
    gpuNodeMap = createNodeMap(hostnames, NodeState.RUNNING, "GPU");
  }

  @Test
  public void testFindNodesInFullCluster() throws Throwable {
    // all three will surface at first
    verifyResultSize(3, nodeMap.findAllNodesForRole(1, ""));
  }

  @Test
  public void testFindNodesInUnhealthyCluster() throws Throwable {
    // an unhealthy node must drop out of the candidate list
    markNodeOneUnhealthy();
    verifyResultSize(2, nodeMap.findAllNodesForRole(1, ""));
  }

  // Mark host "1" unhealthy; returns whether the node state changed.
  public boolean markNodeOneUnhealthy() {
    return setNodeState(nodeMap.get("1"), NodeState.UNHEALTHY);
  }

  // Push a new node report with the given state at the node instance.
  protected boolean setNodeState(NodeInstance node, NodeState state) {
    return node.updateNode(MockFactory.INSTANCE.newNodeReport(node.hostname,
        state, ""));
  }

  @Test
  public void testFindNoNodesWrongLabel() throws Throwable {
    // no node in the unlabelled map carries the GPU label
    verifyResultSize(0, nodeMap.findAllNodesForRole(1, "GPU"));
  }

  @Test
  public void testFindSomeNodesSomeLabel() throws Throwable {
    // label exactly one host, then verify it is the only GPU candidate
    update(nodeMap,
        Arrays.asList(MockFactory.INSTANCE.newNodeReport("1", NodeState
            .RUNNING, "GPU")));
    List<NodeInstance> gpuNodes = nodeMap.findAllNodesForRole(1, "GPU");
    verifyResultSize(1, gpuNodes);
    NodeInstance instance = gpuNodes.get(0);
    // once an instance of role 1 is starting there, the node can host no more
    instance.getOrCreate(1).onStarting();
    assertFalse(instance.canHost(1, "GPU"));
    assertFalse(instance.canHost(1, ""));
    verifyResultSize(0, nodeMap.findAllNodesForRole(1, "GPU"));
  }

  @Test
  public void testFindNoNodesRightLabel() throws Throwable {
    // all three labelled nodes surface for a labelled role
    verifyResultSize(3, gpuNodeMap.findAllNodesForRole(1, "GPU"));
  }

  @Test
  public void testFindNoNodesNoLabel() throws Throwable {
    // labelled nodes also satisfy an unlabelled role
    verifyResultSize(3, gpuNodeMap.findAllNodesForRole(1, ""));
  }

  @Test
  public void testFindNoNodesClusterRequested() throws Throwable {
    // once every node has an outstanding request, none are available
    for (NodeInstance ni : nodeMap.values()) {
      ni.getOrCreate(1).request();
    }
    assertNoAvailableNodes(1);
  }

  @Test
  public void testFindNoNodesClusterBusy() throws Throwable {
    // NOTE(review): identical body to testFindNoNodesClusterRequested —
    // confirm whether this was meant to use onStarting()/busy state instead
    for (NodeInstance ni : nodeMap.values()) {
      ni.getOrCreate(1).request();
    }
    assertNoAvailableNodes(1);
  }

  /**
   * Tag all nodes as starting, then walk one through a bit
   * more of its lifecycle.
   */
  @Test
  public void testFindNoNodesLifecycle() throws Throwable {
    // all three will surface at first
    for (NodeInstance ni : nodeMap.values()) {
      ni.getOrCreate(1).onStarting();
    }
    assertNoAvailableNodes(1);
    // walk one of the nodes through the lifecycle
    NodeInstance node1 = nodeMap.get("1");
    assertFalse(node1.canHost(1, ""));
    node1.get(1).onStartCompleted();
    // a live container still blocks further placement on the node
    assertFalse(node1.canHost(1, ""));
    assertNoAvailableNodes(1);
    node1.get(1).release();
    // released: the node is available again
    assertTrue(node1.canHost(1, ""));
    List<NodeInstance> list2 =
        verifyResultSize(1, nodeMap.findAllNodesForRole(1, ""));
    assertEquals(list2.get(0).hostname, "1");
    // now tag that node as unhealthy and expect it to go away
    markNodeOneUnhealthy();
    assertNoAvailableNodes(1);
  }

  @Test
  public void testRolesIndependent() throws Throwable {
    NodeInstance node1 = nodeMap.get("1");
    NodeEntry role1 = node1.getOrCreate(1);
    NodeEntry role2 = node1.getOrCreate(2);
    // mark every node unhealthy
    // NOTE(review): the report names host "0", which is not in the map
    // ("1".."3") — presumably updateNode only reads the state; confirm
    for (NodeInstance ni : nodeMap.values()) {
      ni.updateNode(MockFactory.INSTANCE.newNodeReport("0", NodeState
          .UNHEALTHY, ""));
    }
    assertNoAvailableNodes(1);
    assertNoAvailableNodes(2);
    assertTrue(setNodeState(node1, NodeState.RUNNING));
    // tag role 1 as busy
    role1.onStarting();
    // role 1 is blocked on the node, but role 2 remains placeable there
    assertNoAvailableNodes(1);
    verifyResultSize(1, nodeMap.findAllNodesForRole(2, ""));
    assertTrue(node1.canHost(2, ""));
  }

  // NOTE(review): method name has a typo ("Availablity"); renaming would be
  // safe for JUnit but is left to a dedicated change.
  @Test
  public void testNodeEntryAvailablity() throws Throwable {
    NodeEntry entry = new NodeEntry(1);
    assertTrue(entry.isAvailable());
    entry.onStarting();
    assertFalse(entry.isAvailable());
    entry.onStartCompleted();
    assertFalse(entry.isAvailable());
    entry.release();
    assertTrue(entry.isAvailable());
    entry.onStarting();
    assertFalse(entry.isAvailable());
    // a failed start frees the entry again
    entry.onStartFailed();
    assertTrue(entry.isAvailable());
  }

  /**
   * Round-trip a NodeInstance through the REST/protobuf marshalling types
   * and verify the live-container counts survive each conversion.
   */
  @Test
  public void testNodeInstanceSerialization() throws Throwable {
    MockRoleHistory rh2 = new MockRoleHistory(new ArrayList<>());
    rh2.getOrCreateNodeInstance("localhost");
    NodeInstance instance = rh2.getOrCreateNodeInstance("localhost");
    instance.getOrCreate(1).onStartCompleted();
    // priority 1 -> role name "manager"
    Map<Integer, String> naming = Collections.singletonMap(1, "manager");
    NodeInformation ni = instance.serialize(naming);
    assertEquals(1, ni.entries.get("manager").live);
    NodeInformation ni2 = rh2.getNodeInformation("localhost", naming);
    assertEquals(1, ni2.entries.get("manager").live);
    Map<String, NodeInformation> info = rh2.getNodeInformationSnapshot(naming);
    assertEquals(1, info.get("localhost").entries.get("manager").live);
    NodeInformationList nil = new NodeInformationList(info.values());
    assertEquals(1, nil.get(0).entries.get("manager").live);
    // protobuf round trip
    Messages.NodeInformationProto nodeInformationProto =
        RestTypeMarshalling.marshall(ni);
    Messages.NodeEntryInformationProto entryProto = nodeInformationProto
        .getEntries(0);
    assertNotNull(entryProto);
    assertEquals(1, entryProto.getPriority());
    NodeInformation unmarshalled =
        RestTypeMarshalling.unmarshall(nodeInformationProto);
    assertEquals(unmarshalled.hostname, ni.hostname);
    assertTrue(unmarshalled.entries.keySet().containsAll(ni.entries.keySet()));
  }

  // TODO(review): empty test body — implement or remove.
  @Test
  public void testBuildRolenames() throws Throwable {
  }

  // Assert the node map has no candidates for the role; returns the
  // (empty) list for further inspection.
  public List<NodeInstance> assertNoAvailableNodes(int role) {
    String label = "";
    return verifyResultSize(0, nodeMap.findAllNodesForRole(role, label));
  }

  // Assert the list size, dumping the entries on mismatch to aid debugging.
  List<NodeInstance> verifyResultSize(int size, List<NodeInstance> list) {
    if (list.size() != size) {
      for (NodeInstance ni : list) {
        LOG.error(ni.toFullString());
      }
    }
    assertEquals(size, list.size());
    return list;
  }

  // Build a single-role node map from a list of node reports.
  NodeMap createNodeMap(List<NodeReport> nodeReports)
      throws BadConfigException {
    NodeMap newNodeMap = new NodeMap(1);
    update(newNodeMap, nodeReports);
    return newNodeMap;
  }

  // Apply the reports to the map; returns whether the map changed.
  protected boolean update(NodeMap nm, List<NodeReport> nodeReports) {
    return nm.buildOrUpdate(nodeReports);
  }

  // Build a node map where every host shares one state and label.
  NodeMap createNodeMap(List<String> hosts, NodeState state,
      String label) throws BadConfigException {
    return createNodeMap(MockFactory.INSTANCE.createNodeReports(hosts, state,
        label));
  }
}

View File

@ -0,0 +1,447 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.history;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockContainer;
import org.apache.slider.server.appmaster.model.mock.MockNodeId;
import org.apache.slider.server.appmaster.state.ContainerOutcome;
import org.apache.slider.server.appmaster.state.ContainerPriority;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.NodeMap;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Test container events at the role history level -one below
* the App State.
*/
public class TestRoleHistoryContainerEvents extends BaseMockAppStateTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestRoleHistoryContainerEvents.class);
@Override
public String getTestName() {
return "TestRoleHistoryContainerEvents";
}
private NodeInstance age1Active4;
private NodeInstance age2Active2;
private NodeInstance age3Active0;
private NodeInstance age4Active1;
private NodeInstance age2Active0;
private RoleHistory roleHistory;
private Resource resource;
AMRMClient.ContainerRequest requestContainer(RoleStatus roleStatus) {
return roleHistory.requestContainerForRole(roleStatus).getIssuedRequest();
}
@Override
public void setup() throws Exception {
super.setup();
age1Active4 = nodeInstance(1, 4, 0, 0);
age2Active2 = nodeInstance(2, 2, 0, 1);
age3Active0 = nodeInstance(3, 0, 0, 0);
age4Active1 = nodeInstance(4, 1, 0, 0);
age2Active0 = nodeInstance(2, 0, 0, 0);
roleHistory = appState.getRoleHistory();
roleHistory.insert(Arrays.asList(age2Active2, age2Active0,
age4Active1, age1Active4, age3Active0));
roleHistory.buildRecentNodeLists();
resource = Resource.newInstance(ResourceKeys.DEF_YARN_CORES,
ResourceKeys.DEF_YARN_MEMORY);
}
@Test
public void testFindAndCreate() throws Throwable {
RoleStatus roleStatus = getRole0Status();
AMRMClient.ContainerRequest request =
requestContainer(roleStatus);
List<String> requestNodes = request.getNodes();
assertNotNull(requestNodes);
assertEquals(1, requestNodes.size());
String hostname = requestNodes.get(0);
assertEquals(hostname, age3Active0.hostname);
//build a container
MockContainer container = factory.newContainer();
container.setNodeId(new MockNodeId(hostname, 0));
container.setPriority(request.getPriority());
roleHistory.onContainerAssigned(container);
NodeMap nodemap = roleHistory.cloneNodemap();
NodeInstance allocated = nodemap.get(hostname);
NodeEntry roleEntry = allocated.get(roleStatus.getKey());
assertEquals(1, roleEntry.getStarting());
assertFalse(roleEntry.isAvailable());
RoleInstance ri = new RoleInstance(container);
//start it
roleHistory.onContainerStartSubmitted(container, ri);
//later, declare that it started
roleHistory.onContainerStarted(container);
assertEquals(0, roleEntry.getStarting());
assertFalse(roleEntry.isAvailable());
assertEquals(1, roleEntry.getActive());
assertEquals(1, roleEntry.getLive());
}
@Test
public void testCreateAndRelease() throws Throwable {
RoleStatus roleStatus = getRole1Status();
//verify it is empty
assertTrue(roleHistory.listActiveNodes(roleStatus.getKey()).isEmpty());
AMRMClient.ContainerRequest request =
requestContainer(roleStatus);
assertNull(request.getNodes());
//pick an idle host
String hostname = age3Active0.hostname;
//build a container
MockContainer container = factory.newContainer(new MockNodeId(hostname,
0), request.getPriority());
roleHistory.onContainerAssigned(container);
NodeMap nodemap = roleHistory.cloneNodemap();
NodeInstance allocated = nodemap.get(hostname);
NodeEntry roleEntry = allocated.get(roleStatus.getKey());
assertEquals(1, roleEntry.getStarting());
assertFalse(roleEntry.isAvailable());
RoleInstance ri = new RoleInstance(container);
//start it
roleHistory.onContainerStartSubmitted(container, ri);
//later, declare that it started
roleHistory.onContainerStarted(container);
assertEquals(0, roleEntry.getStarting());
assertFalse(roleEntry.isAvailable());
assertEquals(1, roleEntry.getActive());
assertEquals(1, roleEntry.getLive());
// now pick that instance to destroy
List<NodeInstance> activeNodes = roleHistory.listActiveNodes(roleStatus
.getKey());
assertEquals(1, activeNodes.size());
NodeInstance target = activeNodes.get(0);
assertEquals(target, allocated);
roleHistory.onContainerReleaseSubmitted(container);
assertEquals(1, roleEntry.getReleasing());
assertEquals(1, roleEntry.getLive());
assertEquals(0, roleEntry.getActive());
// release completed
roleHistory.onReleaseCompleted(container);
assertEquals(0, roleEntry.getReleasing());
assertEquals(0, roleEntry.getLive());
assertEquals(0, roleEntry.getActive());
// verify it is empty
assertTrue(roleHistory.listActiveNodes(roleStatus.getKey()).isEmpty());
// ask for a container and expect to get the recently released one
AMRMClient.ContainerRequest request2 =
requestContainer(roleStatus);
List<String> nodes2 = request2.getNodes();
assertNotNull(nodes2);
String hostname2 = nodes2.get(0);
//pick an idle host
assertEquals(hostname2, age3Active0.hostname);
}
@Test
public void testStartWithoutWarning() throws Throwable {
//pick an idle host
String hostname = age3Active0.hostname;
//build a container
MockContainer container = factory.newContainer(
new MockNodeId(hostname, 0),
ContainerPriority.createPriority(getRole0Status().getKey(), false));
NodeMap nodemap = roleHistory.cloneNodemap();
NodeInstance allocated = nodemap.get(hostname);
NodeEntry roleEntry = allocated.get(getRole0Status().getKey());
//tell RH that it started
roleHistory.onContainerStarted(container);
assertEquals(0, roleEntry.getStarting());
assertFalse(roleEntry.isAvailable());
assertEquals(1, roleEntry.getActive());
assertEquals(1, roleEntry.getLive());
}
@Test
public void testStartFailed() throws Throwable {
RoleStatus roleStatus = getRole0Status();
AMRMClient.ContainerRequest request =
requestContainer(roleStatus);
LOG.info("req {}", request);
LOG.info("{}", request.getNodes());
String hostname = request.getNodes().get(0);
assertEquals(hostname, age3Active0.hostname);
//build a container
MockContainer container = factory.newContainer(new MockNodeId(hostname,
0), request.getPriority());
roleHistory.onContainerAssigned(container);
NodeMap nodemap = roleHistory.cloneNodemap();
NodeInstance allocated = nodemap.get(hostname);
NodeEntry roleEntry = allocated.get(roleStatus.getKey());
assertEquals(1, roleEntry.getStarting());
assertFalse(roleEntry.isAvailable());
RoleInstance ri = new RoleInstance(container);
//start it
roleHistory.onContainerStartSubmitted(container, ri);
//later, declare that it failed on startup
assertFalse(roleHistory.onNodeManagerContainerStartFailed(container));
assertEquals(0, roleEntry.getStarting());
assertEquals(1, roleEntry.getStartFailed());
assertEquals(1, roleEntry.getFailed());
assertTrue(roleEntry.isAvailable());
assertEquals(0, roleEntry.getActive());
assertEquals(0, roleEntry.getLive());
}
@Test
public void testStartFailedWithoutWarning() throws Throwable {
RoleStatus roleStatus = getRole0Status();
AMRMClient.ContainerRequest request =
requestContainer(roleStatus);
String hostname = request.getNodes().get(0);
assertEquals(hostname, age3Active0.hostname);
//build a container
MockContainer container = factory.newContainer();
container.setNodeId(new MockNodeId(hostname, 0));
container.setPriority(request.getPriority());
NodeMap nodemap = roleHistory.cloneNodemap();
NodeInstance allocated = nodemap.get(hostname);
NodeEntry roleEntry = allocated.get(roleStatus.getKey());
assertFalse(roleHistory.onNodeManagerContainerStartFailed(container));
assertEquals(0, roleEntry.getStarting());
assertEquals(1, roleEntry.getStartFailed());
assertEquals(1, roleEntry.getFailed());
assertTrue(roleEntry.isAvailable());
assertEquals(0, roleEntry.getActive());
assertEquals(0, roleEntry.getLive());
}
@Test
public void testContainerFailed() throws Throwable {
describe("fail a container without declaring it as starting");
RoleStatus roleStatus = getRole0Status();
AMRMClient.ContainerRequest request =
requestContainer(roleStatus);
String hostname = request.getNodes().get(0);
assertEquals(hostname, age3Active0.hostname);
//build a container
MockContainer container = factory.newContainer();
container.setNodeId(new MockNodeId(hostname, 0));
container.setPriority(request.getPriority());
roleHistory.onContainerAssigned(container);
NodeMap nodemap = roleHistory.cloneNodemap();
NodeInstance allocated = nodemap.get(hostname);
NodeEntry roleEntry = allocated.get(roleStatus.getKey());
assertEquals(1, roleEntry.getStarting());
assertFalse(roleEntry.isAvailable());
RoleInstance ri = new RoleInstance(container);
//start it
roleHistory.onContainerStartSubmitted(container, ri);
roleHistory.onContainerStarted(container);
//later, declare that it failed
roleHistory.onFailedContainer(
container,
false,
ContainerOutcome.Failed);
assertEquals(0, roleEntry.getStarting());
assertTrue(roleEntry.isAvailable());
assertEquals(0, roleEntry.getActive());
assertEquals(0, roleEntry.getLive());
}
@Test
public void testContainerFailedWithoutWarning() throws Throwable {
describe("fail a container without declaring it as starting");
RoleStatus roleStatus = getRole0Status();
AMRMClient.ContainerRequest request =
requestContainer(roleStatus);
String hostname = request.getNodes().get(0);
assertEquals(hostname, age3Active0.hostname);
//build a container
MockContainer container = factory.newContainer();
container.setNodeId(new MockNodeId(hostname, 0));
container.setPriority(request.getPriority());
NodeMap nodemap = roleHistory.cloneNodemap();
NodeInstance allocated = nodemap.get(hostname);
NodeEntry roleEntry = allocated.get(roleStatus.getKey());
assertTrue(roleEntry.isAvailable());
roleHistory.onFailedContainer(
container,
false,
ContainerOutcome.Failed);
assertEquals(0, roleEntry.getStarting());
assertEquals(1, roleEntry.getFailed());
assertTrue(roleEntry.isAvailable());
assertEquals(0, roleEntry.getActive());
assertEquals(0, roleEntry.getLive());
}
@Test
public void testAllocationListPrep() throws Throwable {
describe("test prepareAllocationList");
RoleStatus roleStatus = getRole0Status();
AMRMClient.ContainerRequest request =
requestContainer(roleStatus);
String hostname = request.getNodes().get(0);
assertEquals(hostname, age3Active0.hostname);
MockContainer container1 = factory.newContainer();
container1.setNodeId(new MockNodeId(hostname, 0));
container1.setPriority(Priority.newInstance(getRole0Status().getKey()));
MockContainer container2 = factory.newContainer();
container2.setNodeId(new MockNodeId(hostname, 0));
container2.setPriority(Priority.newInstance(getRole1Status().getKey()));
// put containers in List with role == 1 first
List<Container> containers = Arrays.asList((Container) container2,
(Container) container1);
List<Container> sortedContainers = roleHistory.prepareAllocationList(
containers);
// verify that the first container has role == 0 after sorting
MockContainer c1 = (MockContainer) sortedContainers.get(0);
assertEquals(getRole0Status().getKey(), c1.getPriority().getPriority());
MockContainer c2 = (MockContainer) sortedContainers.get(1);
assertEquals(getRole1Status().getKey(), c2.getPriority().getPriority());
}
@Test
public void testNodeUpdated() throws Throwable {
describe("fail a node");
RoleStatus roleStatus = getRole0Status();
AMRMClient.ContainerRequest request =
roleHistory.requestContainerForRole(roleStatus).getIssuedRequest();
String hostname = request.getNodes().get(0);
assertEquals(age3Active0.hostname, hostname);
// build a container
MockContainer container = factory.newContainer(new MockNodeId(hostname,
0), request.getPriority());
roleHistory.onContainerAssigned(container);
NodeMap nodemap = roleHistory.cloneNodemap();
NodeInstance allocated = nodemap.get(hostname);
NodeEntry roleEntry = allocated.get(roleStatus.getKey());
assertEquals(1, roleEntry.getStarting());
assertFalse(roleEntry.isAvailable());
RoleInstance ri = new RoleInstance(container);
// start it
roleHistory.onContainerStartSubmitted(container, ri);
roleHistory.onContainerStarted(container);
int startSize = nodemap.size();
// now send a list of updated (failed) nodes event
List<NodeReport> nodesUpdated = new ArrayList<>();
NodeReport nodeReport = NodeReport.newInstance(
NodeId.newInstance(hostname, 0),
NodeState.LOST,
null, null, null, null, 1, null, 0);
nodesUpdated.add(nodeReport);
roleHistory.onNodesUpdated(nodesUpdated);
nodemap = roleHistory.cloneNodemap();
int endSize = nodemap.size();
// as even unused nodes are added to the list, we expect the map size to
// be >1
assertTrue(startSize <= endSize);
assertNotNull(nodemap.get(hostname));
assertFalse(nodemap.get(hostname).isOnline());
// add a failure of a node we've never head of
String newhost = "newhost";
nodesUpdated = Arrays.asList(
NodeReport.newInstance(
NodeId.newInstance(newhost, 0),
NodeState.LOST,
null, null, null, null, 1, null, 0)
);
roleHistory.onNodesUpdated(nodesUpdated);
NodeMap nodemap2 = roleHistory.cloneNodemap();
assertNotNull(nodemap2.get(newhost));
assertFalse(nodemap2.get(newhost).isOnline());
}
}

View File

@ -0,0 +1,177 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.history;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
import org.apache.slider.server.appmaster.state.ContainerOutcome;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Testing finding nodes for new instances.
*
* This stresses the non-AA codepath
*/
public class TestRoleHistoryFindNodesForNewInstances extends
    BaseMockAppStateTest {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestRoleHistoryFindNodesForNewInstances.class);

  public TestRoleHistoryFindNodesForNewInstances() throws BadConfigException {
  }

  @Override
  public String getTestName() {
    return "TestFindNodesForNewInstances";
  }

  // Node fixtures: the name "ageNActiveM" documents the intent -- last-used
  // age N with M active containers. (The exact meaning of the nodeInstance()
  // arguments is defined in BaseMockAppStateTest -- confirm there before
  // adding fixtures.)
  private NodeInstance age1Active4;
  private NodeInstance age2Active2;
  private NodeInstance age3Active0;
  private NodeInstance age4Active1;
  private NodeInstance age2Active0;
  private RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
  // role statuses used for placement lookups: role 0 (default placement)
  // and role 2
  private RoleStatus roleStat;
  private RoleStatus roleStat2;

  /**
   * Insert the node fixtures into the role history and build the
   * recent-node lists that {@code findRecentNodeForNewInstance} consults.
   */
  @Override
  public void setup() throws Exception {
    super.setup();
    age1Active4 = nodeInstance(1, 4, 0, 0);
    age2Active2 = nodeInstance(2, 2, 0, 1);
    age3Active0 = nodeInstance(3, 0, 0, 0);
    age4Active1 = nodeInstance(4, 1, 0, 0);
    age2Active0 = nodeInstance(2, 0, 0, 0);
    roleHistory.insert(Arrays.asList(age2Active2, age2Active0, age4Active1,
        age1Active4, age3Active0));
    roleHistory.buildRecentNodeLists();
    roleStat = getRole0Status();
    roleStat2 = getRole2Status();
  }

  /**
   * Issue {@code count} placement requests for role 0.
   * @param count number of requests to issue
   * @return every non-null node returned by the history
   */
  public List<NodeInstance> findNodes(int count) {
    return findNodes(count, roleStat);
  }

  /**
   * Issue {@code count} placement requests for a role, collecting every
   * non-null result. Null results are skipped, so the returned list may be
   * shorter than {@code count}.
   * @param count number of requests to issue
   * @param roleStatus role to request placement for
   * @return the nodes the history offered, in request order
   */
  public List<NodeInstance> findNodes(int count, RoleStatus roleStatus) {
    List <NodeInstance> found = new ArrayList<>();
    for (int i = 0; i < count; i++) {
      NodeInstance f = roleHistory.findRecentNodeForNewInstance(roleStatus);
      if (f != null) {
        found.add(f);
      }
    }
    return found;
  }

  @Test
  public void testFind1NodeR0() throws Throwable {
    // the first pick for role 0 must be one of the idle fixtures
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    LOG.info("found: {}", found);
    assertTrue(Arrays.asList(age3Active0).contains(found));
  }

  @Test
  public void testFind2NodeR0() throws Throwable {
    // two consecutive picks must both come from the idle fixtures,
    // and must be distinct nodes
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    LOG.info("found: {}", found);
    assertTrue(Arrays.asList(age2Active0, age3Active0).contains(found));
    NodeInstance found2 = roleHistory.findRecentNodeForNewInstance(roleStat);
    LOG.info("found: {}", found2);
    assertTrue(Arrays.asList(age2Active0, age3Active0).contains(found2));
    assertNotEquals(found, found2);
  }

  @Test
  public void testFind3NodeR0ReturnsNull() throws Throwable {
    // only two suitable nodes exist, so a third request yields null
    assertEquals(2, findNodes(2).size());
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    assertNull(found);
  }

  @Test
  public void testFindNodesOneEntry() throws Throwable {
    // role 2 has no recent nodes recorded, so nothing is returned
    List<NodeInstance> foundNodes = findNodes(4, roleStat2);
    assertEquals(0, foundNodes.size());
  }

  @Test
  public void testFindNodesIndependent() throws Throwable {
    // exhausting role 0's nodes must not affect role 2's (empty) list
    assertEquals(2, findNodes(2).size());
    roleHistory.dump();
    assertEquals(0, findNodes(3, roleStat2).size());
  }

  @Test
  public void testFindNodesFallsBackWhenUsed() throws Throwable {
    // mark both idle nodes (age2Active0 and age3Active0) as busy,
    // then expect a null back
    age2Active0.get(getRole0Status().getKey()).onStartCompleted();
    assertNotEquals(0, age2Active0.getActiveRoleInstances(getRole0Status()
        .getKey()));
    age3Active0.get(getRole0Status().getKey()).onStartCompleted();
    assertNotEquals(0, age3Active0.getActiveRoleInstances(getRole0Status()
        .getKey()));
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    if (found != null) {
      LOG.info(found.toFullString());
    }
    assertNull(found);
  }

  @Test
  public void testFindNodesSkipsFailingNode() throws Throwable {
    // record repeated container failures on age2Active0 so it crosses the
    // role's failure threshold and is skipped during placement
    NodeEntry entry0 = age2Active0.get(getRole0Status().getKey());
    entry0.containerCompleted(
        false,
        ContainerOutcome.Failed);
    assertTrue(entry0.getFailed() > 0);
    assertTrue(entry0.getFailedRecently() > 0);
    entry0.containerCompleted(
        false,
        ContainerOutcome.Failed);
    // two failures are still below the default threshold
    assertFalse(age2Active0.exceedsFailureThreshold(roleStat));
    // set failure threshold to 1
    roleStat.getProviderRole().nodeFailureThreshold = 1;
    // threshold is now exceeded
    assertTrue(age2Active0.exceedsFailureThreshold(roleStat));
    // get the role & expect age3 to be picked up, even though it is older
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    assertEquals(age3Active0, found);
  }
}

View File

@ -0,0 +1,133 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.history;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* Unit test to verify the comparators sort as expected.
*/
public class TestRoleHistoryNIComparators extends BaseMockAppStateTest {

  // individual node fixtures; the names encode last-used age and
  // active-container count
  private NodeInstance age1Active4;
  private NodeInstance age2Active2;
  private NodeInstance age3Active0;
  private NodeInstance age4Active1;
  private NodeInstance empty = new NodeInstance("empty", MockFactory
      .ROLE_COUNT);
  private NodeInstance age6failing;
  private NodeInstance age1failing;

  // lists built in setup() and sorted in place by the individual tests
  private List<NodeInstance> unsorted;
  private List<NodeInstance> unsortedWithEmpty;
  private List<NodeInstance> unsortedWithFailures;
  private RoleStatus role0Status;

  @Override
  public void setup() throws Exception {
    super.setup();
    role0Status = getRole0Status();
    age1Active4 = nodeInstance(1001, 4, 0, 0);
    age2Active2 = nodeInstance(1002, 2, 0, 0);
    age3Active0 = nodeInstance(1003, 0, 0, 0);
    age4Active1 = nodeInstance(1004, 1, 0, 0);
    age6failing = nodeInstance(1006, 0, 0, 0);
    age1failing = nodeInstance(1001, 0, 0, 0);
    // flag recent failures on two of the nodes so the Preferred
    // comparator's failure handling can be exercised
    age6failing.get(role0Status.getKey()).setFailedRecently(2);
    age1failing.get(role0Status.getKey()).setFailedRecently(1);
    unsorted = Arrays.asList(age2Active2, age4Active1, age1Active4,
        age3Active0);
    unsortedWithEmpty = Arrays.asList(age2Active2, age4Active1, age1Active4,
        age3Active0, empty);
    unsortedWithFailures = Arrays.asList(age6failing, age2Active2,
        age4Active1, age1Active4, age3Active0, age1failing);
  }

  @Override
  public String getTestName() {
    return "TestNIComparators";
  }

  /** The Preferred comparator orders the most recently used nodes first. */
  @Test
  public void testPreferred() throws Throwable {
    unsorted.sort(new NodeInstance.Preferred(role0Status.getKey()));
    assertListEquals(unsorted, Arrays.asList(age4Active1, age3Active0,
        age2Active2, age1Active4));
  }

  /**
   * The preferred sort still includes failing nodes; filtering those out
   * is the responsibility of a later phase in the placement process.
   * @throws Throwable on any failure
   */
  @Test
  public void testPreferredWithFailures() throws Throwable {
    unsortedWithFailures.sort(new NodeInstance.Preferred(role0Status
        .getKey()));
    assertEquals(unsortedWithFailures.get(0), age6failing);
    assertEquals(unsortedWithFailures.get(1), age4Active1);
  }

  /** Pairwise comparison ranks the node with more recent failures lower. */
  @Test
  public void testPreferredComparatorDowngradesFailures() throws Throwable {
    NodeInstance.Preferred comparator = new NodeInstance.Preferred(role0Status
        .getKey());
    assertEquals(-1, comparator.compare(age6failing, age1failing));
    assertEquals(1, comparator.compare(age1failing, age6failing));
  }

  /** A node with no entry for the role sorts to the very end. */
  @Test
  public void testNewerThanNoRole() throws Throwable {
    unsortedWithEmpty.sort(new NodeInstance.Preferred(role0Status.getKey()));
    assertListEquals(unsortedWithEmpty, Arrays.asList(age4Active1,
        age3Active0, age2Active2, age1Active4, empty));
  }

  /** MoreActiveThan orders by descending active-container count. */
  @Test
  public void testMoreActiveThan() throws Throwable {
    unsorted.sort(new NodeInstance.MoreActiveThan(role0Status.getKey()));
    assertListEquals(unsorted, Arrays.asList(age1Active4, age2Active2,
        age4Active1, age3Active0));
  }

  /** A node with no activity for the role sorts last under MoreActiveThan. */
  @Test
  public void testMoreActiveThanEmpty() throws Throwable {
    unsortedWithEmpty.sort(new NodeInstance.MoreActiveThan(role0Status
        .getKey()));
    assertListEquals(unsortedWithEmpty, Arrays.asList(age1Active4,
        age2Active2, age4Active1, age3Active0, empty));
  }
}

View File

@ -0,0 +1,385 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.history;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Component;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.providers.PlacementPolicy;
import org.apache.slider.providers.ProviderRole;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockAppState;
import org.apache.slider.server.appmaster.model.mock.MockContainer;
import org.apache.slider.server.appmaster.model.mock.MockNodeId;
import org.apache.slider.server.appmaster.model.mock.MockPriority;
import org.apache.slider.server.appmaster.model.mock.MockResource;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
import org.apache.slider.server.appmaster.state.ContainerAllocationOutcome;
import org.apache.slider.server.appmaster.state.ContainerAllocationResults;
import org.apache.slider.server.appmaster.state.ContainerPriority;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.OutstandingRequest;
import org.apache.slider.server.appmaster.state.OutstandingRequestTracker;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Test outstanding request tracker.
*/
public class TestRoleHistoryOutstandingRequestTracker extends
    BaseMockAppStateTest {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestRoleHistoryOutstandingRequestTracker.class);
  // label attached to the "worker" component so labelled-placement
  // requests can be tested
  public static final String WORKERS_LABEL = "workers";
  private NodeInstance host1 = new NodeInstance("host1", 3);
  private NodeInstance host2 = new NodeInstance("host2", 3);
  private MockResource resource = factory.newResource(48, 1);
  // the tracker under test; shared, mutable state across each test method
  private OutstandingRequestTracker tracker = new OutstandingRequestTracker();
  public static final String WORKER = "worker";

  /**
   * Extend the base application with a zero-instance "worker" component
   * carrying a YARN label expression, for the labelled-placement tests.
   */
  @Override
  public Application buildApplication() {
    Application application = super.buildApplication();
    Component component = new Component().name("worker").numberOfContainers(0L);
    component.getConfiguration().setProperty(ResourceKeys.YARN_LABEL_EXPRESSION,
        WORKERS_LABEL);
    application.getComponents().add(component);
    return application;
  }

  /**
   * A placed request can be looked up by (role, host) and removed again.
   */
  @Test
  public void testAddRetrieveEntry() throws Throwable {
    OutstandingRequest request = tracker.newRequest(host1, 0);
    assertEquals(tracker.lookupPlacedRequest(0, "host1"), request);
    assertEquals(tracker.removePlacedRequest(request), request);
    assertNull(tracker.lookupPlacedRequest(0, "host1"));
  }

  /**
   * Allocating a container satisfies only the matching (role, host)
   * request; the others stay outstanding.
   */
  @Test
  public void testAddCompleteEntry() throws Throwable {
    OutstandingRequest req1 = tracker.newRequest(host1, 0);
    req1.buildContainerRequest(resource, getRole0Status(), 0);
    tracker.newRequest(host2, 0).buildContainerRequest(resource,
        getRole0Status(), 0);
    tracker.newRequest(host1, 1).buildContainerRequest(resource,
        getRole0Status(), 0);
    // allocate for role 1 on host1: exactly that entry is consumed
    ContainerAllocationResults allocation = tracker.onContainerAllocated(1,
        "host1", null);
    assertEquals(allocation.outcome, ContainerAllocationOutcome.Placed);
    // the satisfied request must be cancelled against the RM
    assertTrue(allocation.operations.get(0) instanceof CancelSingleRequest);
    assertNull(tracker.lookupPlacedRequest(1, "host1"));
    assertNotNull(tracker.lookupPlacedRequest(0, "host1"));
  }

  /**
   * Resetting a role cancels both its open (unplaced) and placed requests.
   */
  @Test
  public void testResetOpenRequests() throws Throwable {
    // a request with no node is "open" rather than "located"
    OutstandingRequest req1 = tracker.newRequest(null, 0);
    assertFalse(req1.isLocated());
    tracker.newRequest(host1, 0);
    List<OutstandingRequest> openRequests = tracker.listOpenRequests();
    assertEquals(1, openRequests.size());
    tracker.resetOutstandingRequests(0);
    assertTrue(tracker.listOpenRequests().isEmpty());
    assertTrue(tracker.listPlacedRequests().isEmpty());
  }

  /**
   * A container whose priority does not match any issued open request is
   * reported as Unallocated and the open request remains outstanding.
   */
  @Test
  public void testRemoveOpenRequestUnissued() throws Throwable {
    OutstandingRequest req1 = tracker.newRequest(null, 0);
    req1.buildContainerRequest(resource, getRole0Status(), 0);
    assertEquals(1, tracker.listOpenRequests().size());
    MockContainer c1 = factory.newContainer(null, new MockPriority(0));
    c1.setResource(resource);
    ContainerAllocationResults allocation =
        tracker.onContainerAllocated(0, "host1", c1);
    ContainerAllocationOutcome outcome = allocation.outcome;
    assertEquals(outcome, ContainerAllocationOutcome.Unallocated);
    assertTrue(allocation.operations.isEmpty());
    assertEquals(1, tracker.listOpenRequests().size());
  }

  /**
   * An allocation matching an issued open request consumes it, yields an
   * Open outcome, and produces a cancel operation for the satisfied request.
   */
  @Test
  public void testIssuedOpenRequest() throws Throwable {
    OutstandingRequest req1 = tracker.newRequest(null, 0);
    req1.buildContainerRequest(resource, getRole0Status(), 0);
    assertEquals(1, tracker.listOpenRequests().size());
    // priority encodes the role; must be a positive value
    int pri = ContainerPriority.buildPriority(0, false);
    assertTrue(pri > 0);
    MockNodeId nodeId = factory.newNodeId("hostname-1");
    MockContainer c1 = factory.newContainer(nodeId, new MockPriority(pri));
    c1.setResource(resource);
    // the issued YARN request must reflect the capability and priority
    ContainerRequest issued = req1.getIssuedRequest();
    assertEquals(issued.getCapability(), resource);
    assertEquals(issued.getPriority().getPriority(), c1.getPriority()
        .getPriority());
    assertTrue(req1.resourceRequirementsMatch(resource));
    ContainerAllocationResults allocation =
        tracker.onContainerAllocated(0, nodeId.getHost(), c1);
    assertEquals(0, tracker.listOpenRequests().size());
    assertTrue(allocation.operations.get(0) instanceof CancelSingleRequest);
    assertEquals(allocation.outcome, ContainerAllocationOutcome.Open);
    assertEquals(allocation.origin, req1);
  }

  /**
   * resetOutstandingRequests is scoped to a single role: resetting role 0
   * cancels its two placed requests but leaves role 1's request alone.
   */
  @Test
  public void testResetEntries() throws Throwable {
    tracker.newRequest(host1, 0);
    tracker.newRequest(host2, 0);
    tracker.newRequest(host1, 1);
    List<NodeInstance> canceled = tracker.resetOutstandingRequests(0);
    assertEquals(2, canceled.size());
    assertTrue(canceled.contains(host1));
    assertTrue(canceled.contains(host2));
    assertNotNull(tracker.lookupPlacedRequest(1, "host1"));
    assertNull(tracker.lookupPlacedRequest(0, "host1"));
    // a second reset of the same role is a no-op
    canceled = tracker.resetOutstandingRequests(0);
    assertEquals(0, canceled.size());
    assertEquals(1, tracker.resetOutstandingRequests(1).size());
  }

  /**
   * Walk the escalation state machine: a located request whose placement
   * timeout has passed is escalated (cancel + relaxed-locality re-request);
   * unissued requests are skipped; STRICT-placement requests never escalate.
   */
  @Test
  public void testEscalation() throws Throwable {
    // first request: default placement
    assertEquals(getRole0Status().getPlacementPolicy(), PlacementPolicy
        .DEFAULT);
    Resource res0 = newResource(getRole0Status());
    OutstandingRequest outstanding0 = tracker.newRequest(host1,
        getRole0Status().getKey());
    ContainerRequest initialRequest =
        outstanding0.buildContainerRequest(res0, getRole0Status(), 0);
    assertNotNull(outstanding0.getIssuedRequest());
    assertTrue(outstanding0.isLocated());
    assertFalse(outstanding0.isEscalated());
    // a located request starts with strict locality
    assertFalse(initialRequest.getRelaxLocality());
    assertEquals(1, tracker.listPlacedRequests().size());
    // second. This one doesn't get launched. This is to verify that the
    // escalation process skips entries which are in the list but have not
    // been issued, which can be a race condition between request issuance &
    // escalation.
    // (not one observed outside test authoring, but retained for completeness)
    Resource res2 = newResource(getRole2Status());
    OutstandingRequest outstanding2 = tracker.newRequest(host1,
        getRole2Status().getKey());
    // advance time past role 0's placement timeout: its escalation MUST
    // now be triggered
    long interval = getRole0Status().getPlacementTimeoutSeconds() * 1000 + 500;
    long now = interval;
    final List<AbstractRMOperation> escalations = tracker
        .escalateOutstandingRequests(now);
    assertTrue(outstanding0.isEscalated());
    // the unissued role-2 request must have been skipped
    assertFalse(outstanding2.isEscalated());
    // two entries: a cancel of the original request plus its relaxed
    // replacement
    assertEquals(2, escalations.size());
    AbstractRMOperation e1 = escalations.get(0);
    assertTrue(e1 instanceof CancelSingleRequest);
    final CancelSingleRequest cancel = (CancelSingleRequest) e1;
    assertEquals(initialRequest, cancel.getRequest());
    AbstractRMOperation e2 = escalations.get(1);
    assertTrue(e2 instanceof ContainerRequestOperation);
    ContainerRequestOperation escRequest = (ContainerRequestOperation) e2;
    assertTrue(escRequest.getRequest().getRelaxLocality());
    // build that second request from an anti-affine entry
    // these get placed as well
    now += interval;
    ContainerRequest containerReq2 =
        outstanding2.buildContainerRequest(res2, getRole2Status(), now);
    // escalate a little bit more
    final List<AbstractRMOperation> escalations2 = tracker
        .escalateOutstandingRequests(now);
    // and expect no new entries: role 2's own timeout has not yet passed
    assertEquals(0, escalations2.size());
    // go past the role2 timeout
    now += getRole2Status().getPlacementTimeoutSeconds() * 1000 + 500;
    // escalate a little bit more
    final List<AbstractRMOperation> escalations3 = tracker
        .escalateOutstandingRequests(now);
    // and expect another escalation
    assertEquals(2, escalations3.size());
    assertTrue(outstanding2.isEscalated());
    // finally add a strict entry to the mix
    Resource res3 = newResource(getRole1Status());
    OutstandingRequest outstanding3 = tracker.newRequest(host1,
        getRole1Status().getKey());
    final ProviderRole providerRole1 = getRole1Status().getProviderRole();
    assertEquals(providerRole1.placementPolicy, PlacementPolicy.STRICT);
    now += interval;
    // STRICT placement must never be eligible for escalation
    assertFalse(outstanding3.mayEscalate());
    final List<AbstractRMOperation> escalations4 = tracker
        .escalateOutstandingRequests(now);
    assertTrue(escalations4.isEmpty());
  }

  /**
   * If the placement does include a label, the initial request must
   * <i>not</i> include it.
   * The escalation request will contain the label, while
   * leaving out the node list.
   * @throws Throwable on any failure
   */
  @Test
  public void testRequestLabelledPlacement() throws Throwable {
    NodeInstance ni = new NodeInstance("host1", 0);
    OutstandingRequest req1 = tracker.newRequest(ni, 0);
    Resource res0 = factory.newResource(48, 1);
    RoleStatus workerRole = lookupRole(WORKER);
    // initial request: label held back, locality strict
    ContainerRequest yarnRequest =
        req1.buildContainerRequest(res0, workerRole, 0);
    assertEquals(req1.label, WORKERS_LABEL);
    assertNull(yarnRequest.getNodeLabelExpression());
    assertFalse(yarnRequest.getRelaxLocality());
    // escalation: node list dropped, label applied, locality relaxed
    ContainerRequest yarnRequest2 = req1.escalate();
    assertNull(yarnRequest2.getNodes());
    assertTrue(yarnRequest2.getRelaxLocality());
    assertEquals(yarnRequest2.getNodeLabelExpression(), WORKERS_LABEL);
  }

  /**
   * If the placement doesn't include a label, then the escalation request
   * retains the node list, but sets relaxLocality==true.
   * @throws Throwable on any failure
   */
  @Test
  public void testRequestUnlabelledPlacement() throws Throwable {
    NodeInstance ni = new NodeInstance("host1", 0);
    OutstandingRequest req1 = tracker.newRequest(ni, 0);
    Resource res0 = factory.newResource(48, 1);
    // initial request
    ContainerRequest yarnRequest = req1.buildContainerRequest(res0,
        getRole0Status(), 0);
    assertNotNull(yarnRequest.getNodes());
    assertTrue(SliderUtils.isUnset(yarnRequest.getNodeLabelExpression()));
    assertFalse(yarnRequest.getRelaxLocality());
    ContainerRequest yarnRequest2 = req1.escalate();
    assertNotNull(yarnRequest2.getNodes());
    assertTrue(yarnRequest2.getRelaxLocality());
  }

  /** An anti-affine request with an empty node list must be rejected. */
  @Test(expected = IllegalArgumentException.class)
  public void testAARequestNoNodes() throws Throwable {
    tracker.newAARequest(getRole0Status().getKey(), new ArrayList<>(), "");
  }

  /**
   * A single-node anti-affine request records the hostname but is not
   * considered "located".
   */
  @Test
  public void testAARequest() throws Throwable {
    int role0 = getRole0Status().getKey();
    OutstandingRequest request = tracker.newAARequest(role0, Arrays
        .asList(host1), "");
    assertEquals(host1.hostname, request.hostname);
    assertFalse(request.isLocated());
  }

  /**
   * A two-node anti-affine request keeps both nodes in the YARN request,
   * uses strict locality, and is never escalated.
   */
  @Test
  public void testAARequestPair() throws Throwable {
    int role0 = getRole0Status().getKey();
    OutstandingRequest request = tracker.newAARequest(role0, Arrays.asList(
        host1, host2), "");
    assertEquals(host1.hostname, request.hostname);
    assertFalse(request.isLocated());
    ContainerRequest yarnRequest = request.buildContainerRequest(
        getRole0Status().copyResourceRequirements(new MockResource(0, 0)),
        getRole0Status(),
        0);
    assertFalse(yarnRequest.getRelaxLocality());
    assertFalse(request.mayEscalate());
    assertEquals(2, yarnRequest.getNodes().size());
  }

  /**
   * buildResourceRequirements honours the per-component normalization flag:
   * disabled keeps the requested values; enabled clamps memory to the RM
   * maximum. Restores the component's original resource afterwards.
   */
  @Test
  public void testBuildResourceRequirements() throws Throwable {
    // Store original values
    Application application = appState.getClusterStatus();
    Component role0 = application.getComponent(getRole0Status().getGroup());
    String origMem = role0.getResource().getMemory();
    Integer origVcores = role0.getResource().getCpus();
    // Resource values to be used for this test
    int testMem = 32768;
    int testVcores = 2;
    role0.resource(new org.apache.slider.api.resource.Resource().memory(Integer
        .toString(testMem)).cpus(testVcores));
    // Test normalization disabled
    LOG.info("Test normalization: disabled");
    role0.getConfiguration().setProperty(
        ResourceKeys.YARN_RESOURCE_NORMALIZATION_ENABLED, "false");
    MockResource requestedRes = new MockResource(testMem, testVcores);
    MockResource expectedRes = new MockResource(testMem, testVcores);
    LOG.info("Resource requested: {}", requestedRes);
    Resource resFinal = appState.buildResourceRequirements(getRole0Status());
    LOG.info("Resource actual: {}", resFinal);
    assertTrue(Resources.equals(expectedRes, resFinal));
    // Test normalization enabled
    LOG.info("Test normalization: enabled");
    role0.getConfiguration().setProperty(
        ResourceKeys.YARN_RESOURCE_NORMALIZATION_ENABLED, "true");
    expectedRes = new MockResource(MockAppState.RM_MAX_RAM, testVcores);
    LOG.info("Resource requested: {}", requestedRes);
    resFinal = appState.buildResourceRequirements(getRole0Status());
    LOG.info("Resource actual: {}", resFinal);
    assertTrue(Resources.equals(expectedRes, resFinal));
    // revert resource configuration to original value
    role0.resource(new org.apache.slider.api.resource.Resource().memory(origMem)
        .cpus(origVcores));
  }

  /**
   * Build the YARN resource requirements for a role.
   * @param r role to build requirements for
   * @return the resource computed by the app state
   */
  public Resource newResource(RoleStatus r) {
    return appState.buildResourceRequirements(r);
  }
}

View File

@ -0,0 +1,371 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.history;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.providers.PlacementPolicy;
import org.apache.slider.providers.ProviderRole;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.apache.slider.server.avro.LoadedRoleHistory;
import org.apache.slider.server.avro.RoleHistoryWriter;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * Test role history reading and writing.
*/
public class TestRoleHistoryRW extends BaseMockAppStateTest {
private static final Logger LOG =
LoggerFactory.getLogger(TestRoleHistoryRW.class);
private static long time = System.currentTimeMillis();
public static final String HISTORY_V1_6_ROLE =
"org/apache/slider/server/avro/history-v01-6-role.json";
public static final String HISTORY_V1_3_ROLE =
"org/apache/slider/server/avro/history-v01-3-role.json";
public static final String HISTORY_V1B_1_ROLE =
"org/apache/slider/server/avro/history_v01b_1_role.json";
private RoleStatus role0Status;
private RoleStatus role1Status;
static final ProviderRole PROVIDER_ROLE3 = new ProviderRole(
"role3",
3,
PlacementPolicy.STRICT,
3,
3,
ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
  /** @return the name used for this test's working directories/logs. */
  @Override
  public String getTestName() {
    return "TestHistoryRW";
  }
  /**
   * Cache the role statuses for roles 0 and 1, which every test uses
   * when creating and reading back node entries.
   */
  @Override
  public void setup() throws Exception {
    super.setup();
    role0Status = getRole0Status();
    role1Status = getRole1Status();
  }
@Test
public void testWriteReadEmpty() throws Throwable {
RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
roleHistory.onStart(fs, historyPath);
Path history = roleHistory.saveHistory(time++);
assertTrue(fs.getFileStatus(history).isFile());
RoleHistoryWriter historyWriter = new RoleHistoryWriter();
historyWriter.read(fs, history);
}
  /**
   * A node entry's last-used timestamp survives a save/load round trip
   * into a fresh role history.
   */
  @Test
  public void testWriteReadData() throws Throwable {
    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
    // onStart returns false: no prior history exists to thaw
    assertFalse(roleHistory.onStart(fs, historyPath));
    String addr = "localhost";
    NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
    NodeEntry ne1 = instance.getOrCreate(0);
    ne1.setLastUsed(0xf00d);
    Path history = roleHistory.saveHistory(time++);
    assertTrue(fs.getFileStatus(history).isFile());
    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
    RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
    // load the saved file and rebuild a second history from it
    LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history);
    assertTrue(0 < loadedRoleHistory.size());
    rh2.rebuild(loadedRoleHistory);
    NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
    assertNotNull(ni2);
    NodeEntry ne2 = ni2.get(0);
    assertNotNull(ne2);
    // the timestamp written before the save must have been preserved
    assertEquals(ne2.getLastUsed(), ne1.getLastUsed());
  }
  /**
   * Save/load round trip with live (in-use) entries: verifies cluster size,
   * per-entry timestamps, the thawed-data time, and that the rebuilt
   * recent-node lists only offer available nodes.
   */
  @Test
  public void testWriteReadActiveData() throws Throwable {
    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
    roleHistory.onStart(fs, historyPath);
    String addr = "localhost";
    String addr2 = "rack1server5";
    // localhost: role 0 used in the past, role 1 live-then-released
    NodeInstance localhost = roleHistory.getOrCreateNodeInstance(addr);
    NodeEntry orig1 = localhost.getOrCreate(role0Status.getKey());
    orig1.setLastUsed(0x10);
    // rack1server5: role 1 with live containers, hence unavailable
    NodeInstance rack1server5 = roleHistory.getOrCreateNodeInstance(addr2);
    NodeEntry orig2 = rack1server5.getOrCreate(role1Status.getKey());
    orig2.setLive(3);
    assertFalse(orig2.isAvailable());
    NodeEntry orig3 = localhost.getOrCreate(role1Status.getKey());
    orig3.setLastUsed(0x20);
    orig3.setLive(1);
    assertFalse(orig3.isAvailable());
    // releasing the only live container makes the entry available again
    orig3.release();
    assertTrue(orig3.isAvailable());
    roleHistory.dump();
    long savetime = 0x0001000;
    Path history = roleHistory.saveHistory(savetime);
    assertTrue(fs.getFileStatus(history).isFile());
    describe("Loaded");
    LOG.info("testWriteReadActiveData in {}", history);
    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
    RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
    LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history);
    // three node entries were written above
    assertEquals(3, loadedRoleHistory.size());
    rh2.rebuild(loadedRoleHistory);
    rh2.dump();
    assertEquals(2, rh2.getClusterSize());
    NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
    assertNotNull(ni2);
    NodeEntry loadedNE = ni2.get(role0Status.getKey());
    assertEquals(loadedNE.getLastUsed(), orig1.getLastUsed());
    NodeInstance ni2b = rh2.getExistingNodeInstance(addr2);
    assertNotNull(ni2b);
    NodeEntry loadedNE2 = ni2b.get(role1Status.getKey());
    assertNotNull(loadedNE2);
    // entries that were live at save time get the save time as last-used
    assertEquals(loadedNE2.getLastUsed(), savetime);
    assertEquals(rh2.getThawedDataTime(), savetime);
    // now start it
    rh2.buildRecentNodeLists();
    describe("starting");
    rh2.dump();
    // role 0's recent list: only localhost was ever used for role 0
    List<NodeInstance> available0 = rh2.cloneRecentNodeList(role0Status
        .getKey());
    assertEquals(1, available0.size());
    NodeInstance entry = available0.get(0);
    assertEquals(entry.hostname, "localhost");
    assertEquals(entry, localhost);
    List<NodeInstance> available1 = rh2.cloneRecentNodeList(role1Status
        .getKey());
    assertEquals(2, available1.size());
    //and verify that even if last used was set, the save time is picked up
    assertEquals(entry.get(role1Status.getKey()).getLastUsed(), roleHistory
        .getSaveTime());
  }
  /**
   * A second history started over the same path thaws the previously saved
   * data: the node entry is restored and the thawed-data time matches the
   * save time.
   */
  @Test
  public void testWriteThaw() throws Throwable {
    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
    // no prior history yet
    assertFalse(roleHistory.onStart(fs, historyPath));
    String addr = "localhost";
    NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
    NodeEntry ne1 = instance.getOrCreate(0);
    ne1.setLastUsed(0xf00d);
    Path history = roleHistory.saveHistory(time++);
    long savetime =roleHistory.getSaveTime();
    assertTrue(fs.getFileStatus(history).isFile());
    RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
    // onStart returns true: the saved history was found and thawed
    assertTrue(rh2.onStart(fs, historyPath));
    NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
    assertNotNull(ni2);
    NodeEntry ne2 = ni2.get(0);
    assertNotNull(ne2);
    assertEquals(ne2.getLastUsed(), ne1.getLastUsed());
    assertEquals(rh2.getThawedDataTime(), savetime);
  }
  /**
   * purgeOlderHistoryEntries deletes every history file older than the one
   * given, returns the number deleted, and fails with
   * {@link FileNotFoundException} when the reference file itself is gone.
   */
  @Test
  public void testPurgeOlderEntries() throws Throwable {
    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
    time = 1;
    // create six empty history files with ascending timestamps
    Path file1 = touch(historyWriter, time++);
    Path file2 = touch(historyWriter, time++);
    Path file3 = touch(historyWriter, time++);
    Path file4 = touch(historyWriter, time++);
    Path file5 = touch(historyWriter, time++);
    Path file6 = touch(historyWriter, time++);
    // nothing is older than the oldest file
    assertEquals(0, historyWriter.purgeOlderHistoryEntries(fs, file1));
    assertEquals(1, historyWriter.purgeOlderHistoryEntries(fs, file2));
    // purging is idempotent
    assertEquals(0, historyWriter.purgeOlderHistoryEntries(fs, file2));
    assertEquals(3, historyWriter.purgeOlderHistoryEntries(fs, file5));
    assertEquals(1, historyWriter.purgeOlderHistoryEntries(fs, file6));
    try {
      // make an impossible assertion that will fail if the method
      // actually completes
      assertEquals(-1, historyWriter.purgeOlderHistoryEntries(fs, file1));
    } catch (FileNotFoundException ignored) {
      // expected: file1 was deleted by the earlier purges
    }
  }
public Path touch(RoleHistoryWriter historyWriter, long timeMs)
throws IOException {
Path path = historyWriter.createHistoryFilename(historyPath, timeMs);
FSDataOutputStream out = fs.create(path);
out.close();
return path;
}
  /**
   * Empty history files are skipped when thawing, and history files older
   * than the newest good one are purged; newer files are left alone.
   */
  @Test
  public void testSkipEmptyFileOnRead() throws Throwable {
    describe("verify that empty histories are skipped on read; old histories " +
        "purged");
    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
    roleHistory.onStart(fs, historyPath);
    time = 0;
    // an "old" save with no data in it
    Path oldhistory = roleHistory.saveHistory(time++);
    String addr = "localhost";
    NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
    NodeEntry ne1 = instance.getOrCreate(0);
    ne1.setLastUsed(0xf00d);
    // a newer save containing real data
    Path goodhistory = roleHistory.saveHistory(time++);
    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
    // an even newer, zero-byte file that the reader must skip over
    Path touched = touch(historyWriter, time++);
    RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
    assertTrue(rh2.onStart(fs, historyPath));
    // the data from the good file must have been loaded
    NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
    assertNotNull(ni2);
    //and assert the older file got purged
    assertFalse(fs.exists(oldhistory));
    assertTrue(fs.exists(goodhistory));
    assertTrue(fs.exists(touched));
  }
/**
 * Verify that a history file containing unparseable data is skipped on
 * reload, with the loader falling back to the newest valid history, and
 * that the oldest history file is purged.
 */
@Test
public void testSkipBrokenFileOnRead() throws Throwable {
  // message fixed: this test covers broken (unparseable) files; the
  // previous text was copy-pasted from the empty-file test
  describe("verify that broken histories are skipped on read; old histories " +
      "purged");
  RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
  roleHistory.onStart(fs, historyPath);
  time = 0;
  // first save: becomes the "old" history expected to be purged
  Path oldhistory = roleHistory.saveHistory(time++);
  String addr = "localhost";
  NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
  NodeEntry ne1 = instance.getOrCreate(0);
  ne1.setLastUsed(0xf00d);
  // second save: the valid history the reader should pick up
  Path goodhistory = roleHistory.saveHistory(time++);
  RoleHistoryWriter historyWriter = new RoleHistoryWriter();
  Path badfile = historyWriter.createHistoryFilename(historyPath, time++);
  // write junk that will fail to parse as a history record;
  // try-with-resources ensures the stream is always closed
  try (FSDataOutputStream out = fs.create(badfile)) {
    out.writeBytes("{broken:true}");
  }
  RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
  describe("IGNORE STACK TRACE BELOW");
  assertTrue(rh2.onStart(fs, historyPath));
  describe("IGNORE STACK TRACE ABOVE");
  NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
  assertNotNull(ni2);
  //and assert the older file got purged
  assertFalse(fs.exists(oldhistory));
  assertTrue(fs.exists(goodhistory));
  assertTrue(fs.exists(badfile));
}
/**
 * Verify that a v1 JSON history with three roles loads cleanly when
 * the role count matches the current state: rebuild() reports no
 * unmatched entries.
 * @throws Throwable on failure
 */
@Test
public void testReloadDataV13Role() throws Throwable {
  RoleHistoryWriter historyWriter = new RoleHistoryWriter();
  LoadedRoleHistory loaded = historyWriter.read(HISTORY_V1_3_ROLE);
  assertEquals(4, loaded.size());
  // every entry maps onto a known role: nothing is left over
  assertEquals(0, new MockRoleHistory(MockFactory.ROLES).rebuild(loaded));
}
/**
 * Verify that a v1 JSON history containing more roles than the current
 * state can be read: the surplus entries are reported by rebuild().
 * @throws Throwable on failure
 */
@Test
public void testReloadDataV16Role() throws Throwable {
  RoleHistoryWriter historyWriter = new RoleHistoryWriter();
  LoadedRoleHistory loaded = historyWriter.read(HISTORY_V1_6_ROLE);
  assertEquals(6, loaded.size());
  // three entries reference roles unknown to the current configuration
  assertEquals(3, new MockRoleHistory(MockFactory.ROLES).rebuild(loaded));
}
/**
 * Verify that a v1 JSON history can be read when the current state
 * declares more roles than the saved history: every saved entry still
 * maps onto a known role, so rebuild() reports no unmatched entries.
 * @throws Throwable on failure
 */
@Test
public void testReloadLessRoles() throws Throwable {
  String source = HISTORY_V1_3_ROLE;
  RoleHistoryWriter writer = new RoleHistoryWriter();
  LoadedRoleHistory loadedRoleHistory = writer.read(source);
  assertEquals(4, loadedRoleHistory.size());
  // fixed: was a raw ArrayList; use the generic form to avoid an
  // unchecked-conversion warning
  List<ProviderRole> expandedRoles = new ArrayList<>(MockFactory.ROLES);
  expandedRoles.add(PROVIDER_ROLE3);
  RoleHistory roleHistory = new MockRoleHistory(expandedRoles);
  assertEquals(0, roleHistory.rebuild(loadedRoleHistory));
}
/**
 * Verify that a v1b JSON file can be read: the file holds a single
 * history entry, and its explicit role map declares two roles, all
 * resolvable against the current state (rebuild() leaves nothing
 * unmatched).
 * @throws Throwable on failure
 */
@Test
public void testReloadDataV1B1Role() throws Throwable {
  String source = HISTORY_V1B_1_ROLE;
  RoleHistoryWriter writer = new RoleHistoryWriter();
  LoadedRoleHistory loadedRoleHistory = writer.read(source);
  assertEquals(1, loadedRoleHistory.size());
  // the v1b format carries an explicit role map alongside the entries
  assertEquals(2, loadedRoleHistory.roleMap.size());
  RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
  assertEquals(0, roleHistory.rebuild(loadedRoleHistory));
}
}

View File

@ -0,0 +1,162 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.history;
import org.apache.hadoop.fs.Path;
import org.apache.slider.common.SliderKeys;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.avro.NewerFilesFirst;
import org.apache.slider.server.avro.RoleHistoryWriter;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Test role history read/write ordering: history filename generation and
 * pattern matching, newest-first sorting of history files, and selection
 * of the most recent file on load.
 */
public class TestRoleHistoryRWOrdering extends BaseMockAppStateTest {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestRoleHistoryRWOrdering.class);

  // filenames embed a hex timestamp; higher value == newer file
  private List<Path> paths = pathlist(
      Arrays.asList(
          "hdfs://localhost/history-0406c.json",
          "hdfs://localhost/history-5fffa.json",
          "hdfs://localhost/history-0001a.json",
          "hdfs://localhost/history-0001f.json"
      )
  );
  private Path h0406c = paths.get(0);
  private Path h5fffa = paths.get(1);
  // fixed: previously initialized from paths.get(3), which is
  // "history-0001f.json" and did not match this variable's name
  private Path h0001a = paths.get(2);

  public TestRoleHistoryRWOrdering() throws URISyntaxException {
  }

  /**
   * Convert a list of path name strings into a list of paths.
   * @param pathnames path name strings
   * @return the converted paths
   * @throws URISyntaxException if a name is not a valid URI
   */
  List<Path> pathlist(List<String> pathnames) throws URISyntaxException {
    List<Path> pathList = new ArrayList<>();
    for (String p : pathnames) {
      pathList.add(new Path(new URI(p)));
    }
    return pathList;
  }

  @Override
  public String getTestName() {
    return "TestHistoryRWOrdering";
  }

  /**
   * This tests regexp pattern matching. It uses the current time so isn't
   * repeatable -but it does test a wider range of values in the process.
   * @throws Throwable on failure
   */
  @Test
  public void testPatternRoundTrip() throws Throwable {
    describe("test pattern matching of names");
    long value = System.currentTimeMillis();
    String name = String.format(SliderKeys.HISTORY_FILENAME_CREATION_PATTERN,
        value);
    String matchpattern = SliderKeys.HISTORY_FILENAME_MATCH_PATTERN;
    Pattern pattern = Pattern.compile(matchpattern);
    Matcher matcher = pattern.matcher(name);
    if (!matcher.find()) {
      // fixed: the message contained Groovy GString placeholders
      // ("$matchpattern"/"$name") which Java never interpolates;
      // build the message explicitly instead
      throw new Exception(
          "No match for pattern " + matchpattern + " in " + name);
    }
  }

  /**
   * Verify that when multiple histories are written, the listing is
   * newest-first and ignores files that don't match the history pattern.
   * @throws Throwable on failure
   */
  @Test
  public void testWriteSequenceReadData() throws Throwable {
    describe("test that if multiple entries are written, the newest is picked" +
        " up");
    long time = System.currentTimeMillis();
    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
    assertFalse(roleHistory.onStart(fs, historyPath));
    String addr = "localhost";
    NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
    NodeEntry ne1 = instance.getOrCreate(0);
    ne1.setLastUsed(0xf00d);
    Path history1 = roleHistory.saveHistory(time++);
    Path history2 = roleHistory.saveHistory(time++);
    Path history3 = roleHistory.saveHistory(time);
    //inject a later file with a different name
    sliderFileSystem.cat(new Path(historyPath, "file.json"), true, "hello," +
        " world");
    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
    List<Path> entries = historyWriter.findAllHistoryEntries(
        fs,
        historyPath,
        false);
    // newest first; the foreign "file.json" must not appear
    assertEquals(3, entries.size());
    assertEquals(history3, entries.get(0));
    assertEquals(history2, entries.get(1));
    assertEquals(history1, entries.get(2));
  }

  @Test
  public void testPathStructure() throws Throwable {
    assertEquals("history-5fffa.json", h5fffa.getName());
  }

  /**
   * Verify the comparator: a file is equal to itself, and files with
   * higher hex timestamps (newer) sort before older ones.
   * @throws Throwable on failure
   */
  @Test
  public void testPathnameComparator() throws Throwable {
    NewerFilesFirst newerName = new NewerFilesFirst();
    LOG.info("{} name is {}", h5fffa, h5fffa.getName());
    LOG.info("{} name is {}", h0406c, h0406c.getName());
    assertEquals(0, newerName.compare(h5fffa, h5fffa));
    assertTrue(newerName.compare(h5fffa, h0406c) < 0);
    assertTrue(newerName.compare(h5fffa, h0001a) < 0);
    assertTrue(newerName.compare(h0001a, h5fffa) > 0);
  }

  @Test
  public void testPathSort() throws Throwable {
    List<Path> paths2 = new ArrayList<>(paths);
    RoleHistoryWriter.sortHistoryPaths(paths2);
    // expected order: 5fffa, 0406c, 0001f, 0001a (newest first)
    assertListEquals(paths2,
        Arrays.asList(
            paths.get(1),
            paths.get(0),
            paths.get(3),
            paths.get(2)
        ));
  }
}

View File

@ -0,0 +1,298 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.history;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.providers.PlacementPolicy;
import org.apache.slider.providers.ProviderRole;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockContainer;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
import org.apache.slider.server.appmaster.state.ContainerAllocationOutcome;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.OutstandingRequest;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.List;
/**
 * Test the RoleHistory availability list and request tracking: that hosts
 * are removed from the recent-node list when requested and returned when
 * requests complete or are cancelled.
 */
public class TestRoleHistoryRequestTracking extends BaseMockAppStateTest {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestRoleHistoryRequestTracking.class);

  // nodes named ageNActiveM: age-of-use N, M active containers
  private NodeInstance age1Active4;
  private NodeInstance age2Active2;
  private NodeInstance age2Active0;
  private NodeInstance age3Active0;
  private NodeInstance age4Active1;

  private RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
  // 1MB, 1 vcore
  private Resource resource = Resource.newInstance(1, 1);

  private RoleStatus roleStatus;

  public TestRoleHistoryRequestTracking() throws BadConfigException {
  }

  /**
   * Issue a container request for the given role and return the
   * underlying AM-RM client request.
   * @param rs role status to request a container for
   * @return the issued AM-RM request
   */
  AMRMClient.ContainerRequest requestContainer(RoleStatus rs) {
    return roleHistory.requestContainerForRole(rs).getIssuedRequest();
  }

  @Override
  public String getTestName() {
    return "TestRoleHistoryAvailableList";
  }

  @Override
  public void setup() throws Exception {
    super.setup();
    age1Active4 = nodeInstance(1, 4, 0, 0);
    age2Active2 = nodeInstance(2, 2, 0, 1);
    age2Active0 = nodeInstance(2, 0, 0, 0);
    age3Active0 = nodeInstance(3, 0, 0, 0);
    age4Active1 = nodeInstance(4, 1, 0, 0);
    roleHistory.insert(Arrays.asList(age2Active2, age2Active0, age4Active1,
        age1Active4, age3Active0));
    roleHistory.buildRecentNodeLists();
    roleStatus = getRole0Status();
    roleStatus.setResourceRequirements(Resource.newInstance(1, 1));
  }

  /**
   * Only nodes with no active containers of the role appear on the
   * recent-node list, newest-used first.
   */
  @Test
  public void testAvailableListBuiltForRoles() throws Throwable {
    List<NodeInstance> available0 = roleHistory.cloneRecentNodeList(
        roleStatus.getKey());
    assertListEquals(Arrays.asList(age3Active0, age2Active0), available0);
  }

  /**
   * Requesting an instance on a node removes that node from the
   * recent-node list.
   */
  @Test
  public void testRequestedNodeOffList() throws Throwable {
    NodeInstance ni = roleHistory.findRecentNodeForNewInstance(roleStatus);
    assertEquals(age3Active0, ni);
    assertListEquals(Arrays.asList(age2Active0),
        roleHistory.cloneRecentNodeList(roleStatus.getKey()));
    roleHistory.requestInstanceOnNode(ni,
        roleStatus,
        resource
    );
  }

  /**
   * Nodes past the failure threshold are skipped; resetting the failure
   * count makes them eligible again.
   */
  @Test
  public void testRequestedNodeOffListWithFailures() throws Throwable {
    assertFalse(roleHistory.cloneRecentNodeList(roleStatus.getKey()).isEmpty());
    NodeEntry age3role0 = recordAsFailed(age3Active0, roleStatus.getKey(), 4);
    assertTrue(age3Active0.isConsideredUnreliable(roleStatus.getKey(),
        roleStatus.getNodeFailureThreshold()));
    recordAsFailed(age2Active0, roleStatus.getKey(), 4);
    assertTrue(age2Active0.isConsideredUnreliable(roleStatus.getKey(),
        roleStatus.getNodeFailureThreshold()));
    // expect to get a null node back
    NodeInstance ni = roleHistory.findRecentNodeForNewInstance(roleStatus);
    assertNull(ni);
    // which is translated to a no-location request
    AMRMClient.ContainerRequest req = roleHistory.requestInstanceOnNode(ni,
        roleStatus,
        resource).getIssuedRequest();
    assertNull(req.getNodes());
    LOG.info("resetting failure count");
    age3role0.resetFailedRecently();
    roleHistory.dump();
    assertEquals(0, age3role0.getFailedRecently());
    assertFalse(age3Active0.isConsideredUnreliable(roleStatus.getKey(),
        roleStatus.getNodeFailureThreshold()));
    assertFalse(roleHistory.cloneRecentNodeList(roleStatus.getKey()).isEmpty());
    // looking for a node should now find one
    ni = roleHistory.findRecentNodeForNewInstance(roleStatus);
    assertEquals(age3Active0, ni);
    req = roleHistory.requestInstanceOnNode(ni, roleStatus, resource)
        .getIssuedRequest();
    assertEquals(1, req.getNodes().size());
  }

  /**
   * Verify that strict placement policies generate requests for nodes
   * irrespective of their failed status.
   * @throws Throwable on failure
   */
  @Test
  public void testStrictPlacementIgnoresFailures() throws Throwable {
    RoleStatus targetRole = getRole1Status();
    final ProviderRole providerRole1 = targetRole.getProviderRole();
    assertEquals(PlacementPolicy.STRICT, providerRole1.placementPolicy);
    int key1 = targetRole.getKey();
    int key0 = getRole0Status().getKey();
    List<NodeInstance> nodes0 = Arrays.asList(age1Active4, age2Active0,
        age2Active2, age3Active0, age4Active1);
    recordAllFailed(key0, 4, nodes0);
    recordAllFailed(key1, 4, nodes0);
    // trigger a list rebuild
    roleHistory.buildRecentNodeLists();
    List<NodeInstance> recentRole0 = roleHistory.cloneRecentNodeList(key0);
    assertTrue(recentRole0.indexOf(age3Active0) < recentRole0
        .indexOf(age2Active0));
    // the non-strict role has no suitable nodes
    assertNull(roleHistory.findRecentNodeForNewInstance(getRole0Status()));
    // but the strict role still gets distinct nodes despite the failures
    NodeInstance ni = roleHistory.findRecentNodeForNewInstance(targetRole);
    assertNotNull(ni);
    NodeInstance ni2 = roleHistory.findRecentNodeForNewInstance(targetRole);
    assertNotNull(ni2);
    assertNotEquals(ni, ni2);
  }

  @Test
  public void testFindAndRequestNode() throws Throwable {
    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
    assertEquals(age3Active0.hostname, req.getNodes().get(0));
    List<NodeInstance> a2 = roleHistory.cloneRecentNodeList(roleStatus
        .getKey());
    assertListEquals(Arrays.asList(age2Active0), a2);
  }

  @Test
  public void testRequestedNodeIntoReqList() throws Throwable {
    requestContainer(roleStatus);
    List<OutstandingRequest> requests = roleHistory.listPlacedRequests();
    assertEquals(1, requests.size());
    assertEquals(age3Active0.hostname, requests.get(0).hostname);
  }

  @Test
  public void testCompletedRequestDropsNode() throws Throwable {
    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
    List<OutstandingRequest> requests = roleHistory.listPlacedRequests();
    assertEquals(1, requests.size());
    String hostname = requests.get(0).hostname;
    assertEquals(age3Active0.hostname, hostname);
    assertEquals(hostname, req.getNodes().get(0));
    MockContainer container = factory.newContainer(req, hostname);
    assertOnContainerAllocated(container, 2, 1);
    assertNoOutstandingPlacedRequests();
  }

  /** Assert that allocating the container is NOT an open-request outcome. */
  public void assertOnContainerAllocated(Container c1, int p1, int p2) {
    assertNotEquals(ContainerAllocationOutcome.Open, roleHistory
        .onContainerAllocated(c1, p1, p2).outcome);
  }

  /** Assert that allocating the container IS an open-request outcome. */
  public void assertOnContainerAllocationOpen(Container c1, int p1, int p2) {
    assertEquals(ContainerAllocationOutcome.Open, roleHistory
        .onContainerAllocated(c1, p1, p2).outcome);
  }

  void assertNoOutstandingPlacedRequests() {
    assertTrue(roleHistory.listPlacedRequests().isEmpty());
  }

  public void assertOutstandingPlacedRequests(int i) {
    assertEquals(i, roleHistory.listPlacedRequests().size());
  }

  @Test
  public void testTwoRequests() throws Throwable {
    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
    AMRMClient.ContainerRequest req2 = requestContainer(roleStatus);
    List<OutstandingRequest> requests = roleHistory.listPlacedRequests();
    assertEquals(2, requests.size());
    MockContainer container = factory.newContainer(req, req.getNodes().get(0));
    assertOnContainerAllocated(container, 2, 1);
    assertOutstandingPlacedRequests(1);
    container = factory.newContainer(req2, req2.getNodes().get(0));
    assertOnContainerAllocated(container, 2, 2);
    assertNoOutstandingPlacedRequests();
  }

  @Test
  public void testThreeRequestsOneUnsatisified() throws Throwable {
    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
    AMRMClient.ContainerRequest req2 = requestContainer(roleStatus);
    AMRMClient.ContainerRequest req3 = requestContainer(roleStatus);
    List<OutstandingRequest> requests = roleHistory.listPlacedRequests();
    // only two nodes were available, so only two requests are placed
    assertEquals(2, requests.size());
    MockContainer container = factory.newContainer(req, req.getNodes().get(0));
    assertOnContainerAllocated(container, 2, 1);
    assertOutstandingPlacedRequests(1);
    container = factory.newContainer(req3, "three");
    assertOnContainerAllocationOpen(container, 3, 2);
    assertOutstandingPlacedRequests(1);
    // the final allocation will trigger a cleanup
    container = factory.newContainer(req2, "four");
    // no node dropped
    assertEquals(ContainerAllocationOutcome.Unallocated,
        roleHistory.onContainerAllocated(container, 3, 3).outcome);
    // yet the list is now empty
    assertNoOutstandingPlacedRequests();
    // fixed: the result of isEmpty() was previously computed and
    // discarded; assert it so the open-request list is actually checked
    assertTrue(roleHistory.listOpenRequests().isEmpty());
    // and the remainder goes onto the available list
    List<NodeInstance> a2 = roleHistory.cloneRecentNodeList(roleStatus
        .getKey());
    assertListEquals(Arrays.asList(age2Active0), a2);
  }

  @Test
  public void testThreeRequests() throws Throwable {
    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
    AMRMClient.ContainerRequest req2 = requestContainer(roleStatus);
    AMRMClient.ContainerRequest req3 = requestContainer(roleStatus);
    assertOutstandingPlacedRequests(2);
    // the third request could not be placed, so it has no node hints
    assertNull(req3.getNodes());
    MockContainer container = factory.newContainer(req, req.getNodes().get(0));
    assertOnContainerAllocated(container, 3, 1);
    assertOutstandingPlacedRequests(1);
    container = factory.newContainer(req2, req2.getNodes().get(0));
    assertOnContainerAllocated(container, 3, 2);
    assertNoOutstandingPlacedRequests();
    container = factory.newContainer(req3, "three");
    assertOnContainerAllocationOpen(container, 3, 3);
    assertNoOutstandingPlacedRequests();
  }
}

View File

@ -0,0 +1,117 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.history;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.server.appmaster.actions.ResetFailureWindow;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockAM;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler;
import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.operations.UpdateBlacklistOperation;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
/**
 * Test updating the node blacklist: nodes whose failure count for a role
 * exceeds the threshold are blacklisted, and resetting the failure window
 * removes them again.
 */
public class TestRoleHistoryUpdateBlacklist extends BaseMockAppStateTest {
  private RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
  // role statuses passed to updateBlacklist (just role 0 here)
  private Collection<RoleStatus> roleStatuses;
  private RoleStatus roleStatus;
  // the single node whose failure count drives blacklisting
  private NodeInstance ni;

  public TestRoleHistoryUpdateBlacklist() throws BadConfigException {
  }

  @Override
  public String getTestName() {
    return "TestUpdateBlacklist";
  }

  @Override
  public void setup() throws Exception {
    super.setup();
    ni = nodeInstance(1, 0, 0, 0);
    roleHistory.insert(Arrays.asList(ni));
    roleHistory.buildRecentNodeLists();
    appState.setRoleHistory(roleHistory);
    roleStatus = getRole0Status();
    roleStatuses = Arrays.asList(roleStatus);
  }

  /**
   * Walk the node's failure count across the threshold and back, checking
   * that updateBlacklist returns an operation only when the blacklist
   * membership actually changes (and null when it is unchanged).
   */
  @Test
  public void testUpdateBlacklist() {
    assertFalse(ni.isBlacklisted());
    // at threshold, blacklist is unmodified
    recordAsFailed(ni, roleStatus.getKey(), MockFactory.NODE_FAILURE_THRESHOLD);
    UpdateBlacklistOperation op = roleHistory.updateBlacklist(roleStatuses);
    assertNull(op);
    assertFalse(ni.isBlacklisted());
    // threshold is exceeded, node goes on blacklist
    recordAsFailed(ni, roleStatus.getKey(), 1);
    op = roleHistory.updateBlacklist(roleStatuses);
    assertNotNull(op);
    assertTrue(ni.isBlacklisted());
    // blacklist remains unmodified, so no new operation is produced
    op = roleHistory.updateBlacklist(roleStatuses);
    assertNull(op);
    assertTrue(ni.isBlacklisted());
    // failure threshold reset, node goes off blacklist
    ni.resetFailedRecently();
    op = roleHistory.updateBlacklist(roleStatuses);
    assertNotNull(op);
    assertFalse(ni.isBlacklisted());
  }

  /**
   * Verify end-to-end behavior: exceeding the threshold makes the app
   * state review emit an UpdateBlacklistOperation, executing it updates
   * the mock RM handler's blacklist, and a ResetFailureWindow action
   * clears both the handler's blacklist and the node's flag.
   */
  @Test
  public void testBlacklistOperations()
      throws Exception {
    recordAsFailed(ni, roleStatus.getKey(), MockFactory
        .NODE_FAILURE_THRESHOLD + 1);
    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
    assertListLength(ops, 1);
    AbstractRMOperation op = ops.get(0);
    assertTrue(op instanceof UpdateBlacklistOperation);
    assertTrue(ni.isBlacklisted());
    MockRMOperationHandler handler = new MockRMOperationHandler();
    assertEquals(0, handler.getBlacklisted());
    handler.execute(ops);
    assertEquals(1, handler.getBlacklisted());
    ResetFailureWindow resetter = new ResetFailureWindow(handler);
    resetter.execute(new MockAM(), null, appState);
    assertEquals(0, handler.getBlacklisted());
    assertFalse(ni.isBlacklisted());
  }
}

View File

@ -0,0 +1,123 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.mock;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.slider.common.tools.SliderUtils;
/**
 * Provides allocation services to a cluster -both random and placed.
 *
 * Important: container allocations need an app attempt ID put into the
 * container ID
 */
public class Allocator {

  private final MockYarnCluster cluster;
  /**
   * Rolling index into the cluster used for the next "random" assignment.
   */
  private int rollingIndex = 0;

  Allocator(MockYarnCluster cluster) {
    this.cluster = cluster;
  }

  /**
   * Allocate a node using the list of nodes in the request as the
   * placement hints.
   * @param request request
   * @return the allocated container -or null for none
   */
  MockContainer allocate(AMRMClient.ContainerRequest request) {
    MockYarnCluster.MockYarnClusterNode node = null;
    MockYarnCluster.MockYarnClusterContainer allocated = null;
    if (SliderUtils.isNotEmpty(request.getNodes())) {
      // try each hinted host in order until one has capacity
      for (String host : request.getNodes()) {
        node = cluster.lookup(host);
        allocated = node.allocate();
        if (allocated != null) {
          break;
        }
      }
    }
    if (allocated != null) {
      return createContainerRecord(request, allocated, node);
    } else {
      // fixed: also treat a null node list as "no placement hints";
      // previously this line threw NPE when getNodes() was null and
      // relaxLocality was false
      if (request.getRelaxLocality() || request.getNodes() == null
          || request.getNodes().isEmpty()) {
        // fallback to anywhere
        return allocateRandom(request);
      } else {
        //no match and locality can't be relaxed
        return null;
      }
    }
  }

  /**
   * Allocate a node without any positioning -use whatever policy this
   * allocator chooses.
   * @param request request
   * @return the allocated container -or null for none
   */
  MockContainer allocateRandom(AMRMClient.ContainerRequest request) {
    int start = rollingIndex;
    MockYarnCluster.MockYarnClusterNode node = cluster.nodeAt(rollingIndex);
    MockYarnCluster.MockYarnClusterContainer allocated = node.allocate();
    // if there is no space, try again -but stop once the index has
    // wrapped all the way around to the starting node
    while (allocated == null && start != nextIndex()) {
      node = cluster.nodeAt(rollingIndex);
      allocated = node.allocate();
    }
    //here the allocation is set (or null if the cluster is full),
    //so create the response
    return createContainerRecord(request, allocated, node);
  }

  /**
   * Create a container record -if one was allocated.
   * @param request the originating request (priority and capability are
   * copied into the container)
   * @param allocated allocation -may be null
   * @param node node with the container
   * @return a container record, or null if there was no allocation
   */
  public MockContainer createContainerRecord(
      AMRMClient.ContainerRequest request,
      MockYarnCluster.MockYarnClusterContainer allocated,
      MockYarnCluster.MockYarnClusterNode node) {
    if (allocated == null) {
      // no space
      return null;
    }
    MockContainer container = new MockContainer();
    container.setId(new MockContainerId(allocated.getCid()));
    container.setNodeId(node.getNodeId());
    container.setNodeHttpAddress(node.httpAddress());
    container.setPriority(request.getPriority());
    container.setResource(request.getCapability());
    return container;
  }

  /**
   * Advance the rolling index by one, wrapping at the cluster size.
   * @return the new index
   */
  public int nextIndex() {
    rollingIndex = (rollingIndex + 1) % cluster.getClusterSize();
    return rollingIndex;
  }
}

View File

@ -0,0 +1,524 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.mock;
import com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.slider.api.resource.Application;
import org.apache.slider.common.tools.SliderFileSystem;
import org.apache.slider.common.tools.SliderUtils;
import org.apache.slider.core.exceptions.BadClusterStateException;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.core.exceptions.SliderInternalStateException;
import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
import org.apache.slider.core.main.LauncherExitCodes;
import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
import org.apache.slider.server.appmaster.state.AppState;
import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
import org.apache.slider.server.appmaster.state.ContainerAssignment;
import org.apache.slider.server.appmaster.state.ContainerOutcome;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.NodeMap;
import org.apache.slider.server.appmaster.state.ProviderAppState;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.apache.slider.server.appmaster.state.StateAccessForProviders;
import org.apache.slider.utils.SliderTestBase;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map.Entry;
/**
* Base for app state tests.
*/
public abstract class BaseMockAppStateTest extends SliderTestBase implements
MockRoles {
private static final Logger LOG =
LoggerFactory.getLogger(BaseMockAppStateTest.class);
protected static final List<ContainerId> EMPTY_ID_LIST = Collections
.emptyList();
protected final MockFactory factory = MockFactory.INSTANCE;
protected MockAppState appState;
protected MockYarnEngine engine;
protected FileSystem fs;
protected SliderFileSystem sliderFileSystem;
protected File historyWorkDir;
protected Path historyPath;
protected MockApplicationId applicationId;
protected MockApplicationAttemptId applicationAttemptId;
protected StateAccessForProviders stateAccess;
/**
 * Override point: called in setup() to create the YARN engine; can
 * be changed for different sizes and options.
 * @return a mock YARN engine built with arguments (8, 8)
 * -- presumably cluster size and containers per node; confirm against
 * MockYarnEngine's constructor
 */
public MockYarnEngine createYarnEngine() {
  return new MockYarnEngine(8, 8);
}
/**
 * Test setup: create a local filesystem, the slider filesystem wrapper,
 * the mock YARN engine and then initialize the application state.
 */
@Override
public void setup() throws Exception {
  super.setup();
  YarnConfiguration conf = SliderUtils.createConfiguration();
  // a file:/// filesystem so tests need no running cluster
  fs = FileSystem.get(new URI("file:///"), conf);
  sliderFileSystem = new SliderFileSystem(fs, conf);
  engine = createYarnEngine();
  initApp();
}
/**
* Initialize the application.
* This uses the binding information supplied by {@link #buildBindingInfo()}.
*/
protected void initApp()
throws IOException, BadConfigException, BadClusterStateException {
String historyDirName = getTestName();
applicationId = new MockApplicationId(1, 0);
applicationAttemptId = new MockApplicationAttemptId(applicationId, 1);
historyWorkDir = new File("target/history", historyDirName);
historyPath = new Path(historyWorkDir.toURI());
fs.delete(historyPath, true);
appState = new MockAppState(buildBindingInfo());
stateAccess = new ProviderAppState(getTestName(), appState);
}
/**
* Build the binding info from the default constructor values,
* the roles from {@link #factory}, and an instance definition.
* from {@link #buildApplication()} ()}
* @return
*/
protected AppStateBindingInfo buildBindingInfo() {
AppStateBindingInfo binding = new AppStateBindingInfo();
binding.application = buildApplication();
//binding.roles = new ArrayList<>(factory.ROLES);
binding.fs = fs;
binding.historyPath = historyPath;
binding.nodeReports = engine.getNodeReports();
return binding;
}
/**
* Override point, define the instance definition.
* @return the instance definition
*/
public Application buildApplication() {
return factory.newApplication(0, 0, 0).name(getTestName());
}
/**
* Get the test name ... defaults to method name
* @return the method name
*/
public String getTestName() {
return methodName.getMethodName();
}
public RoleStatus getRole0Status() {
return lookupRole(ROLE0);
}
public RoleStatus lookupRole(String role) {
return appState.lookupRoleStatus(role);
}
public RoleStatus getRole1Status() {
return lookupRole(ROLE1);
}
public RoleStatus getRole2Status() {
return lookupRole(ROLE2);
}
/**
* Build a role instance from a container assignment.
* @param assigned
* @return the instance
*/
public RoleInstance roleInstance(ContainerAssignment assigned) {
Container target = assigned.container;
RoleInstance ri = new RoleInstance(target);
ri.roleId = assigned.role.getPriority();
ri.role = assigned.role.getName();
return ri;
}
public NodeInstance nodeInstance(long age, int live0, int live1, int live2) {
NodeInstance ni = new NodeInstance(String.format("age%d-[%d,%d,%d]", age,
live0, live1, live2), MockFactory.ROLE_COUNT);
ni.getOrCreate(getRole0Status().getKey()).setLastUsed(age);
ni.getOrCreate(getRole0Status().getKey()).setLive(live0);
if (live1 > 0) {
ni.getOrCreate(getRole1Status().getKey()).setLive(live1);
}
if (live2 > 0) {
ni.getOrCreate(getRole2Status().getKey()).setLive(live2);
}
return ni;
}
/**
* Create a container status event.
* @param c container
* @return a status
*/
ContainerStatus containerStatus(Container c) {
return containerStatus(c.getId());
}
/**
* Create a container status instance for the given ID, declaring
* that it was shut down by the application itself.
* @param cid container Id
* @return the instance
*/
public ContainerStatus containerStatus(ContainerId cid) {
ContainerStatus status = containerStatus(cid,
LauncherExitCodes.EXIT_CLIENT_INITIATED_SHUTDOWN);
return status;
}
public ContainerStatus containerStatus(ContainerId cid, int exitCode) {
ContainerStatus status = ContainerStatus.newInstance(
cid,
ContainerState.COMPLETE,
"",
exitCode);
return status;
}
/**
* Create nodes and bring them to the started state.
* @return a list of roles
*/
protected List<RoleInstance> createAndStartNodes()
throws TriggerClusterTeardownException, SliderInternalStateException {
return createStartAndStopNodes(new ArrayList<>());
}
/**
* Create, Start and stop nodes.
* @param completionResults List filled in with the status on all completed
* nodes
* @return the nodes
*/
public List<RoleInstance> createStartAndStopNodes(
List<AppState.NodeCompletionResult> completionResults)
throws TriggerClusterTeardownException, SliderInternalStateException {
List<ContainerId> released = new ArrayList<>();
List<RoleInstance> instances = createAndSubmitNodes(released);
processSubmissionOperations(instances, completionResults, released);
return instances;
}
/**
* Process the start/stop operations.
* @param instances
* @param completionResults
* @param released
*/
public void processSubmissionOperations(
List<RoleInstance> instances,
List<AppState.NodeCompletionResult> completionResults,
List<ContainerId> released) {
for (RoleInstance instance : instances) {
LOG.debug("Started {} on {}", instance.role, instance.id);
assertNotNull(appState.onNodeManagerContainerStarted(instance
.getContainerId()));
}
releaseContainers(completionResults,
released,
ContainerState.COMPLETE,
"released",
0
);
}
/**
* Release a list of containers, updating the completion results.
* @param completionResults
* @param containerIds
* @param containerState
* @param exitText
* @param containerExitCode
* @return
*/
public void releaseContainers(
List<AppState.NodeCompletionResult> completionResults,
List<ContainerId> containerIds,
ContainerState containerState,
String exitText,
int containerExitCode) {
for (ContainerId id : containerIds) {
ContainerStatus status = ContainerStatus.newInstance(id,
containerState,
exitText,
containerExitCode);
completionResults.add(appState.onCompletedContainer(status));
}
}
/**
* Create nodes and submit them.
* @return a list of roles
*/
public List<RoleInstance> createAndSubmitNodes()
throws TriggerClusterTeardownException, SliderInternalStateException {
return createAndSubmitNodes(new ArrayList<>());
}
/**
* Create nodes and submit them.
* @return a list of roles
*/
public List<RoleInstance> createAndSubmitNodes(List<ContainerId> containerIds)
throws TriggerClusterTeardownException, SliderInternalStateException {
return createAndSubmitNodes(containerIds, new ArrayList<>());
}
/**
* Create nodes and submit them.
* @return a list of roles allocated
*/
public List<RoleInstance> createAndSubmitNodes(
List<ContainerId> containerIds,
List<AbstractRMOperation> operationsOut)
throws TriggerClusterTeardownException, SliderInternalStateException {
List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
return submitOperations(ops, containerIds, operationsOut);
}
public List<RoleInstance> submitOperations(
List<AbstractRMOperation> operationsIn,
List<ContainerId> released) {
return submitOperations(operationsIn, released, new ArrayList<>());
}
/**
* Process the RM operations and send <code>onContainersAllocated</code>
* events to the app state.
* @param operationsIn list of incoming ops
* @param released released containers
* @return list of outbound operations
*/
public List<RoleInstance> submitOperations(
List<AbstractRMOperation> operationsIn,
List<ContainerId> released,
List<AbstractRMOperation> operationsOut) {
List<Container> allocatedContainers = engine.execute(operationsIn,
released);
List<ContainerAssignment> assignments = new ArrayList<>();
appState.onContainersAllocated(allocatedContainers, assignments,
operationsOut);
List<RoleInstance> roles = new ArrayList<>();
for (ContainerAssignment assigned : assignments) {
Container container = assigned.container;
RoleInstance ri = roleInstance(assigned);
//tell the app it arrived
LOG.debug("Start submitted {} on ${}", ri.role, container.getId());
appState.containerStartSubmitted(container, ri);
roles.add(ri);
}
return roles;
}
/**
* Add the AM to the app state.
*/
protected void addAppMastertoAppState() {
// appState.buildAppMasterNode(
// new MockContainerId(applicationAttemptId, 999999L),
// "appmaster",
// 0,
// null);
}
/**
* Extract the list of container IDs from the list of role instances.
* @param instances instance list
* @param role role to look up
* @return the list of CIDs
*/
public List<ContainerId> extractContainerIds(
List<RoleInstance> instances,
String role) {
List<ContainerId> ids = new ArrayList<>();
for (RoleInstance ri : instances) {
if (ri.role.equals(role)) {
ids.add(ri.getContainerId());
}
}
return ids;
}
/**
* Record a node as failing.
* @param node
* @param id
* @param count
* @return the entry
*/
public NodeEntry recordAsFailed(NodeInstance node, int id, int count) {
NodeEntry entry = node.getOrCreate(id);
for (int i = 1; i <= count; i++) {
entry.containerCompleted(
false,
ContainerOutcome.Failed);
}
return entry;
}
protected void recordAllFailed(int id, int count, List<NodeInstance> nodes) {
for (NodeInstance node : nodes) {
recordAsFailed(node, id, count);
}
}
/**
* Get the container request of an indexed entry. Includes some assertions
* for better diagnostics
* @param ops operation list
* @param index index in the list
* @return the request.
*/
public AMRMClient.ContainerRequest getRequest(List<AbstractRMOperation> ops,
int index) {
assertTrue(index < ops.size());
AbstractRMOperation op = ops.get(index);
assertTrue(op instanceof ContainerRequestOperation);
return ((ContainerRequestOperation) op).getRequest();
}
/**
* Get the cancel request of an indexed entry. Includes some assertions for
* better diagnostics
* @param ops operation list
* @param index index in the list
* @return the request.
*/
public AMRMClient.ContainerRequest getCancel(List<AbstractRMOperation> ops,
int index) {
assertTrue(index < ops.size());
AbstractRMOperation op = ops.get(index);
assertTrue(op instanceof CancelSingleRequest);
return ((CancelSingleRequest) op).getRequest();
}
/**
* Get the single request of a list of operations; includes the check for
* the size.
* @param ops operations list of size 1
* @return the request within the first ContainerRequestOperation
*/
public AMRMClient.ContainerRequest getSingleRequest(
List<AbstractRMOperation> ops) {
assertEquals(1, ops.size());
return getRequest(ops, 0);
}
/**
* Get the single request of a list of operations; includes the check for
* the size.
* @param ops operations list of size 1
* @return the request within the first operation
*/
public AMRMClient.ContainerRequest getSingleCancel(
List<AbstractRMOperation> ops) {
assertEquals(1, ops.size());
return getCancel(ops, 0);
}
/**
* Get the single release of a list of operations; includes the check for
* the size.
* @param ops operations list of size 1
* @return the request within the first operation
*/
public ContainerReleaseOperation getSingleRelease(
List<AbstractRMOperation> ops) {
assertEquals(1, ops.size());
AbstractRMOperation op = ops.get(0);
assertTrue(op instanceof ContainerReleaseOperation);
return (ContainerReleaseOperation) op;
}
/**
* Get the node information as a large JSON String.
* @return
*/
protected String nodeInformationSnapshotAsString()
throws UnsupportedEncodingException, JsonProcessingException {
return prettyPrintAsJson(stateAccess.getNodeInformationSnapshot());
}
/**
* Scan through all containers and assert that the assignment is AA.
* @param index role index
*/
protected void assertAllContainersAA(int index) {
for (Entry<String, NodeInstance> nodeMapEntry : cloneNodemap().entrySet()) {
String name = nodeMapEntry.getKey();
NodeInstance ni = nodeMapEntry.getValue();
NodeEntry nodeEntry = ni.get(index);
assertTrue("too many instances on node " + name, nodeEntry == null ||
nodeEntry.isAntiAffinityConstraintHeld());
}
}
/**
* Get a snapshot of the nodemap of the application state.
* @return a cloned nodemap
*/
protected NodeMap cloneNodemap() {
return appState.getRoleHistory().cloneNodemap();
}
/**
* Issue a nodes updated event.
* @param report report to notify
* @return response of AM
*/
protected AppState.NodeUpdatedOutcome updateNodes(NodeReport report) {
return appState.onNodesUpdated(Collections.singletonList(report));
}
}

View File

@ -0,0 +1,26 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.slider.server.appmaster.model.mock;
import org.apache.slider.server.appmaster.SliderAppMaster;
/**
 * Mock AM: a bare subclass of {@link SliderAppMaster} with no behavior of
 * its own, used where tests need a concrete app-master type.
 */
public class MockAM extends SliderAppMaster {
}

Some files were not shown because too many files have changed in this diff Show More