Add EC2 credential test for repository-s3 (#31918)

Relates to #26913
Vladimir Dolzhenko 2018-07-18 12:18:00 +02:00 committed by GitHub
parent 5856c396dd
commit 8235b254ab
6 changed files with 485 additions and 80 deletions

View File

@@ -137,7 +137,10 @@ class ClusterConfiguration {
this.project = project
}
Map<String, String> systemProperties = new HashMap<>()
// **Note** for systemProperties, settings, keystoreFiles etc:
// value could be a GString that is evaluated to just a String
// there are cases when value depends on task that is not executed yet on configuration stage
Map<String, Object> systemProperties = new HashMap<>()
Map<String, Object> settings = new HashMap<>()
@@ -157,7 +160,7 @@ class ClusterConfiguration {
List<Object> dependencies = new ArrayList<>()
@Input
void systemProperty(String property, String value) {
void systemProperty(String property, Object value) {
systemProperties.put(property, value)
}
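The widening from Map<String, String> to Map<String, Object> is what lets a value be a lazily evaluated GString, per the note above. A rough Java analogue of that behaviour, using a hypothetical Lazy wrapper (none of these names come from the build code):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class LazyValueDemo {
    // Computes its string form only when toString() is called, much like a
    // GString interpolating a "${ -> ... }" expression at render time.
    static final class Lazy {
        private final Supplier<String> supplier;
        Lazy(Supplier<String> supplier) { this.supplier = supplier; }
        @Override public String toString() { return supplier.get(); }
    }

    public static void main(String[] args) {
        Map<String, Object> systemProperties = new HashMap<>();
        String[] address = new String[1]; // not known at configuration time
        systemProperties.put("fixture.endpoint", new Lazy(() -> "http://" + address[0]));
        address[0] = "127.0.0.1:8080";    // set later, e.g. once a fixture task has run
        // Rendering the map now picks up the late-bound value.
        systemProperties.forEach((k, v) -> System.out.println("-D" + k + "=" + v));
    }
}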

View File

@@ -609,7 +609,6 @@ class ClusterFormationTasks {
/** Adds a task to start an elasticsearch node with the given configuration */
static Task configureStartTask(String name, Project project, Task setup, NodeInfo node) {
// this closure is converted into ant nodes by groovy's AntBuilder
Closure antRunner = { AntBuilder ant ->
ant.exec(executable: node.executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') {
@@ -630,13 +629,6 @@ class ClusterFormationTasks {
node.writeWrapperScript()
}
// we must add debug options inside the closure so the config is read at execution time, as
// gradle task options are not processed until the end of the configuration phase
if (node.config.debug) {
println 'Running elasticsearch in debug mode, suspending until connected on port 8000'
node.env['ES_JAVA_OPTS'] = '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000'
}
node.getCommandString().eachLine { line -> logger.info(line) }
if (logger.isInfoEnabled() || node.config.daemonize == false) {
@@ -654,6 +646,27 @@ class ClusterFormationTasks {
}
start.doLast(elasticsearchRunner)
start.doFirst {
// Configure ES_JAVA_OPTS: add system properties, assertion flags, remote debug options, etc.
List<String> esJavaOpts = [node.env.get('ES_JAVA_OPTS', '')]
if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) {
// put the enable assertions options before other options to allow
// flexibility to disable assertions for specific packages or classes
// in the cluster-specific options
esJavaOpts.add("-ea")
esJavaOpts.add("-esa")
}
String collectedSystemProperties = node.config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
esJavaOpts.add(collectedSystemProperties)
esJavaOpts.add(node.config.jvmArgs)
// we must add debug options inside the closure so the config is read at execution time, as
// gradle task options are not processed until the end of the configuration phase
if (node.config.debug) {
println 'Running elasticsearch in debug mode, suspending until connected on port 8000'
esJavaOpts.add('-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000')
}
node.env['ES_JAVA_OPTS'] = esJavaOpts.join(" ")
project.logger.info("Starting node in ${node.clusterName} distribution: ${node.config.distribution}")
}
return start
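Taken together, the doFirst block above assembles ES_JAVA_OPTS only at execution time. A compact Java sketch of the same assembly, with hypothetical flag values standing in for the collected system properties and jvmArgs:

import java.util.ArrayList;
import java.util.List;

public class EsJavaOptsDemo {
    public static void main(String[] args) {
        List<String> esJavaOpts = new ArrayList<>();
        esJavaOpts.add(System.getenv().getOrDefault("ES_JAVA_OPTS", ""));
        if (Boolean.parseBoolean(System.getProperty("tests.asserts", "true"))) {
            esJavaOpts.add("-ea");  // assertions come before cluster-specific options
            esJavaOpts.add("-esa"); // so a later "-da:some.Class" can still disable them
        }
        esJavaOpts.add("-Dtests.seed=C0FFEE");  // collected system properties (hypothetical)
        esJavaOpts.add("-Xmx512m");             // cluster-specific jvmArgs (hypothetical)
        if (Boolean.getBoolean("node.debug")) { // stand-in for node.config.debug
            esJavaOpts.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000");
        }
        System.out.println(String.join(" ", esJavaOpts).trim());
    }
}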

View File

@@ -180,15 +180,7 @@ class NodeInfo {
}
args.addAll("-E", "node.portsfile=true")
String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) {
// put the enable assertions options before other options to allow
// flexibility to disable assertions for specific packages or classes
// in the cluster-specific options
esJavaOpts = String.join(" ", "-ea", "-esa", esJavaOpts)
}
env = ['ES_JAVA_OPTS': esJavaOpts]
env = [:]
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.key.startsWith('tests.es.')) {
args.add("-E")

View File

@@ -89,18 +89,26 @@ String s3TemporarySessionToken = System.getenv("amazon_s3_session_token_temporary")
String s3TemporaryBucket = System.getenv("amazon_s3_bucket_temporary")
String s3TemporaryBasePath = System.getenv("amazon_s3_base_path_temporary")
String s3EC2Bucket = System.getenv("amazon_s3_bucket_ec2")
String s3EC2BasePath = System.getenv("amazon_s3_base_path_ec2")
// If all these variables are missing then we are testing against the internal fixture instead, which has the following
// credentials hard-coded in.
if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath) {
if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath
&& !s3EC2Bucket && !s3EC2BasePath) {
s3PermanentAccessKey = 's3_integration_test_permanent_access_key'
s3PermanentSecretKey = 's3_integration_test_permanent_secret_key'
s3PermanentBucket = 'permanent-bucket-test'
s3PermanentBasePath = 'integration_test'
s3EC2Bucket = 'ec2-bucket-test'
s3EC2BasePath = 'integration_test'
useFixture = true
} else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath) {
} else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath
|| !s3EC2Bucket || !s3EC2BasePath) {
throw new IllegalArgumentException("not all options specified to run against external S3 service")
}
@@ -274,24 +282,52 @@ if (useFixture && minioDistribution) {
integTestMinioRunner.dependsOn(startMinio)
integTestMinioRunner.finalizedBy(stopMinio)
// Minio only supports a single access key, see https://github.com/minio/minio/pull/5968
integTestMinioRunner.systemProperty 'tests.rest.blacklist', 'repository_s3/30_repository_temporary_credentials/*'
integTestMinioRunner.systemProperty 'tests.rest.blacklist', [
'repository_s3/30_repository_temporary_credentials/*',
'repository_s3/40_repository_ec2_credentials/*'
].join(",")
project.check.dependsOn(integTestMinio)
}
File parentFixtures = new File(project.buildDir, "fixtures")
File s3FixtureFile = new File(parentFixtures, 's3Fixture.properties')
task s3FixtureProperties {
outputs.file(s3FixtureFile)
def s3FixtureOptions = [
"tests.seed" : project.testSeed,
"s3Fixture.permanent_bucket_name" : s3PermanentBucket,
"s3Fixture.permanent_key" : s3PermanentAccessKey,
"s3Fixture.temporary_bucket_name" : s3TemporaryBucket,
"s3Fixture.temporary_key" : s3TemporaryAccessKey,
"s3Fixture.temporary_session_token": s3TemporarySessionToken,
"s3Fixture.ec2_bucket_name" : s3EC2Bucket
]
doLast {
file(s3FixtureFile).text = s3FixtureOptions.collect { k, v -> "$k = $v" }.join("\n")
}
}
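With the fixture defaults above in play, the generated build/fixtures/s3Fixture.properties comes out roughly like this (the seed and the temporary bucket name shown here are illustrative; the other values are the hard-coded fixture credentials):

tests.seed = C0FFEE0123456789
s3Fixture.permanent_bucket_name = permanent-bucket-test
s3Fixture.permanent_key = s3_integration_test_permanent_access_key
s3Fixture.temporary_bucket_name = temporary-bucket-test
s3Fixture.temporary_key = s3_integration_test_temporary_access_key
s3Fixture.temporary_session_token = s3_integration_test_temporary_session_token
s3Fixture.ec2_bucket_name = ec2-bucket-test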
/** A task to start the AmazonS3Fixture which emulates an S3 service **/
task s3Fixture(type: AntFixture) {
dependsOn testClasses
dependsOn s3FixtureProperties
inputs.file(s3FixtureFile)
env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
executable = new File(project.runtimeJavaHome, 'bin/java')
args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3PermanentBucket, s3TemporaryBucket
args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3FixtureFile.getAbsolutePath()
}
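Given the new main() signature further down, the fixture is now launched with a working directory and a single property file instead of per-bucket positional arguments, along the lines of (classpath and working directory abbreviated):

java -cp <test runtime classpath> org.elasticsearch.repositories.s3.AmazonS3Fixture <working directory> build/fixtures/s3Fixture.properties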
Map<String, Object> expansions = [
'permanent_bucket': s3PermanentBucket,
'permanent_base_path': s3PermanentBasePath,
'temporary_bucket': s3TemporaryBucket,
'temporary_base_path': s3TemporaryBasePath
'temporary_base_path': s3TemporaryBasePath,
'ec2_bucket': s3EC2Bucket,
'ec2_base_path': s3EC2BasePath
]
processTestResources {
@@ -319,6 +355,10 @@ integTestCluster {
/* Use a closure on the string to delay evaluation until tests are executed */
setting 's3.client.integration_test_permanent.endpoint', "http://${-> s3Fixture.addressAndPort}"
setting 's3.client.integration_test_temporary.endpoint', "http://${-> s3Fixture.addressAndPort}"
setting 's3.client.integration_test_ec2.endpoint', "http://${-> s3Fixture.addressAndPort}"
// to redirect InstanceProfileCredentialsProvider to custom auth point
systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", "http://${-> s3Fixture.addressAndPort}"
} else {
println "Using an external service to test the repository-s3 plugin"
}
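The endpoint override above relies on AWS SDK v1 honouring the com.amazonaws.sdk.ec2MetadataServiceEndpointOverride system property. A minimal sketch of that mechanism, assuming the fixture listens on a local address:

import com.amazonaws.SDKGlobalConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;

public class MetadataOverrideDemo {
    public static void main(String[] args) {
        // The constant resolves to "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride"
        // and redirects instance-metadata lookups away from the real EC2 endpoint.
        System.setProperty(SDKGlobalConfiguration.EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY,
                "http://127.0.0.1:8080"); // hypothetical fixture address
        AWSCredentials credentials = InstanceProfileCredentialsProvider.getInstance().getCredentials();
        System.out.println(credentials.getAWSAccessKeyId());
    }
}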

View File

@@ -18,6 +18,14 @@
*/
package org.elasticsearch.repositories.s3;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.common.TriFunction;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.test.fixture.AbstractHttpFixture;
import com.amazonaws.util.DateUtils;
import org.elasticsearch.common.Strings;
@@ -26,20 +34,26 @@ import org.elasticsearch.common.path.PathTrie;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.RestUtils;
import org.elasticsearch.test.fixture.AbstractHttpFixture;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiAlphanumOfLength;
import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiAlphanumOfLengthBetween;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
/**
* {@link AmazonS3Fixture} emulates an AWS S3 service
@@ -47,63 +61,76 @@ import static java.nio.charset.StandardCharsets.UTF_8;
* The implementation is based on official documentation available at https://docs.aws.amazon.com/AmazonS3/latest/API/.
*/
public class AmazonS3Fixture extends AbstractHttpFixture {
private static final String AUTH = "AUTH";
private static final String NON_AUTH = "NON_AUTH";
private static final String EC2_PROFILE = "ec2Profile";
private final Properties properties;
private final Random random;
/** List of the buckets stored on this test server **/
private final Map<String, Bucket> buckets = ConcurrentCollections.newConcurrentMap();
/** Request handlers for the requests made by the S3 client **/
private final PathTrie<RequestHandler> handlers;
private final String permanentBucketName;
private final String temporaryBucketName;
/**
* Creates a {@link AmazonS3Fixture}
*/
private AmazonS3Fixture(final String workingDir, final String permanentBucketName, final String temporaryBucketName) {
private AmazonS3Fixture(final String workingDir, Properties properties) {
super(workingDir);
this.permanentBucketName = permanentBucketName;
this.temporaryBucketName = temporaryBucketName;
this.properties = properties;
this.random = new Random(Long.parseUnsignedLong(requireNonNull(properties.getProperty("tests.seed")), 16));
this.buckets.put(permanentBucketName, new Bucket(permanentBucketName));
this.buckets.put(temporaryBucketName, new Bucket(temporaryBucketName));
this.handlers = defaultHandlers(buckets);
new Bucket("s3Fixture.permanent", false);
new Bucket("s3Fixture.temporary", true);
final Bucket ec2Bucket = new Bucket("s3Fixture.ec2",
randomAsciiAlphanumOfLength(random, 10), randomAsciiAlphanumOfLength(random, 10));
this.handlers = defaultHandlers(buckets, ec2Bucket);
}
private static String nonAuthPath(Request request) {
return nonAuthPath(request.getMethod(), request.getPath());
}
private static String nonAuthPath(String method, String path) {
return NON_AUTH + " " + method + " " + path;
}
private static String authPath(Request request) {
return authPath(request.getMethod(), request.getPath());
}
private static String authPath(String method, String path) {
return AUTH + " " + method + " " + path;
}
@Override
protected Response handle(final Request request) throws IOException {
final RequestHandler handler = handlers.retrieve(request.getMethod() + " " + request.getPath(), request.getParameters());
final String nonAuthorizedPath = nonAuthPath(request);
final RequestHandler nonAuthorizedHandler = handlers.retrieve(nonAuthorizedPath, request.getParameters());
if (nonAuthorizedHandler != null) {
return nonAuthorizedHandler.handle(request);
}
final String authorizedPath = authPath(request);
final RequestHandler handler = handlers.retrieve(authorizedPath, request.getParameters());
if (handler != null) {
final String authorization = request.getHeader("Authorization");
final String permittedBucket;
if (authorization.contains("s3_integration_test_permanent_access_key")) {
final String sessionToken = request.getHeader("x-amz-security-token");
if (sessionToken != null) {
return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Unexpected session token", "");
}
permittedBucket = permanentBucketName;
} else if (authorization.contains("s3_integration_test_temporary_access_key")) {
final String sessionToken = request.getHeader("x-amz-security-token");
if (sessionToken == null) {
return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "No session token", "");
}
if (sessionToken.equals("s3_integration_test_temporary_session_token") == false) {
return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad session token", "");
}
permittedBucket = temporaryBucketName;
} else {
final String bucketName = request.getParam("bucket");
if (bucketName == null) {
return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad access key", "");
}
final String bucket = request.getParam("bucket");
if (bucket != null && permittedBucket.equals(bucket) == false) {
// allow a null bucket to support the multi-object-delete API which
// passes the bucket name in the host header instead of the URL.
if (buckets.containsKey(bucket)) {
return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad bucket", "");
} else {
return newBucketNotFoundError(request.getId(), bucket);
}
final Bucket bucket = buckets.get(bucketName);
if (bucket == null) {
return newBucketNotFoundError(request.getId(), bucketName);
}
final Response authResponse = authenticateBucket(request, bucket);
if (authResponse != null) {
return authResponse;
}
return handler.handle(request);
} else {
@@ -111,24 +138,49 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
}
}
public static void main(final String[] args) throws Exception {
if (args == null || args.length != 3) {
throw new IllegalArgumentException(
"AmazonS3Fixture <working directory> <bucket for permanent creds> <bucket for temporary creds>");
private Response authenticateBucket(Request request, Bucket bucket) {
final String authorization = request.getHeader("Authorization");
if (authorization == null) {
return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad access key", "");
}
if (authorization.contains(bucket.key)) {
final String sessionToken = request.getHeader("x-amz-security-token");
if (bucket.token == null) {
if (sessionToken != null) {
return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Unexpected session token", "");
}
} else {
if (sessionToken == null) {
return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "No session token", "");
}
if (sessionToken.equals(bucket.token) == false) {
return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad session token", "");
}
}
}
return null;
}
final AmazonS3Fixture fixture = new AmazonS3Fixture(args[0], args[1], args[2]);
public static void main(final String[] args) throws Exception {
if (args == null || args.length != 2) {
throw new IllegalArgumentException("AmazonS3Fixture <working directory> <property file>");
}
final Properties properties = new Properties();
try (InputStream is = Files.newInputStream(PathUtils.get(args[1]))) {
properties.load(is);
}
final AmazonS3Fixture fixture = new AmazonS3Fixture(args[0], properties);
fixture.listen();
}
/** Builds the default request handlers **/
private static PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> buckets) {
private PathTrie<RequestHandler> defaultHandlers(final Map<String, Bucket> buckets, final Bucket ec2Bucket) {
final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
// HEAD Object
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
objectsPaths("HEAD /{bucket}").forEach(path ->
objectsPaths(authPath(HttpHead.METHOD_NAME, "/{bucket}")).forEach(path ->
handlers.insert(path, (request) -> {
final String bucketName = request.getParam("bucket");
@@ -150,7 +202,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
// PUT Object
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
objectsPaths("PUT /{bucket}").forEach(path ->
objectsPaths(authPath(HttpPut.METHOD_NAME, "/{bucket}")).forEach(path ->
handlers.insert(path, (request) -> {
final String destBucketName = request.getParam("bucket");
@@ -200,7 +252,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
// DELETE Object
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
objectsPaths("DELETE /{bucket}").forEach(path ->
objectsPaths(authPath(HttpDelete.METHOD_NAME, "/{bucket}")).forEach(path ->
handlers.insert(path, (request) -> {
final String bucketName = request.getParam("bucket");
@@ -218,7 +270,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
// GET Object
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
objectsPaths("GET /{bucket}").forEach(path ->
objectsPaths(authPath(HttpGet.METHOD_NAME, "/{bucket}")).forEach(path ->
handlers.insert(path, (request) -> {
final String bucketName = request.getParam("bucket");
@@ -239,7 +291,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
// HEAD Bucket
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html
handlers.insert("HEAD /{bucket}", (request) -> {
handlers.insert(authPath(HttpHead.METHOD_NAME, "/{bucket}"), (request) -> {
String bucket = request.getParam("bucket");
if (Strings.hasText(bucket) && buckets.containsKey(bucket)) {
return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
@@ -251,7 +303,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
// GET Bucket (List Objects) Version 1
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
handlers.insert("GET /{bucket}/", (request) -> {
handlers.insert(authPath(HttpGet.METHOD_NAME, "/{bucket}/"), (request) -> {
final String bucketName = request.getParam("bucket");
final Bucket bucket = buckets.get(bucketName);
@@ -269,7 +321,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
// Delete Multiple Objects
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
handlers.insert("POST /", (request) -> {
handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/"), (request) -> {
final List<String> deletes = new ArrayList<>();
final List<String> errors = new ArrayList<>();
@@ -292,7 +344,12 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
boolean found = false;
for (Bucket bucket : buckets.values()) {
if (bucket.objects.remove(objectName) != null) {
if (bucket.objects.containsKey(objectName)) {
final Response authResponse = authenticateBucket(request, bucket);
if (authResponse != null) {
return authResponse;
}
bucket.objects.remove(objectName);
found = true;
}
}
@@ -311,23 +368,80 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
return newInternalError(request.getId(), "Something is wrong with this POST multiple deletes request");
});
// non-authorized requests
TriFunction<String, String, String, Response> credentialResponseFunction = (profileName, key, token) -> {
final Date expiration = new Date(new Date().getTime() + TimeUnit.DAYS.toMillis(1));
final String response = "{"
+ "\"AccessKeyId\": \"" + key + "\","
+ "\"Expiration\": \"" + DateUtils.formatISO8601Date(expiration) + "\","
+ "\"RoleArn\": \"" + randomAsciiAlphanumOfLengthBetween(random, 1, 20) + "\","
+ "\"SecretAccessKey\": \"" + randomAsciiAlphanumOfLengthBetween(random, 1, 20) + "\","
+ "\"Token\": \"" + token + "\""
+ "}";
final Map<String, String> headers = new HashMap<>(contentType("application/json"));
return new Response(RestStatus.OK.getStatus(), headers, response.getBytes(UTF_8));
};
// GET
//
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/latest/meta-data/iam/security-credentials/"), (request) -> {
final String response = EC2_PROFILE;
final Map<String, String> headers = new HashMap<>(contentType("text/plain"));
return new Response(RestStatus.OK.getStatus(), headers, response.getBytes(UTF_8));
});
// GET
//
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/latest/meta-data/iam/security-credentials/{profileName}"), (request) -> {
final String profileName = request.getParam("profileName");
if (EC2_PROFILE.equals(profileName) == false) {
return new Response(RestStatus.NOT_FOUND.getStatus(), new HashMap<>(), "unknown credentials".getBytes(UTF_8));
}
return credentialResponseFunction.apply(profileName, ec2Bucket.key, ec2Bucket.token);
});
return handlers;
}
private static String prop(Properties properties, String propertyName) {
return requireNonNull(properties.getProperty(propertyName),
"property '" + propertyName + "' is missing");
}
/**
* Represents an S3 bucket.
*/
static class Bucket {
class Bucket {
/** Bucket name **/
final String name;
final String key;
final String token;
/** Blobs contained in the bucket **/
final Map<String, byte[]> objects;
Bucket(final String name) {
this.name = Objects.requireNonNull(name);
private Bucket(final String prefix, final boolean tokenRequired) {
this(prefix, prop(properties, prefix + "_key"),
tokenRequired ? prop(properties, prefix + "_session_token") : null);
}
private Bucket(final String prefix, final String key, final String token) {
this.name = prop(properties, prefix + "_bucket_name");
this.key = key;
this.token = token;
this.objects = ConcurrentCollections.newConcurrentMap();
if (buckets.put(name, this) != null) {
throw new IllegalArgumentException("bucket " + name + " is already registered");
}
}
}
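For reference, the two-step lookup that InstanceProfileCredentialsProvider performs against the two metadata handlers above can be reproduced by hand. A sketch, with the fixture address assumed:

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class Ec2MetadataWalkthrough {
    private static String get(String url) throws IOException {
        HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection();
        try (InputStream in = connection.getInputStream()) {
            return new String(in.readAllBytes(), StandardCharsets.UTF_8);
        }
    }

    public static void main(String[] args) throws IOException {
        String base = "http://127.0.0.1:8080"; // hypothetical fixture address
        // Step 1: list the instance profiles; the fixture answers "ec2Profile".
        String profile = get(base + "/latest/meta-data/iam/security-credentials/");
        // Step 2: fetch the JSON credentials document for that profile, carrying the
        // AccessKeyId, SecretAccessKey, Token, Expiration and RoleArn fields.
        System.out.println(get(base + "/latest/meta-data/iam/security-credentials/" + profile));
    }
}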

View File

@@ -0,0 +1,243 @@
# Integration tests for repository-s3
---
setup:
# Register repository with ec2 credentials
- do:
snapshot.create_repository:
repository: repository_ec2
body:
type: s3
settings:
bucket: ${ec2_bucket}
client: integration_test_ec2
base_path: ${ec2_base_path}
canned_acl: private
storage_class: standard
---
"Snapshot and Restore with repository-s3 using ec2 credentials":
# Get repository
- do:
snapshot.get_repository:
repository: repository_ec2
- match: { repository_ec2.settings.bucket : ${ec2_bucket} }
- match: { repository_ec2.settings.client : "integration_test_ec2" }
- match: { repository_ec2.settings.base_path : ${ec2_base_path} }
- match: { repository_ec2.settings.canned_acl : "private" }
- match: { repository_ec2.settings.storage_class : "standard" }
- is_false: repository_ec2.settings.access_key
- is_false: repository_ec2.settings.secret_key
- is_false: repository_ec2.settings.session_token
# Index documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 1
- snapshot: one
- index:
_index: docs
_type: doc
_id: 2
- snapshot: one
- index:
_index: docs
_type: doc
_id: 3
- snapshot: one
- do:
count:
index: docs
- match: {count: 3}
# Create a first snapshot
- do:
snapshot.create:
repository: repository_ec2
snapshot: snapshot-one
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-one }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.include_global_state: true }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.status:
repository: repository_ec2
snapshot: snapshot-one
- is_true: snapshots
- match: { snapshots.0.snapshot: snapshot-one }
- match: { snapshots.0.state : SUCCESS }
# Index more documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 4
- snapshot: two
- index:
_index: docs
_type: doc
_id: 5
- snapshot: two
- index:
_index: docs
_type: doc
_id: 6
- snapshot: two
- index:
_index: docs
_type: doc
_id: 7
- snapshot: two
- do:
count:
index: docs
- match: {count: 7}
# Create a second snapshot
- do:
snapshot.create:
repository: repository_ec2
snapshot: snapshot-two
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-two }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.get:
repository: repository_ec2
snapshot: snapshot-one,snapshot-two
- is_true: snapshots
- match: { snapshots.0.state : SUCCESS }
- match: { snapshots.1.state : SUCCESS }
# Delete the index
- do:
indices.delete:
index: docs
# Restore the second snapshot
- do:
snapshot.restore:
repository: repository_ec2
snapshot: snapshot-two
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 7}
# Delete the index again
- do:
indices.delete:
index: docs
# Restore the first snapshot
- do:
snapshot.restore:
repository: repository_ec2
snapshot: snapshot-one
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 3}
# Remove the snapshots
- do:
snapshot.delete:
repository: repository_ec2
snapshot: snapshot-two
- do:
snapshot.delete:
repository: repository_ec2
snapshot: snapshot-one
---
"Register a repository with a non existing bucket":
- do:
catch: /repository_exception/
snapshot.create_repository:
repository: repository_ec2
body:
type: s3
settings:
bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
client: integration_test_ec2
---
"Register a repository with a non existing client":
- do:
catch: /repository_exception/
snapshot.create_repository:
repository: repository_ec2
body:
type: s3
settings:
bucket: repository_ec2
client: unknown
---
"Get a non existing snapshot":
- do:
catch: /snapshot_missing_exception/
snapshot.get:
repository: repository_ec2
snapshot: missing
---
"Delete a non existing snapshot":
- do:
catch: /snapshot_missing_exception/
snapshot.delete:
repository: repository_ec2
snapshot: missing
---
"Restore a non existing snapshot":
- do:
catch: /snapshot_restore_exception/
snapshot.restore:
repository: repository_ec2
snapshot: missing
wait_for_completion: true
---
teardown:
# Remove our repository
- do:
snapshot.delete_repository:
repository: repository_ec2