Use fixture to test the repository-gcs plugin (#28788)

This commit adds a GoogleCloudStorageFixture that uses the
logic of a GoogleCloudStorageTestServer (added in #28576)
to emulate a remote Google Cloud Storage service.

By adding this fixture and a more complete integration test, we 
should be able to catch more bugs when upgrading the client library.

The fixture is started by the googleCloudStorageFixture task.
For each test run, a custom service account file is generated
and added to the Elasticsearch keystore.
Tanguy Leroux 2018-03-09 13:57:27 +01:00 committed by GitHub
parent 033a83b98b
commit 4756790d6e
6 changed files with 537 additions and 62 deletions


@@ -1,3 +1,8 @@
import org.elasticsearch.gradle.test.AntFixture
import java.security.KeyPair
import java.security.KeyPairGenerator
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@@ -52,3 +57,48 @@ thirdPartyAudit.excludes = [
'org.apache.log.Hierarchy',
'org.apache.log.Logger',
]
/** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/
task googleCloudStorageFixture(type: AntFixture) {
dependsOn compileTestJava
executable = new File(project.runtimeJavaHome, 'bin/java')
args '-cp', "${ -> project.sourceSets.test.runtimeClasspath.asPath }",
'org.elasticsearch.repositories.gcs.GoogleCloudStorageFixture',
baseDir, 'bucket_test'
}
/** A service account file that points to the Google Cloud Storage service emulated by the fixture **/
File serviceAccountFile = new File(project.buildDir, "generated-resources/service_account_test.json")
task createServiceAccountFile() {
dependsOn googleCloudStorageFixture
doLast {
KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA")
keyPairGenerator.initialize(1024)
KeyPair keyPair = keyPairGenerator.generateKeyPair()
String encodedKey = Base64.getEncoder().encodeToString(keyPair.private.getEncoded())
serviceAccountFile.parentFile.mkdirs()
serviceAccountFile.setText("{\n" +
' "type": "service_account",\n' +
' "project_id": "integration_test",\n' +
' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' +
' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' +
' "client_email": "integration_test@appspot.gserviceaccount.com",\n' +
' "client_id": "123456789101112130594",\n' +
" \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" +
" \"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" +
' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' +
' "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' +
'}', 'UTF-8')
}
}
integTestCluster {
dependsOn createServiceAccountFile, googleCloudStorageFixture
setupCommand 'create-elasticsearch-keystore', 'bin/elasticsearch-keystore', 'create'
setupCommand 'add-credentials-to-elasticsearch-keystore',
'bin/elasticsearch-keystore', 'add-file', 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}"
/* Use a closure on the string to delay evaluation until tests are executed */
setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }"
}


@@ -0,0 +1,135 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories.gcs;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.elasticsearch.repositories.gcs.GoogleCloudStorageTestServer.Response;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.List;
import java.util.Map;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonList;
/**
* {@link GoogleCloudStorageFixture} is a fixture that emulates a Google Cloud Storage service.
* <p>
* It starts an asynchronous socket server that binds to a random local port. The server parses
* HTTP requests and uses a {@link GoogleCloudStorageTestServer} to handle them, then returns
* the resulting responses to the client.
*/
public class GoogleCloudStorageFixture {
@SuppressForbidden(reason = "PathUtils#get is fine - we don't have environment here")
public static void main(String[] args) throws Exception {
if (args == null || args.length != 2) {
throw new IllegalArgumentException("GoogleCloudStorageFixture <working directory> <bucket>");
}
final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0);
try {
final Path workingDirectory = Paths.get(args[0]);
// Writes the PID of the current Java process in a `pid` file located in the working directory
writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
final String addressAndPort = addressToString(httpServer.getAddress());
// Writes the address and port of the http server in a `ports` file located in the working directory
writeFile(workingDirectory, "ports", addressAndPort);
// Emulates a Google Cloud Storage server
final String storageUrl = "http://" + addressAndPort;
final GoogleCloudStorageTestServer storageTestServer = new GoogleCloudStorageTestServer(storageUrl);
storageTestServer.createBucket(args[1]);
httpServer.createContext("/", new ResponseHandler(storageTestServer));
httpServer.start();
// Wait to be killed
Thread.sleep(Long.MAX_VALUE);
} finally {
httpServer.stop(0);
}
}
private static void writeFile(final Path dir, final String fileName, final String content) throws IOException {
final Path tempPidFile = Files.createTempFile(dir, null, null);
Files.write(tempPidFile, singleton(content));
Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
}
private static String addressToString(final SocketAddress address) {
final InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
if (inetSocketAddress.getAddress() instanceof Inet6Address) {
return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort();
} else {
return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort();
}
}
@SuppressForbidden(reason = "Use a http server")
static class ResponseHandler implements HttpHandler {
private final GoogleCloudStorageTestServer storageServer;
private ResponseHandler(final GoogleCloudStorageTestServer storageServer) {
this.storageServer = storageServer;
}
@Override
public void handle(HttpExchange exchange) throws IOException {
String method = exchange.getRequestMethod();
String path = storageServer.getEndpoint() + exchange.getRequestURI().getRawPath();
String query = exchange.getRequestURI().getRawQuery();
Map<String, List<String>> headers = exchange.getRequestHeaders();
ByteArrayOutputStream out = new ByteArrayOutputStream();
Streams.copy(exchange.getRequestBody(), out);
final Response storageResponse = storageServer.handle(method, path, query, headers, out.toByteArray());
Map<String, List<String>> responseHeaders = exchange.getResponseHeaders();
responseHeaders.put("Content-Type", singletonList(storageResponse.contentType));
storageResponse.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v)));
exchange.sendResponseHeaders(storageResponse.status.getStatus(), storageResponse.body.length);
if (storageResponse.body.length > 0) {
exchange.getResponseBody().write(storageResponse.body);
}
exchange.close();
}
}
}
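
For context, a minimal smoke-test sketch (not part of the commit; the class name and the fixture working directory are assumptions) showing how a script could exercise a running fixture over plain HTTP: it reads the address written to the `ports` file and issues the "GET Bucket" call for the `bucket_test` bucket created at startup.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class GoogleCloudStorageFixtureSmokeTest {

    public static void main(String[] args) throws Exception {
        // The fixture writes "host:port" (or "[ipv6]:port") to a `ports` file in its working
        // directory; this path is an assumption and must match the AntFixture baseDir.
        final String addressAndPort = Files.readAllLines(
                Paths.get("build/fixtures/googleCloudStorageFixture/ports")).get(0).trim();

        // "GET Bucket" against the emulated service, answered by the GoogleCloudStorageTestServer
        final URL url = new URL("http://" + addressAndPort + "/storage/v1/b/bucket_test");
        final HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
            System.out.println("HTTP " + connection.getResponseCode());
            reader.lines().forEach(System.out::println);
        } finally {
            connection.disconnect();
        }
    }
}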


@@ -19,7 +19,6 @@
package org.elasticsearch.repositories.gcs;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.path.PathTrie;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -27,10 +26,11 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.RestUtils;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
@@ -39,13 +39,15 @@ import java.util.Map;
import java.util.Objects;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
/**
* {@link GoogleCloudStorageTestServer} emulates a Google Cloud Storage service through a {@link #handle(String, String, byte[])} method
* that provides appropriate responses for specific requests like the real Google Cloud platform would do. It is largely based on official
* documentation available at https://cloud.google.com/storage/docs/json_api/v1/.
* {@link GoogleCloudStorageTestServer} emulates a Google Cloud Storage service through
* a {@link #handle(String, String, String, Map, byte[])} method that provides appropriate
* responses for specific requests like the real Google Cloud platform would do.
* It is largely based on official documentation available at https://cloud.google.com/storage/docs/json_api/v1/.
*/
public class GoogleCloudStorageTestServer {
@@ -57,19 +59,22 @@ public class GoogleCloudStorageTestServer {
/** Request handlers for the requests made by the Google Cloud Storage client **/
private final PathTrie<RequestHandler> handlers;
/** Server endpoint **/
private final String endpoint;
/**
* Creates a {@link GoogleCloudStorageTestServer} with the default endpoint
*/
GoogleCloudStorageTestServer() {
this("https://www.googleapis.com", true);
this("https://www.googleapis.com");
}
/**
* Creates a {@link GoogleCloudStorageTestServer} with a custom endpoint,
* potentially prefixing the URL patterns to match with the endpoint name.
* Creates a {@link GoogleCloudStorageTestServer} with a custom endpoint
*/
GoogleCloudStorageTestServer(final String endpoint, final boolean prefixWithEndpoint) {
this.handlers = defaultHandlers(endpoint, prefixWithEndpoint, buckets);
GoogleCloudStorageTestServer(final String endpoint) {
this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null");
this.handlers = defaultHandlers(endpoint, buckets);
}
/** Creates a bucket in the test server **/
@@ -77,24 +82,61 @@ public class GoogleCloudStorageTestServer {
buckets.put(bucketName, new Bucket(bucketName));
}
public Response handle(final String method, final String url, byte[] content) throws IOException {
final Map<String, String> params = new HashMap<>();
public String getEndpoint() {
return endpoint;
}
// Splits the URL to extract query string parameters
final String rawPath;
int questionMark = url.indexOf('?');
if (questionMark != -1) {
rawPath = url.substring(0, questionMark);
RestUtils.decodeQueryString(url, questionMark + 1, params);
} else {
rawPath = url;
/**
* Returns a Google Cloud Storage response for the given request
*
* @param method the HTTP method of the request
* @param url the HTTP URL of the request
* @param headers the HTTP headers of the request
* @param body the HTTP request body
* @return a {@link Response}
*
* @throws IOException if something goes wrong
*/
public Response handle(final String method,
final String url,
final Map<String, List<String>> headers,
byte[] body) throws IOException {
final int questionMark = url.indexOf('?');
if (questionMark == -1) {
return handle(method, url, null, headers, body);
}
return handle(method, url.substring(0, questionMark), url.substring(questionMark + 1), headers, body);
}
/**
* Returns a Google Cloud Storage response for the given request
*
* @param method the HTTP method of the request
* @param path the path of the URL of the request
* @param query the query string of the request URL
* @param headers the HTTP headers of the request
* @param body the HTTP request body
* @return a {@link Response}
* @throws IOException if something goes wrong
*/
public Response handle(final String method,
final String path,
final String query,
final Map<String, List<String>> headers,
byte[] body) throws IOException {
final Map<String, String> params = new HashMap<>();
if (query != null) {
RestUtils.decodeQueryString(query, 0, params);
}
final RequestHandler handler = handlers.retrieve(method + " " + rawPath, params);
final RequestHandler handler = handlers.retrieve(method + " " + path, params);
if (handler != null) {
return handler.execute(url, params, content);
return handler.execute(params, headers, body);
} else {
return newError(RestStatus.INTERNAL_SERVER_ERROR, "No handler defined for request [method: " + method + ", url: " + url + "]");
return newError(RestStatus.INTERNAL_SERVER_ERROR,
"No handler defined for request [method: " + method + ", path: " + path + "]");
}
}
@@ -104,28 +146,24 @@ public class GoogleCloudStorageTestServer {
/**
* Simulates the execution of a Storage request and returns a corresponding response.
*
* @param url the request URL
* @param params the request URL parameters
* @param params the request's query string parameters
* @param headers the request's headers
* @param body the request body provided as a byte array
* @return the corresponding response
*
* @throws IOException if something goes wrong
*/
Response execute(String url, Map<String, String> params, byte[] body) throws IOException;
Response execute(Map<String, String> params, Map<String, List<String>> headers, byte[] body) throws IOException;
}
/** Builds the default request handlers **/
private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
final boolean prefixWithEndpoint,
final Map<String, Bucket> buckets) {
private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, final Map<String, Bucket> buckets) {
final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
final String prefix = prefixWithEndpoint ? endpoint : "";
// GET Bucket
//
// https://cloud.google.com/storage/docs/json_api/v1/buckets/get
handlers.insert("GET " + prefix + "/storage/v1/b/{bucket}", (url, params, body) -> {
handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}", (params, headers, body) -> {
String name = params.get("bucket");
if (Strings.hasText(name) == false) {
return newError(RestStatus.INTERNAL_SERVER_ERROR, "bucket name is missing");
@@ -141,7 +179,7 @@ public class GoogleCloudStorageTestServer {
// GET Object
//
// https://cloud.google.com/storage/docs/json_api/v1/objects/get
handlers.insert("GET " + prefix + "/storage/v1/b/{bucket}/o/{object}", (url, params, body) -> {
handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
String objectName = params.get("object");
if (Strings.hasText(objectName) == false) {
return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
@@ -163,7 +201,7 @@ public class GoogleCloudStorageTestServer {
// Delete Object
//
// https://cloud.google.com/storage/docs/json_api/v1/objects/delete
handlers.insert("DELETE " + prefix + "/storage/v1/b/{bucket}/o/{object}", (url, params, body) -> {
handlers.insert("DELETE " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
String objectName = params.get("object");
if (Strings.hasText(objectName) == false) {
return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
@@ -184,7 +222,7 @@ public class GoogleCloudStorageTestServer {
// Insert Object (initialization)
//
// https://cloud.google.com/storage/docs/json_api/v1/objects/insert
handlers.insert("POST " + prefix + "/upload/storage/v1/b/{bucket}/o", (url, params, body) -> {
handlers.insert("POST " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> {
if ("resumable".equals(params.get("uploadType")) == false) {
return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable");
}
@@ -210,7 +248,7 @@ public class GoogleCloudStorageTestServer {
// Insert Object (upload)
//
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload
handlers.insert("PUT " + prefix + "/upload/storage/v1/b/{bucket}/o", (url, params, body) -> {
handlers.insert("PUT " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> {
String objectId = params.get("upload_id");
if (Strings.hasText(objectId) == false) {
return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload id is missing");
@@ -232,7 +270,7 @@ public class GoogleCloudStorageTestServer {
// Copy Object
//
// https://cloud.google.com/storage/docs/json_api/v1/objects/copy
handlers.insert("POST " + prefix + "/storage/v1/b/{srcBucket}/o/{src}/copyTo/b/{destBucket}/o/{dest}", (url, params, body) -> {
handlers.insert("POST " + endpoint + "/storage/v1/b/{srcBucket}/o/{src}/copyTo/b/{destBucket}/o/{dest}", (params, headers, body)-> {
String source = params.get("src");
if (Strings.hasText(source) == false) {
return newError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing");
@@ -265,7 +303,7 @@ public class GoogleCloudStorageTestServer {
// List Objects
//
// https://cloud.google.com/storage/docs/json_api/v1/objects/list
handlers.insert("GET " + prefix + "/storage/v1/b/{bucket}/o", (url, params, body) -> {
handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}/o", (params, headers, body) -> {
final Bucket bucket = buckets.get(params.get("bucket"));
if (bucket == null) {
return newError(RestStatus.NOT_FOUND, "bucket not found");
@@ -293,7 +331,7 @@ public class GoogleCloudStorageTestServer {
// Download Object
//
// https://cloud.google.com/storage/docs/request-body
handlers.insert("GET " + prefix + "/download/storage/v1/b/{bucket}/o/{object}", (url, params, body) -> {
handlers.insert("GET " + endpoint + "/download/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
String object = params.get("object");
if (Strings.hasText(object) == false) {
return newError(RestStatus.INTERNAL_SERVER_ERROR, "object id is missing");
@@ -314,7 +352,7 @@ public class GoogleCloudStorageTestServer {
// Batch
//
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch
handlers.insert("POST " + prefix + "/batch", (url, params, req) -> {
handlers.insert("POST " + endpoint + "/batch", (params, headers, body) -> {
final List<Response> batchedResponses = new ArrayList<>();
// A batch request body looks like this:
@@ -339,37 +377,88 @@ public class GoogleCloudStorageTestServer {
//
// --__END_OF_PART__--
// Here we simply process the request body line by line and delegate to other handlers
// if possible.
Streams.readAllLines(new BufferedInputStream(new ByteArrayInputStream(req)), line -> {
final int indexOfHttp = line.indexOf(" HTTP/1.1");
if (indexOfHttp > 0) {
line = line.substring(0, indexOfHttp);
}
// Default multipart boundary
String boundary = "__END_OF_PART__";
RequestHandler handler = handlers.retrieve(line, params);
if (handler != null) {
try {
batchedResponses.add(handler.execute(line, params, req));
} catch (IOException e) {
batchedResponses.add(newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()));
// Determine the multipart boundary
final List<String> contentTypes = headers.getOrDefault("Content-Type", headers.get("Content-type"));
if (contentTypes != null) {
final String contentType = contentTypes.get(0);
if (contentType != null && contentType.contains("multipart/mixed; boundary=")) {
boundary = contentType.replace("multipart/mixed; boundary=", "");
}
}
// Read line by line the batched requests
try (BufferedReader reader = new BufferedReader(
new InputStreamReader(
new ByteArrayInputStream(body), StandardCharsets.UTF_8))) {
String line;
while ((line = reader.readLine()) != null) {
// Start of a batched request
if (line.equals("--" + boundary)) {
Map<String, List<String>> batchedHeaders = new HashMap<>();
// Reads the headers, if any
while ((line = reader.readLine()) != null) {
if (line.equals("\r\n") || line.length() == 0) {
// end of headers
break;
} else {
String[] header = line.split(":", 2);
batchedHeaders.put(header[0], singletonList(header[1]));
}
}
// Reads the method and URL
line = reader.readLine();
String batchedUrl = line.substring(0, line.lastIndexOf(' '));
final Map<String, String> batchedParams = new HashMap<>();
int questionMark = batchedUrl.indexOf('?');
if (questionMark != -1) {
RestUtils.decodeQueryString(batchedUrl.substring(questionMark + 1), 0, batchedParams);
}
// Reads the body
line = reader.readLine();
byte[] batchedBody = new byte[0];
if (line != null && line.startsWith("--" + boundary) == false) {
batchedBody = line.getBytes(StandardCharsets.UTF_8);
}
// Executes the batched request
RequestHandler handler = handlers.retrieve(batchedUrl, batchedParams);
if (handler != null) {
try {
batchedResponses.add(handler.execute(batchedParams, batchedHeaders, batchedBody));
} catch (IOException e) {
batchedResponses.add(newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()));
}
}
}
}
});
}
// Now we can build the response
String boundary = "__END_OF_PART__";
String sep = "--";
String line = "\r\n";
StringBuilder builder = new StringBuilder();
for (Response response : batchedResponses) {
builder.append(sep).append(boundary).append(line);
builder.append("Content-Type: application/http").append(line);
builder.append(line);
builder.append("HTTP/1.1 ").append(response.status.getStatus());
builder.append(' ').append(response.status.toString());
builder.append(line);
builder.append("HTTP/1.1 ")
.append(response.status.getStatus())
.append(' ')
.append(response.status.toString())
.append(line);
builder.append("Content-Length: ").append(response.body.length).append(line);
builder.append("Content-Type: ").append(response.contentType).append(line);
response.headers.forEach((k, v) -> builder.append(k).append(": ").append(v).append(line));
builder.append(line);
builder.append(new String(response.body, StandardCharsets.UTF_8)).append(line);
builder.append(line);
}
builder.append(line);
@@ -379,6 +468,17 @@ public class GoogleCloudStorageTestServer {
return new Response(RestStatus.OK, emptyMap(), "multipart/mixed; boundary=" + boundary, content);
});
// Fake refresh of an OAuth2 token
//
handlers.insert("POST " + endpoint + "/o/oauth2/token", (url, params, req) ->
newResponse(RestStatus.OK, emptyMap(), jsonBuilder()
.startObject()
.field("access_token", "unknown")
.field("token_type", "Bearer")
.field("expires_in", 3600)
.endObject())
);
return handlers;
}
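
To illustrate the reworked handle(...) entry points and the header-driven batch parsing added above, here is a minimal sketch (not part of the commit; the class name, endpoint, boundary and object name are invented, and same-package access to the package-private constructor is assumed) that feeds a hand-built batch body directly to the test server:

package org.elasticsearch.repositories.gcs;

import org.elasticsearch.repositories.gcs.GoogleCloudStorageTestServer.Response;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static java.util.Collections.singletonList;

public class GoogleCloudStorageTestServerExample {

    public static void main(String[] args) throws Exception {
        // Endpoint and bucket name are arbitrary values for this sketch
        final GoogleCloudStorageTestServer server = new GoogleCloudStorageTestServer("http://localhost:1234");
        server.createBucket("bucket_test");

        // A single batched DELETE wrapped in a custom multipart boundary
        final String boundary = "batch_boundary";
        final String batchBody =
                  "--" + boundary + "\r\n"
                + "Content-Type: application/http\r\n"
                + "\r\n"
                + "DELETE http://localhost:1234/storage/v1/b/bucket_test/o/missing HTTP/1.1\r\n"
                + "\r\n"
                + "--" + boundary + "--\r\n";

        // The /batch handler reads the boundary from the Content-Type header of the request
        final Map<String, List<String>> headers = new HashMap<>();
        headers.put("Content-Type", singletonList("multipart/mixed; boundary=" + boundary));

        // The four-argument overload splits off the query string (none here) and dispatches to the handlers
        final Response response = server.handle("POST", "http://localhost:1234/batch", headers,
                batchBody.getBytes(StandardCharsets.UTF_8));

        // The batch handler answers with a multipart/mixed payload containing one HTTP response per batched request
        System.out.println(response.contentType);
        System.out.println(new String(response.body, StandardCharsets.UTF_8));
    }
}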


@@ -49,8 +49,7 @@ class MockStorage extends com.google.api.client.testing.http.MockHttpTransport {
return new MockLowLevelHttpRequest() {
@Override
public LowLevelHttpResponse execute() throws IOException {
final GoogleCloudStorageTestServer.Response response = server.handle(method, url, getContentAsBytes());
return convert(response);
return convert(server.handle(method, url, getHeaders(), getContentAsBytes()));
}
/** Returns the LowLevelHttpRequest body as an array of bytes **/


@@ -1,6 +1,6 @@
# Integration tests for Repository GCS component
# Integration tests for repository-gcs
#
"Repository GCS loaded":
"Plugin repository-gcs is loaded":
- do:
cluster.state: {}
@@ -11,3 +11,176 @@
nodes.info: {}
- match: { nodes.$master.plugins.0.name: repository-gcs }
---
"Snapshot/Restore with repository-gcs":
- skip:
version: " - 6.3.0"
reason: repository-gcs was not testable through YAML tests until 6.3.0
# Register repository
- do:
snapshot.create_repository:
repository: repository
body:
type: gcs
settings:
bucket: "bucket_test"
client: "integration_test"
- match: { acknowledged: true }
# Index documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 1
- snapshot: one
- index:
_index: docs
_type: doc
_id: 2
- snapshot: one
- index:
_index: docs
_type: doc
_id: 3
- snapshot: one
- do:
count:
index: docs
- match: {count: 3}
# Create a first snapshot
- do:
snapshot.create:
repository: repository
snapshot: snapshot-one
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-one }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.include_global_state: true }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.status:
repository: repository
snapshot: snapshot-one
- is_true: snapshots
- match: { snapshots.0.snapshot: snapshot-one }
- match: { snapshots.0.state : SUCCESS }
# Index more documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 4
- snapshot: two
- index:
_index: docs
_type: doc
_id: 5
- snapshot: two
- index:
_index: docs
_type: doc
_id: 6
- snapshot: two
- index:
_index: docs
_type: doc
_id: 7
- snapshot: two
- do:
count:
index: docs
- match: {count: 7}
# Create a second snapshot
- do:
snapshot.create:
repository: repository
snapshot: snapshot-two
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-two }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.get:
repository: repository
snapshot: snapshot-one,snapshot-two
- is_true: snapshots
- match: { snapshots.0.state : SUCCESS }
- match: { snapshots.1.state : SUCCESS }
# Delete the index
- do:
indices.delete:
index: docs
# Restore the second snapshot
- do:
snapshot.restore:
repository: repository
snapshot: snapshot-two
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 7}
# Delete the index again
- do:
indices.delete:
index: docs
# Restore the first snapshot
- do:
snapshot.restore:
repository: repository
snapshot: snapshot-one
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 3}
# Remove the snapshots
- do:
snapshot.delete:
repository: repository
snapshot: snapshot-two
- do:
snapshot.delete:
repository: repository
snapshot: snapshot-one
# Remove our repository
- do:
snapshot.delete_repository:
repository: repository


@@ -0,0 +1,18 @@
# Integration tests for repository-gcs
#
"Plugin repository-gcs is loaded":
- do:
cluster.state: {}
# Get master node id
- set: { master_node: master }
- do:
nodes.info: {}
- match: { nodes.$master.plugins.0.name: repository-gcs }
---
"Snapshot/Restore with repository-gcs":
- do:
      indices.create: