Use ESBlobStoreRepositoryIntegTestCase to test the repository-s3 plugin (#29315)

This commit adds the S3BlobStoreRepositoryTests class, which extends the
base blob store repository testing class for the repository-s3 plugin. It also
removes the mock socket server that emulated socket connections in unit
tests: it existed only to trigger security exceptions, and it is no longer
needed now that #29296 has been merged.
Tanguy Leroux 2018-04-05 13:34:02 +02:00 committed by GitHub
parent dccd43af47
commit d813a05b9f
5 changed files with 278 additions and 219 deletions
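
For context on the security-exception machinery mentioned above: any AmazonS3 call may open a socket, so the plugin wraps each SDK call in a doPrivileged block and grants itself a SocketPermission in plugin-security.policy. The sketch below illustrates the pattern; the SocketAccess helper name and signature are assumptions inferred from the comments in the diff, not a verbatim copy of the plugin's class.

import java.security.AccessController;
import java.security.PrivilegedAction;

// Assumed helper: runs an S3 SDK call with the plugin's own permissions, so the
// SocketPermission declared in plugin-security.policy applies instead of the
// caller's (absent) permission.
final class SocketAccess {

    private SocketAccess() {}

    static <T> T doPrivileged(PrivilegedAction<T> operation) {
        return AccessController.doPrivileged(operation);
    }
}

// A call site wraps any AmazonS3 method that can open a connection, e.g.:
// S3Object object = SocketAccess.doPrivileged(() -> client.getObject(request));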

S3Repository.java

@@ -151,8 +151,7 @@ class S3Repository extends BlobStoreRepository {
     /**
      * Constructs an s3 backed repository
      */
-    S3Repository(RepositoryMetaData metadata, Settings settings,
-                 NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) throws IOException {
+    S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) {
         super(metadata, settings, namedXContentRegistry);

         String bucket = BUCKET_SETTING.get(metadata.settings());
MockAmazonS3.java

@@ -20,14 +20,14 @@
 package org.elasticsearch.repositories.s3;

 import com.amazonaws.AmazonClientException;
-import com.amazonaws.AmazonServiceException;
 import com.amazonaws.SdkClientException;
 import com.amazonaws.services.s3.AbstractAmazonS3;
 import com.amazonaws.services.s3.model.AmazonS3Exception;
 import com.amazonaws.services.s3.model.CopyObjectRequest;
 import com.amazonaws.services.s3.model.CopyObjectResult;
 import com.amazonaws.services.s3.model.DeleteObjectRequest;
-import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
+import com.amazonaws.services.s3.model.DeleteObjectsRequest;
+import com.amazonaws.services.s3.model.DeleteObjectsResult;
 import com.amazonaws.services.s3.model.GetObjectRequest;
 import com.amazonaws.services.s3.model.ListObjectsRequest;
 import com.amazonaws.services.s3.model.ObjectListing;
@@ -37,197 +37,163 @@ import com.amazonaws.services.s3.model.PutObjectResult;
 import com.amazonaws.services.s3.model.S3Object;
 import com.amazonaws.services.s3.model.S3ObjectInputStream;
 import com.amazonaws.services.s3.model.S3ObjectSummary;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.Streams;

+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.UncheckedIOException;
-import java.net.InetAddress;
-import java.net.Socket;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentMap;

-import static org.junit.Assert.assertTrue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;

 class MockAmazonS3 extends AbstractAmazonS3 {

-    private final int mockSocketPort;
+    private final ConcurrentMap<String, byte[]> blobs;
+    private final String bucket;
+    private final boolean serverSideEncryption;
+    private final String cannedACL;
+    private final String storageClass;

-    private Map<String, InputStream> blobs = new ConcurrentHashMap<>();
-
-    // in ESBlobStoreContainerTestCase.java, the maximum
-    // length of the input data is 100 bytes
-    private byte[] byteCounter = new byte[100];
-
-    MockAmazonS3(int mockSocketPort) {
-        this.mockSocketPort = mockSocketPort;
-    }
-
-    // Simulate a socket connection to check that SocketAccess.doPrivileged() is used correctly.
-    // Any method of AmazonS3 might potentially open a socket to the S3 service. Firstly, a call
-    // to any method of AmazonS3 has to be wrapped by SocketAccess.doPrivileged().
-    // Secondly, each method on the stack from doPrivileged to opening the socket has to be
-    // located in a jar that is provided by the plugin.
-    // Thirdly, a SocketPermission has to be configured in plugin-security.policy.
-    // By opening a socket in each method of MockAmazonS3 it is ensured that in production AmazonS3
-    // is able to open a socket to the S3 Service without causing a SecurityException
-    private void simulateS3SocketConnection() {
-        try (Socket socket = new Socket(InetAddress.getByName("127.0.0.1"), mockSocketPort)) {
-            assertTrue(socket.isConnected()); // NOOP to keep static analysis happy
-        } catch (IOException e) {
-            throw new UncheckedIOException(e);
-        }
-    }
-
-    @Override
-    public boolean doesBucketExist(String bucket) {
-        return true;
+    MockAmazonS3(final ConcurrentMap<String, byte[]> blobs,
+                 final String bucket,
+                 final boolean serverSideEncryption,
+                 final String cannedACL,
+                 final String storageClass) {
+        this.blobs = Objects.requireNonNull(blobs);
+        this.bucket = Objects.requireNonNull(bucket);
+        this.serverSideEncryption = serverSideEncryption;
+        this.cannedACL = cannedACL;
+        this.storageClass = storageClass;
     }

     @Override
-    public boolean doesObjectExist(String bucketName, String objectName) throws AmazonServiceException, SdkClientException {
-        simulateS3SocketConnection();
+    public boolean doesBucketExist(final String bucket) {
+        return this.bucket.equalsIgnoreCase(bucket);
+    }
+
+    @Override
+    public boolean doesObjectExist(final String bucketName, final String objectName) throws SdkClientException {
+        assertThat(bucketName, equalTo(bucket));
         return blobs.containsKey(objectName);
     }

     @Override
-    public ObjectMetadata getObjectMetadata(
-            GetObjectMetadataRequest getObjectMetadataRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        String blobName = getObjectMetadataRequest.getKey();
-
-        if (!blobs.containsKey(blobName)) {
-            throw new AmazonS3Exception("[" + blobName + "] does not exist.");
-        }
-
-        return new ObjectMetadata(); // nothing is done with it
-    }
-
-    @Override
-    public PutObjectResult putObject(PutObjectRequest putObjectRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        String blobName = putObjectRequest.getKey();
-
-        if (blobs.containsKey(blobName)) {
-            throw new AmazonS3Exception("[" + blobName + "] already exists.");
+    public PutObjectResult putObject(final PutObjectRequest request) throws AmazonClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+        assertThat(request.getMetadata().getSSEAlgorithm(), serverSideEncryption ? equalTo("AES256") : nullValue());
+        assertThat(request.getCannedAcl(), notNullValue());
+        assertThat(request.getCannedAcl().toString(), cannedACL != null ? equalTo(cannedACL) : equalTo("private"));
+        assertThat(request.getStorageClass(), storageClass != null ? equalTo(storageClass) : equalTo("STANDARD"));
+
+        final String blobName = request.getKey();
+        final ByteArrayOutputStream out = new ByteArrayOutputStream();
+        try {
+            Streams.copy(request.getInputStream(), out);
+            blobs.put(blobName, out.toByteArray());
+        } catch (IOException e) {
+            throw new AmazonClientException(e);
         }
-
-        blobs.put(blobName, putObjectRequest.getInputStream());
         return new PutObjectResult();
     }

     @Override
-    public S3Object getObject(GetObjectRequest getObjectRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        // in ESBlobStoreContainerTestCase.java, the prefix is empty,
-        // so the key and blobName are equivalent to each other
-        String blobName = getObjectRequest.getKey();
-
-        if (!blobs.containsKey(blobName)) {
-            throw new AmazonS3Exception("[" + blobName + "] does not exist.");
+    public S3Object getObject(final GetObjectRequest request) throws AmazonClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+
+        final String blobName = request.getKey();
+        final byte[] content = blobs.get(blobName);
+        if (content == null) {
+            AmazonS3Exception exception = new AmazonS3Exception("[" + blobName + "] does not exist.");
+            exception.setStatusCode(404);
+            throw exception;
         }

-        // the HTTP request attribute is irrelevant for reading
-        S3ObjectInputStream stream = new S3ObjectInputStream(
-                blobs.get(blobName), null, false);
+        ObjectMetadata metadata = new ObjectMetadata();
+        metadata.setContentLength(content.length);
+
         S3Object s3Object = new S3Object();
-        s3Object.setObjectContent(stream);
+        s3Object.setObjectContent(new S3ObjectInputStream(new ByteArrayInputStream(content), null, false));
         s3Object.setKey(blobName);
+        s3Object.setObjectMetadata(metadata);
         return s3Object;
     }

     @Override
-    public ObjectListing listObjects(ListObjectsRequest listObjectsRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        MockObjectListing list = new MockObjectListing();
-        list.setTruncated(false);
-
-        String blobName;
-        String prefix = listObjectsRequest.getPrefix();
-
-        ArrayList<S3ObjectSummary> mockObjectSummaries = new ArrayList<>();
-
-        for (Map.Entry<String, InputStream> blob : blobs.entrySet()) {
-            blobName = blob.getKey();
-            S3ObjectSummary objectSummary = new S3ObjectSummary();
-
-            if (prefix.isEmpty() || blobName.startsWith(prefix)) {
-                objectSummary.setKey(blobName);
-
-                try {
-                    objectSummary.setSize(getSize(blob.getValue()));
-                } catch (IOException e) {
-                    throw new AmazonS3Exception("Object listing " +
-                            "failed for blob [" + blob.getKey() + "]");
-                }
-
-                mockObjectSummaries.add(objectSummary);
+    public ObjectListing listObjects(final ListObjectsRequest request) throws AmazonClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+
+        final ObjectListing listing = new ObjectListing();
+        listing.setBucketName(request.getBucketName());
+        listing.setPrefix(request.getPrefix());
+
+        for (Map.Entry<String, byte[]> blob : blobs.entrySet()) {
+            if (Strings.isEmpty(request.getPrefix()) || blob.getKey().startsWith(request.getPrefix())) {
+                S3ObjectSummary summary = new S3ObjectSummary();
+                summary.setBucketName(request.getBucketName());
+                summary.setKey(blob.getKey());
+                summary.setSize(blob.getValue().length);
+                listing.getObjectSummaries().add(summary);
             }
         }
-
-        list.setObjectSummaries(mockObjectSummaries);
-        return list;
+        return listing;
     }

     @Override
-    public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        String sourceBlobName = copyObjectRequest.getSourceKey();
-        String targetBlobName = copyObjectRequest.getDestinationKey();
-
-        if (!blobs.containsKey(sourceBlobName)) {
-            throw new AmazonS3Exception("Source blob [" +
-                    sourceBlobName + "] does not exist.");
-        }
-
-        if (blobs.containsKey(targetBlobName)) {
-            throw new AmazonS3Exception("Target blob [" +
-                    targetBlobName + "] already exists.");
+    public CopyObjectResult copyObject(final CopyObjectRequest request) throws AmazonClientException {
+        assertThat(request.getSourceBucketName(), equalTo(bucket));
+        assertThat(request.getDestinationBucketName(), equalTo(bucket));
+
+        final String sourceBlobName = request.getSourceKey();
+        final byte[] content = blobs.get(sourceBlobName);
+        if (content == null) {
+            AmazonS3Exception exception = new AmazonS3Exception("[" + sourceBlobName + "] does not exist.");
+            exception.setStatusCode(404);
+            throw exception;
         }

-        blobs.put(targetBlobName, blobs.get(sourceBlobName));
-        return new CopyObjectResult(); // nothing is done with it
+        blobs.put(request.getDestinationKey(), content);
+        return new CopyObjectResult();
     }

     @Override
-    public void deleteObject(DeleteObjectRequest deleteObjectRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        String blobName = deleteObjectRequest.getKey();
-
-        if (!blobs.containsKey(blobName)) {
-            throw new AmazonS3Exception("[" + blobName + "] does not exist.");
+    public void deleteObject(final DeleteObjectRequest request) throws AmazonClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+
+        final String blobName = request.getKey();
+        if (blobs.remove(blobName) == null) {
+            AmazonS3Exception exception = new AmazonS3Exception("[" + blobName + "] does not exist.");
+            exception.setStatusCode(404);
+            throw exception;
         }
-
-        blobs.remove(blobName);
-    }
-
-    private int getSize(InputStream stream) throws IOException {
-        int size = stream.read(byteCounter);
-        stream.reset(); // in case we ever need the size again
-        return size;
     }

-    private class MockObjectListing extends ObjectListing {
-        // the objectSummaries attribute in ObjectListing.java
-        // is read-only, but we need to be able to write to it,
-        // so we create a mock of it to work around this
-        private List<S3ObjectSummary> mockObjectSummaries;
-
-        @Override
-        public List<S3ObjectSummary> getObjectSummaries() {
-            return mockObjectSummaries;
-        }
-
-        private void setObjectSummaries(List<S3ObjectSummary> objectSummaries) {
-            mockObjectSummaries = objectSummaries;
-        }
+    @Override
+    public DeleteObjectsResult deleteObjects(DeleteObjectsRequest request) throws SdkClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+
+        final List<DeleteObjectsResult.DeletedObject> deletions = new ArrayList<>();
+        for (DeleteObjectsRequest.KeyVersion key : request.getKeys()) {
+            if (blobs.remove(key.getKey()) == null) {
+                AmazonS3Exception exception = new AmazonS3Exception("[" + key + "] does not exist.");
+                exception.setStatusCode(404);
+                throw exception;
+            } else {
+                DeleteObjectsResult.DeletedObject deletion = new DeleteObjectsResult.DeletedObject();
+                deletion.setKey(key.getKey());
+                deletions.add(deletion);
+            }
+        }
+        return new DeleteObjectsResult(deletions);
     }
 }
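
The rewritten mock is plain shared state behind the AmazonS3 interface, so no sockets (and no special permissions) are involved. A minimal usage sketch, assuming only the constructor and methods visible in the diff above; real tests hand the client to an S3BlobStore rather than calling it directly:

import com.amazonaws.services.s3.AmazonS3;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class MockAmazonS3UsageSketch {
    static void example() {
        // Shared in-memory state; S3BlobStoreRepositoryTests clears this map after the test.
        ConcurrentMap<String, byte[]> blobs = new ConcurrentHashMap<>();
        // null cannedACL/storageClass fall back to the "private"/"STANDARD"
        // defaults asserted in putObject above.
        AmazonS3 client = new MockAmazonS3(blobs, "bucket", false, null, null);
        assert client.doesBucketExist("bucket");
        assert client.doesObjectExist("bucket", "blob") == false;
    }
}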

S3BlobStoreContainerTests.java

@@ -37,26 +37,19 @@ import com.amazonaws.services.s3.model.UploadPartResult;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.mocksocket.MockServerSocket;
 import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.mockito.ArgumentCaptor;

 import java.io.ByteArrayInputStream;
 import java.io.IOException;
-import java.net.InetAddress;
-import java.net.ServerSocket;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Locale;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;

+import static org.elasticsearch.repositories.s3.S3BlobStoreTests.randomMockS3BlobStore;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doNothing;
@@ -67,36 +60,11 @@ import static org.mockito.Mockito.when;

 public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {

-    private static ServerSocket mockS3ServerSocket;
-    private static Thread mockS3AcceptorThread;
-
-    // Opens a MockSocket to simulate connections to S3 checking that SocketPermissions are set up correctly.
-    // See MockAmazonS3.simulateS3SocketConnection.
-    @BeforeClass
-    public static void openMockSocket() throws IOException {
-        mockS3ServerSocket = new MockServerSocket(0, 50, InetAddress.getByName("127.0.0.1"));
-        mockS3AcceptorThread = new Thread(() -> {
-            while (!mockS3ServerSocket.isClosed()) {
-                try {
-                    // Accept connections from MockAmazonS3.
-                    mockS3ServerSocket.accept();
-                } catch (IOException e) {
-                }
-            }
-        });
-        mockS3AcceptorThread.start();
-    }
-
-    protected BlobStore newBlobStore() throws IOException {
-        MockAmazonS3 client = new MockAmazonS3(mockS3ServerSocket.getLocalPort());
-        String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
-
-        return new S3BlobStore(Settings.EMPTY, client, bucket, false,
-            new ByteSizeValue(10, ByteSizeUnit.MB), "public-read-write", "standard");
+    protected BlobStore newBlobStore() {
+        return randomMockS3BlobStore();
     }

-    public void testExecuteSingleUploadBlobSizeTooLarge() throws IOException {
+    public void testExecuteSingleUploadBlobSizeTooLarge() {
         final long blobSize = ByteSizeUnit.GB.toBytes(randomIntBetween(6, 10));
         final S3BlobStore blobStore = mock(S3BlobStore.class);
         final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore);
@@ -106,7 +74,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         assertEquals("Upload request size [" + blobSize + "] can't be larger than 5gb", e.getMessage());
     }

-    public void testExecuteSingleUploadBlobSizeLargerThanBufferSize() throws IOException {
+    public void testExecuteSingleUploadBlobSizeLargerThanBufferSize() {
         final S3BlobStore blobStore = mock(S3BlobStore.class);
         when(blobStore.bufferSizeInBytes()).thenReturn(ByteSizeUnit.MB.toBytes(1));
@@ -168,7 +136,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         }
     }

-    public void testExecuteMultipartUploadBlobSizeTooLarge() throws IOException {
+    public void testExecuteMultipartUploadBlobSizeTooLarge() {
         final long blobSize = ByteSizeUnit.TB.toBytes(randomIntBetween(6, 10));
         final S3BlobStore blobStore = mock(S3BlobStore.class);
         final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore);
@@ -179,7 +147,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         assertEquals("Multipart upload request size [" + blobSize + "] can't be larger than 5tb", e.getMessage());
     }

-    public void testExecuteMultipartUploadBlobSizeTooSmall() throws IOException {
+    public void testExecuteMultipartUploadBlobSizeTooSmall() {
         final long blobSize = ByteSizeUnit.MB.toBytes(randomIntBetween(1, 4));
         final S3BlobStore blobStore = mock(S3BlobStore.class);
         final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore);
@@ -291,7 +259,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         assertEquals(expectedEtags, actualETags);
     }

-    public void testExecuteMultipartUploadAborted() throws IOException {
+    public void testExecuteMultipartUploadAborted() {
         final String bucketName = randomAlphaOfLengthBetween(1, 10);
         final String blobName = randomAlphaOfLengthBetween(1, 10);
         final BlobPath blobPath = new BlobPath();
@@ -418,12 +386,4 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         assertEquals("Expected number of parts [" + expectedParts + "] but got [" + result.v1() + "]", expectedParts, (long) result.v1());
         assertEquals("Expected remaining [" + expectedRemaining + "] but got [" + result.v2() + "]", expectedRemaining, (long) result.v2());
     }
-
-    @AfterClass
-    public static void closeMockSocket() throws IOException, InterruptedException {
-        mockS3ServerSocket.close();
-        mockS3AcceptorThread.join();
-        mockS3AcceptorThread = null;
-        mockS3ServerSocket = null;
-    }
 }
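
The last two assertions above check the multipart split arithmetic. A hedged sketch of the expected relationship, assuming numberOfMultiparts() returns a (part count, size of last part) tuple; that semantics is inferred from the assertion messages rather than quoted from S3BlobContainer:

// All values in bytes; ByteSizeUnit is the Elasticsearch helper used above.
long partSize = ByteSizeUnit.MB.toBytes(10);
long blobSize = 5 * partSize + 3;                                // five full parts plus 3 bytes
long fullParts = blobSize / partSize;                            // 5
long remainder = blobSize % partSize;                            // 3
long expectedParts = remainder == 0 ? fullParts : fullParts + 1; // 6 parts in total
long expectedRemaining = remainder == 0 ? partSize : remainder;  // last part carries 3 bytes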

S3BlobStoreRepositoryTests.java (new file)

@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.s3;
+
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.model.CannedAccessControlList;
+import com.amazonaws.services.s3.model.StorageClass;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.repositories.Repository;
+import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static java.util.Collections.emptyMap;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase {
+
+    private static final ConcurrentMap<String, byte[]> blobs = new ConcurrentHashMap<>();
+    private static String bucket;
+    private static String client;
+    private static ByteSizeValue bufferSize;
+    private static boolean serverSideEncryption;
+    private static String cannedACL;
+    private static String storageClass;
+
+    @BeforeClass
+    public static void setUpRepositorySettings() {
+        bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
+        client = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
+        bufferSize = new ByteSizeValue(randomIntBetween(5, 50), ByteSizeUnit.MB);
+        serverSideEncryption = randomBoolean();
+        if (randomBoolean()) {
+            cannedACL = randomFrom(CannedAccessControlList.values()).toString();
+        }
+        if (randomBoolean()) {
+            storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString();
+        }
+    }
+
+    @AfterClass
+    public static void wipeRepository() {
+        blobs.clear();
+    }
+
+    @Override
+    protected void createTestRepository(final String name) {
+        assertAcked(client().admin().cluster().preparePutRepository(name)
+            .setType(S3Repository.TYPE)
+            .setSettings(Settings.builder()
+                .put(S3Repository.BUCKET_SETTING.getKey(), bucket)
+                .put(InternalAwsS3Service.CLIENT_NAME.getKey(), client)
+                .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize)
+                .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption)
+                .put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL)
+                .put(S3Repository.STORAGE_CLASS_SETTING.getKey(), storageClass)));
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Collections.singletonList(TestS3RepositoryPlugin.class);
+    }
+
+    public static class TestS3RepositoryPlugin extends S3RepositoryPlugin {
+
+        public TestS3RepositoryPlugin(final Settings settings) {
+            super(settings);
+        }
+
+        @Override
+        public Map<String, Repository.Factory> getRepositories(final Environment env, final NamedXContentRegistry registry) {
+            return Collections.singletonMap(S3Repository.TYPE, (metadata) ->
+                new S3Repository(metadata, env.settings(), registry, new InternalAwsS3Service(env.settings(), emptyMap()) {
+                    @Override
+                    public synchronized AmazonS3 client(final Settings repositorySettings) {
+                        return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass);
+                    }
+                }));
+        }
+    }
+}
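
Once createTestRepository() has registered the repository, the inherited tests can drive snapshot and restore cycles against the in-memory store. A hedged sketch of the kind of call the base class issues (repository and snapshot names here are illustrative):

// Runs inside the integration test, against the repository registered above.
client().admin().cluster()
        .prepareCreateSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(true)
        .get();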

S3BlobStoreTests.java

@@ -19,18 +19,29 @@
 package org.elasticsearch.repositories.s3;

+import com.amazonaws.services.s3.AmazonS3;
 import com.amazonaws.services.s3.model.CannedAccessControlList;
 import com.amazonaws.services.s3.model.StorageClass;
+import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.blobstore.BlobStoreException;
-import org.elasticsearch.repositories.s3.S3BlobStore;
-import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.repositories.ESBlobStoreTestCase;

-import java.io.IOException;
+import java.util.Locale;
+import java.util.concurrent.ConcurrentHashMap;

 import static org.hamcrest.Matchers.equalTo;

-public class S3BlobStoreTests extends ESTestCase {
+public class S3BlobStoreTests extends ESBlobStoreTestCase {

-    public void testInitCannedACL() throws IOException {
+    @Override
+    protected BlobStore newBlobStore() {
+        return randomMockS3BlobStore();
+    }
+
+    public void testInitCannedACL() {
         String[] aclList = new String[]{
             "private", "public-read", "public-read-write", "authenticated-read",
             "log-delivery-write", "bucket-owner-read", "bucket-owner-full-control"};
@@ -52,16 +63,12 @@ public class S3BlobStoreTests extends ESTestCase {
         }
     }

-    public void testInvalidCannedACL() throws IOException {
-        try {
-            S3BlobStore.initCannedACL("test_invalid");
-            fail("CannedACL should fail");
-        } catch (BlobStoreException ex) {
+    public void testInvalidCannedACL() {
+        BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initCannedACL("test_invalid"));
         assertThat(ex.getMessage(), equalTo("cannedACL is not valid: [test_invalid]"));
-        }
     }

-    public void testInitStorageClass() throws IOException {
+    public void testInitStorageClass() {
         // it should default to `standard`
         assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.Standard));
         assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.Standard));
@@ -72,25 +79,43 @@ public class S3BlobStoreTests extends ESTestCase {
         assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.ReducedRedundancy));
     }

-    public void testCaseInsensitiveStorageClass() throws IOException {
+    public void testCaseInsensitiveStorageClass() {
         assertThat(S3BlobStore.initStorageClass("sTandaRd"), equalTo(StorageClass.Standard));
         assertThat(S3BlobStore.initStorageClass("sTandaRd_Ia"), equalTo(StorageClass.StandardInfrequentAccess));
         assertThat(S3BlobStore.initStorageClass("reduCED_redundancy"), equalTo(StorageClass.ReducedRedundancy));
     }

-    public void testInvalidStorageClass() throws IOException {
-        try {
-            S3BlobStore.initStorageClass("whatever");
-        } catch(BlobStoreException ex) {
+    public void testInvalidStorageClass() {
+        BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initStorageClass("whatever"));
         assertThat(ex.getMessage(), equalTo("`whatever` is not a valid S3 Storage Class."));
-        }
     }

-    public void testRejectGlacierStorageClass() throws IOException {
-        try {
-            S3BlobStore.initStorageClass("glacier");
-        } catch(BlobStoreException ex) {
+    public void testRejectGlacierStorageClass() {
+        BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initStorageClass("glacier"));
         assertThat(ex.getMessage(), equalTo("Glacier storage class is not supported"));
-        }
     }
+
+    /**
+     * Creates a new {@link S3BlobStore} with random settings.
+     * <p>
+     * The blobstore uses a {@link MockAmazonS3} client.
+     */
+    public static S3BlobStore randomMockS3BlobStore() {
+        String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
+        ByteSizeValue bufferSize = new ByteSizeValue(randomIntBetween(5, 100), ByteSizeUnit.MB);
+        boolean serverSideEncryption = randomBoolean();
+
+        String cannedACL = null;
+        if (randomBoolean()) {
+            cannedACL = randomFrom(CannedAccessControlList.values()).toString();
+        }
+
+        String storageClass = null;
+        if (randomBoolean()) {
+            storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString();
+        }
+
+        AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass);
+        return new S3BlobStore(Settings.EMPTY, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
+    }
 }
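
For reference, an initStorageClass implementation consistent with the assertions above would look roughly like the following sketch; the real method lives in S3BlobStore and may differ in detail:

// Hedged sketch, reconstructed from the tests rather than copied from S3BlobStore.
public static StorageClass initStorageClass(String storageClass) {
    if (storageClass == null || storageClass.equals("")) {
        return StorageClass.Standard; // default, per testInitStorageClass
    }
    try {
        // StorageClass.fromValue is case-sensitive, hence the upper-casing
        // exercised by testCaseInsensitiveStorageClass
        StorageClass parsed = StorageClass.fromValue(storageClass.toUpperCase(Locale.ROOT));
        if (parsed.equals(StorageClass.Glacier)) {
            throw new BlobStoreException("Glacier storage class is not supported");
        }
        return parsed;
    } catch (IllegalArgumentException e) {
        throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class.");
    }
}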