Merge AwsS3Service and InternalAwsS3Service into an S3Service class (#31580)

The AwsS3Service interface and its InternalAwsS3Service implementation can be merged into a
single class, renamed to S3Service for consistency with the other S3-prefixed classes such as
S3BlobStore and S3Repository.
Tanguy Leroux 2018-06-28 10:40:53 +02:00 committed by GitHub
parent 8557bbab28
commit 9d523d05ed
12 changed files with 64 additions and 199 deletions
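
For orientation, the shape of the merged class is sketched below, reconstructed from the S3Service diff further down; the method bodies here are placeholders rather than the actual implementation.

// Sketch only: the public surface of the merged class, per the diffs below.
// Bodies are placeholders; the real logic appears in the S3Service hunks.
class S3Service extends AbstractComponent implements Closeable {

    S3Service(Settings settings) {
        super(settings);
    }

    // Creates or returns a cached, ref-counted client; callers must release it when done.
    public AmazonS3Reference client(String clientName) {
        throw new UnsupportedOperationException("placeholder, see diff below");
    }

    // Swaps in new client settings, clears the client cache, and returns the old settings.
    public synchronized Map<String, S3ClientSettings> refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings) {
        throw new UnsupportedOperationException("placeholder, see diff below");
    }

    @Override
    public void close() {
        // releases every cached client reference
    }
}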

View File

@@ -1,43 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories.s3;
import java.io.Closeable;
import java.util.Map;
interface AwsS3Service extends Closeable {
/**
* Creates then caches an {@code AmazonS3} client using the current client
* settings. Returns an {@code AmazonS3Reference} wrapper which has to be
* released as soon as it is not needed anymore.
*/
AmazonS3Reference client(String clientName);
/**
* Updates settings for building clients and clears the client cache. Future
* client requests will use the new settings to lazily build new clients.
*
* @param clientsSettings the new refreshed settings
* @return the old stale settings
*/
Map<String, S3ClientSettings> refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings);
}
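
The two contracts documented above are easiest to read next to a usage sketch. The snippet below is illustrative only: the s3Service and newSettings variables, the "default" client name, and the bucket/prefix are assumptions, but the try-with-resources release mirrors how the tests later in this diff use the client.

// Hedged usage sketch: obtain a ref-counted client and release it on close.
try (AmazonS3Reference reference = s3Service.client("default")) {      // "default" is illustrative
    AmazonS3 s3 = reference.client();
    s3.listObjects("my-bucket", "some/base/path");                      // any AmazonS3 call
} // releasing the reference allows a stale client to be destroyed once it leaves the cache

// Refreshing settings clears the cache and returns the previous (stale) settings;
// later client(...) calls lazily build new clients from the refreshed settings.
Map<String, S3ClientSettings> stale = s3Service.refreshAndClearCache(S3ClientSettings.load(newSettings));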

View File

@@ -25,7 +25,6 @@ import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.model.StorageClass;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
@@ -40,7 +39,7 @@ import java.util.Locale;
class S3BlobStore extends AbstractComponent implements BlobStore {
private final AwsS3Service service;
private final S3Service service;
private final String clientName;
@@ -54,7 +53,7 @@ class S3BlobStore extends AbstractComponent implements BlobStore {
private final StorageClass storageClass;
S3BlobStore(Settings settings, AwsS3Service service, String clientName, String bucket, boolean serverSideEncryption,
S3BlobStore(Settings settings, S3Service service, String clientName, String bucket, boolean serverSideEncryption,
ByteSizeValue bufferSize, String cannedACL, String storageClass) {
super(settings);
this.service = service;

View File

@@ -20,7 +20,6 @@
package org.elasticsearch.repositories.s3;
import com.amazonaws.auth.BasicAWSCredentials;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobPath;
@@ -156,8 +155,10 @@ class S3Repository extends BlobStoreRepository {
/**
* Constructs an s3 backed repository
*/
S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry,
AwsS3Service awsService) throws IOException {
S3Repository(final RepositoryMetaData metadata,
final Settings settings,
final NamedXContentRegistry namedXContentRegistry,
final S3Service service) throws IOException {
super(metadata, settings, namedXContentRegistry);
final String bucket = BUCKET_SETTING.get(metadata.settings());
@@ -188,9 +189,9 @@ class S3Repository extends BlobStoreRepository {
// deprecated behavior: override client credentials from the cluster state
// (repository settings)
if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
overrideCredentialsFromClusterState(awsService);
overrideCredentialsFromClusterState(service);
}
blobStore = new S3BlobStore(settings, awsService, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
blobStore = new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
final String basePath = BASE_PATH_SETTING.get(metadata.settings());
if (Strings.hasLength(basePath)) {
@@ -220,13 +221,13 @@ class S3Repository extends BlobStoreRepository {
return chunkSize;
}
void overrideCredentialsFromClusterState(AwsS3Service awsService) {
void overrideCredentialsFromClusterState(final S3Service s3Service) {
deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead "
+ "store these in named clients and the elasticsearch keystore for secure settings.");
final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings());
// hack, but that's ok because the whole if branch should be axed
final Map<String, S3ClientSettings> prevSettings = awsService.refreshAndClearCache(S3ClientSettings.load(Settings.EMPTY));
final Map<String, S3ClientSettings> prevSettings = s3Service.refreshAndClearCache(S3ClientSettings.load(Settings.EMPTY));
final Map<String, S3ClientSettings> newSettings = S3ClientSettings.overrideCredentials(prevSettings, insecureCredentials);
awsService.refreshAndClearCache(newSettings);
s3Service.refreshAndClearCache(newSettings);
}
}
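
For context on the deprecated branch handled by overrideCredentialsFromClusterState above: the credentials come from the repository settings themselves rather than the keystore. A hedged sketch follows (repository name and key values are illustrative; RepositoryCredentialsTests further down builds metadata the same way).

// Deprecated path, sketched: credentials embedded directly in the repository settings,
// which makes S3ClientSettings.checkDeprecatedCredentials(...) return true. Values are illustrative.
Settings repositorySettings = Settings.builder()
        .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key")
        .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret")
        .build();
RepositoryMetaData metadata = new RepositoryMetaData("my-repo", "s3", repositorySettings);
// Constructing the S3Repository for this metadata then logs the deprecation warning and
// rebuilds the client cache with the overriding credentials, as shown in the hunk above.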

View File

@@ -19,14 +19,6 @@
package org.elasticsearch.repositories.s3;
import java.io.IOException;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import com.amazonaws.util.json.Jackson;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
@@ -39,6 +31,15 @@ import org.elasticsearch.plugins.ReloadablePlugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.repositories.Repository;
import java.io.IOException;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* A plugin to add a repository type that writes to and from the AWS S3.
*/
@@ -60,33 +61,29 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo
});
}
private final AwsS3Service awsS3Service;
private final S3Service service;
public S3RepositoryPlugin(Settings settings) {
this.awsS3Service = getAwsS3Service(settings);
public S3RepositoryPlugin(final Settings settings) {
this(settings, new S3Service(settings));
}
S3RepositoryPlugin(final Settings settings, final S3Service service) {
this.service = Objects.requireNonNull(service, "S3 service must not be null");
// eagerly load client settings so that secure settings are read
final Map<String, S3ClientSettings> clientsSettings = S3ClientSettings.load(settings);
this.awsS3Service.refreshAndClearCache(clientsSettings);
}
protected S3RepositoryPlugin(AwsS3Service awsS3Service) {
this.awsS3Service = awsS3Service;
this.service.refreshAndClearCache(clientsSettings);
}
// proxy method for testing
protected S3Repository getS3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry)
throws IOException {
return new S3Repository(metadata, settings, namedXContentRegistry, awsS3Service);
}
// proxy method for testing
protected AwsS3Service getAwsS3Service(Settings settings) {
return new InternalAwsS3Service(settings);
protected S3Repository createRepository(final RepositoryMetaData metadata,
final Settings settings,
final NamedXContentRegistry registry) throws IOException {
return new S3Repository(metadata, settings, registry, service);
}
@Override
public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
return Collections.singletonMap(S3Repository.TYPE, (metadata) -> getS3Repository(metadata, env.settings(), namedXContentRegistry));
public Map<String, Repository.Factory> getRepositories(final Environment env, final NamedXContentRegistry registry) {
return Collections.singletonMap(S3Repository.TYPE, (metadata) -> createRepository(metadata, env.settings(), registry));
}
@Override
@@ -112,11 +109,11 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo
public void reload(Settings settings) {
// secure settings should be readable
final Map<String, S3ClientSettings> clientsSettings = S3ClientSettings.load(settings);
awsS3Service.refreshAndClearCache(clientsSettings);
service.refreshAndClearCache(clientsSettings);
}
@Override
public void close() throws IOException {
awsS3Service.close();
service.close();
}
}
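
Both the eager S3ClientSettings.load(settings) call in the constructor and the reload(Settings) hook above exist so that secure settings are read while the keystore is accessible. The sketch below shows where those values come from, assuming the MockSecureSettings test helper; only the s3.client.<name>.* setting names are taken from the plugin, the rest is illustrative.

// Hedged sketch, assuming the MockSecureSettings test helper: client credentials live in
// the keystore under s3.client.<client-name>.* and are picked up whenever
// S3ClientSettings.load(...) runs (plugin constructor or reload()).
MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("s3.client.default.access_key", "some_access_key");  // illustrative value
secureSettings.setString("s3.client.default.secret_key", "some_secret_key");  // illustrative value
Settings nodeSettings = Settings.builder().setSecureSettings(secureSettings).build();

Map<String, S3ClientSettings> clientsSettings = S3ClientSettings.load(nodeSettings);
service.refreshAndClearCache(clientsSettings);  // same call the constructor and reload() make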

View File

@@ -28,24 +28,25 @@ import com.amazonaws.http.IdleConnectionReaper;
import com.amazonaws.internal.StaticCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import java.io.Closeable;
import java.io.IOException;
import java.util.Map;
import static java.util.Collections.emptyMap;
class InternalAwsS3Service extends AbstractComponent implements AwsS3Service {
class S3Service extends AbstractComponent implements Closeable {
private volatile Map<String, AmazonS3Reference> clientsCache = emptyMap();
private volatile Map<String, S3ClientSettings> clientsSettings = emptyMap();
InternalAwsS3Service(Settings settings) {
S3Service(Settings settings) {
super(settings);
}
@@ -55,7 +56,6 @@ class InternalAwsS3Service extends AbstractComponent implements AwsS3Service {
* clients are usable until released. On release they will be destroyed instead
* of being returned to the cache.
*/
@Override
public synchronized Map<String, S3ClientSettings> refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings) {
// shutdown all unused clients
// others will shutdown on their respective release
@@ -71,7 +71,6 @@ class InternalAwsS3Service extends AbstractComponent implements AwsS3Service {
* Attempts to retrieve a client by name from the cache. If the client does not
* exist it will be created.
*/
@Override
public AmazonS3Reference client(String clientName) {
AmazonS3Reference clientReference = clientsCache.get(clientName);
if ((clientReference != null) && clientReference.tryIncRef()) {
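
The hunk above shows only the fast path of client(String). The overall ref-counted cache pattern is sketched below; the synchronized fallback and the buildClientReference helper are assumptions for illustration, not the literal method body.

// Simplified illustration of the ref-counted client cache. The fast path matches the
// hunk above; buildClientReference(...) is a hypothetical helper standing in for the
// real client construction.
public AmazonS3Reference client(String clientName) {
    AmazonS3Reference clientReference = clientsCache.get(clientName);
    if ((clientReference != null) && clientReference.tryIncRef()) {
        return clientReference;                      // reuse a live cached client
    }
    synchronized (this) {
        clientReference = clientsCache.get(clientName);
        if ((clientReference != null) && clientReference.tryIncRef()) {
            return clientReference;                  // another thread rebuilt it first
        }
        final AmazonS3Reference built = buildClientReference(clientName);  // hypothetical helper
        built.incRef();                              // one reference for the caller
        clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientName, built).immutableMap();
        return built;
    }
}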

View File

@@ -65,82 +65,6 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
cleanRepositoryFiles(basePath);
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
public void testSimpleWorkflow() {
Client client = client();
Settings.Builder settings = Settings.builder()
.put(S3Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000));
// We sometimes test getting the base_path from node settings using repositories.s3.base_path
settings.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath);
logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
.setType("s3").setSettings(settings
).get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
createIndex("test-idx-1", "test-idx-2", "test-idx-3");
ensureGreen();
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
}
refresh();
assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
logger.info("--> snapshot");
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
logger.info("--> delete some data");
for (int i = 0; i < 50; i++) {
client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
}
for (int i = 50; i < 100; i++) {
client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
}
for (int i = 0; i < 100; i += 2) {
client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
}
refresh();
assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
logger.info("--> close indices");
client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
logger.info("--> restore all indices from the snapshot");
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
ensureGreen();
assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
// Test restore after index deletion
logger.info("--> delete indices");
cluster().wipeIndices("test-idx-1", "test-idx-2");
logger.info("--> restore one index after deletion");
restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
ensureGreen();
assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
public void testEncryption() {
Client client = client();
@@ -179,7 +103,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
Settings settings = internalCluster().getInstance(Settings.class);
Settings bucket = settings.getByPrefix("repositories.s3.");
try (AmazonS3Reference s3Client = internalCluster().getInstance(AwsS3Service.class).client("default")) {
try (AmazonS3Reference s3Client = internalCluster().getInstance(S3Service.class).client("default")) {
String bucketName = bucket.get("bucket");
logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath);
List<S3ObjectSummary> summaries = s3Client.client().listObjects(bucketName, basePath).getObjectSummaries();
@@ -442,7 +366,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
// We check that settings have been set in the elasticsearch.yml integration test file
// as described in README
assertThat("Your settings in elasticsearch.yml are incorrect. Check README file.", bucketName, notNullValue());
try (AmazonS3Reference s3Client = internalCluster().getInstance(AwsS3Service.class).client("default")) {
try (AmazonS3Reference s3Client = internalCluster().getInstance(S3Service.class).client("default")) {
ObjectListing prevListing = null;
//From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
//we can do at most 1K objects per delete
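
The comment above refers to the S3 limit of at most 1,000 keys per multi-object delete. A hedged sketch of the batching loop with the v1 SDK follows; variable names follow the surrounding test, and the loop relies on list pages being capped at 1,000 keys by default.

// Hedged sketch: page through the listing and issue one DeleteObjectsRequest per page,
// which keeps each request under the 1K-keys limit (default page size is 1000).
ObjectListing listing = s3Client.client().listObjects(bucketName, basePath);
while (true) {
    List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>();
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
    }
    if (keys.isEmpty() == false) {
        s3Client.client().deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(keys));
    }
    if (listing.isTruncated()) {
        listing = s3Client.client().listNextBatchOfObjects(listing);
    } else {
        break;
    }
}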

View File

@@ -40,8 +40,8 @@ public class AwsS3ServiceImplTests extends ESTestCase {
public void testAWSCredentialsDefaultToInstanceProviders() {
final String inexistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, inexistentClientName);
final AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, clientSettings);
assertThat(credentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class));
final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, clientSettings);
assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class));
}
public void testAWSCredentialsFromKeystore() {
@@ -60,15 +60,15 @@ public class AwsS3ServiceImplTests extends ESTestCase {
for (int i = 0; i < clientsCount; i++) {
final String clientName = clientNamePrefix + i;
final S3ClientSettings someClientSettings = allClientsSettings.get(clientName);
final AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, someClientSettings);
final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, someClientSettings);
assertThat(credentialsProvider, instanceOf(StaticCredentialsProvider.class));
assertThat(credentialsProvider.getCredentials().getAWSAccessKeyId(), is(clientName + "_aws_access_key"));
assertThat(credentialsProvider.getCredentials().getAWSSecretKey(), is(clientName + "_aws_secret_key"));
}
// test default exists and is an Instance provider
final S3ClientSettings defaultClientSettings = allClientsSettings.get("default");
final AWSCredentialsProvider defaultCredentialsProvider = InternalAwsS3Service.buildCredentials(logger, defaultClientSettings);
assertThat(defaultCredentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class));
final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials(logger, defaultClientSettings);
assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class));
}
public void testSetDefaultCredential() {
@@ -82,7 +82,7 @@ public class AwsS3ServiceImplTests extends ESTestCase {
assertThat(allClientsSettings.size(), is(1));
// test default exists and is an Instance provider
final S3ClientSettings defaultClientSettings = allClientsSettings.get("default");
final AWSCredentialsProvider defaultCredentialsProvider = InternalAwsS3Service.buildCredentials(logger, defaultClientSettings);
final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials(logger, defaultClientSettings);
assertThat(defaultCredentialsProvider, instanceOf(StaticCredentialsProvider.class));
assertThat(defaultCredentialsProvider.getCredentials().getAWSAccessKeyId(), is(awsAccessKey));
assertThat(defaultCredentialsProvider.getCredentials().getAWSSecretKey(), is(awsSecretKey));
@@ -152,7 +152,7 @@ public class AwsS3ServiceImplTests extends ESTestCase {
int expectedReadTimeout) {
final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default");
final ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings);
final ClientConfiguration configuration = S3Service.buildConfiguration(clientSettings);
assertThat(configuration.getResponseMetadataCacheSize(), is(0));
assertThat(configuration.getProtocol(), is(expectedProtocol));

View File

@@ -63,9 +63,9 @@ public class RepositoryCredentialsTests extends ESTestCase {
}
}
static final class ProxyInternalAwsS3Service extends InternalAwsS3Service {
static final class ProxyS3Service extends S3Service {
ProxyInternalAwsS3Service(Settings settings) {
ProxyS3Service(Settings settings) {
super(settings);
}
@@ -77,15 +77,9 @@
}
protected ProxyS3RepositoryPlugin(Settings settings) {
super(settings);
ProxyS3RepositoryPlugin(Settings settings) {
super(settings, new ProxyS3Service(settings));
}
@Override
protected AwsS3Service getAwsS3Service(Settings settings) {
return new ProxyInternalAwsS3Service(settings);
}
}
public void testRepositoryCredentialsOverrideSecureCredentials() throws IOException {
@@ -108,7 +102,7 @@
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key")
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);
S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY);
S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY);
AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials();
assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
@@ -131,7 +125,7 @@
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret")
.build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(Settings.EMPTY);
S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY);
S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY);
AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials();
assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
@@ -162,7 +156,7 @@
}
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);
S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) {
S3Repository s3repo = s3Plugin.createRepository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) {
try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials
.getCredentials();

View File

@@ -110,14 +110,14 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
@Override
public Map<String, Repository.Factory> getRepositories(final Environment env, final NamedXContentRegistry registry) {
return Collections.singletonMap(S3Repository.TYPE,
(metadata) -> new S3Repository(metadata, env.settings(), registry, new InternalAwsS3Service(env.settings()) {
(metadata) -> new S3Repository(metadata, env.settings(), registry, new S3Service(env.settings()) {
@Override
public synchronized AmazonS3Reference client(String clientName) {
return new AmazonS3Reference(new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass));
}
}) {
@Override
void overrideCredentialsFromClusterState(AwsS3Service awsService) {
void overrideCredentialsFromClusterState(S3Service awsService) {
}
});
}

View File

@@ -117,7 +117,7 @@ public class S3BlobStoreTests extends ESBlobStoreTestCase {
final String theClientName = randomAlphaOfLength(4);
final AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass);
final AwsS3Service service = new InternalAwsS3Service(Settings.EMPTY) {
final S3Service service = new S3Service(Settings.EMPTY) {
@Override
public synchronized AmazonS3Reference client(String clientName) {
assert theClientName.equals(clientName);

View File

@@ -20,9 +20,7 @@
package org.elasticsearch.repositories.s3;
import com.amazonaws.services.s3.AbstractAmazonS3;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -30,6 +28,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
@@ -51,16 +50,11 @@ public class S3RepositoryTests extends ESTestCase {
}
}
private static class DummyS3Service extends AbstractLifecycleComponent implements AwsS3Service {
private static class DummyS3Service extends S3Service {
DummyS3Service() {
super(Settings.EMPTY);
}
@Override
protected void doStart() {}
@Override
protected void doStop() {}
@Override
protected void doClose() {}
@Override
public AmazonS3Reference client(String clientName) {
return new AmazonS3Reference(new DummyS3Client());

View File

@@ -24,10 +24,10 @@ import java.util.IdentityHashMap;
import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.common.settings.Settings;
public class TestAwsS3Service extends InternalAwsS3Service {
public class TestAwsS3Service extends S3Service {
public static class TestPlugin extends S3RepositoryPlugin {
public TestPlugin(Settings settings) {
super(new TestAwsS3Service(settings));
super(settings, new TestAwsS3Service(settings));
}
}
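
Taken together, the test changes above replace the old getAwsS3Service(Settings) factory override with constructor injection: a test now hands a stubbed S3Service directly to the plugin. A minimal sketch of that seam follows (the anonymous stub is illustrative, and the MockAmazonS3 arguments come from the individual test's setup).

// Hedged sketch of the new test seam: inject a stub service through the package-private
// two-argument constructor instead of overriding a factory method on the plugin.
S3Service stubService = new S3Service(Settings.EMPTY) {
    @Override
    public synchronized AmazonS3Reference client(String clientName) {
        // return a canned client; constructor arguments as in the tests above
        return new AmazonS3Reference(new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass));
    }
};
S3RepositoryPlugin plugin = new S3RepositoryPlugin(Settings.EMPTY, stubService);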