Allow per-repo protocols

Also add an integration test for custom endpoints/protocols.
Bruno Renié 2014-11-21 17:49:33 +01:00
parent 2203f439e2
commit 30d80cc27d
6 changed files with 52 additions and 24 deletions


@@ -164,6 +164,7 @@ The following settings are supported:
* `bucket`: The name of the bucket to be used for snapshots. (Mandatory)
* `region`: The region where bucket is located. Defaults to US Standard
* `endpoint`: The endpoint to the S3 API. Defaults to AWS's default S3 endpoint. Note that setting a region overrides the endpoint setting.
* `protocol`: The protocol to use (`http` or `https`). Defaults to `https`.
* `base_path`: Specifies the path within bucket to repository data. Defaults to root directory.
* `access_key`: The access key to use for authentication. Defaults to value of `cloud.aws.access_key`.
* `secret_key`: The secret key to use for authentication. Defaults to value of `cloud.aws.secret_key`.
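Taken together, these settings let a repository point at an S3-compatible store that only speaks HTTP. A minimal sketch, modelled on the plugin's own integration test further down; the repository name, bucket, endpoint and credentials are placeholders:

```java
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.ImmutableSettings;

// Sketch: register an S3 repository against a custom, HTTP-only endpoint.
// Everything except the setting names is a placeholder.
class RegisterHttpRepositoryExample {
    static void register(Client client) {
        PutRepositoryResponse response = client.admin().cluster().preparePutRepository("my-backup")
                .setType("s3")
                .setSettings(ImmutableSettings.settingsBuilder()
                        .put("bucket", "my-bucket")
                        .put("endpoint", "s3.example.com")  // custom S3-compatible endpoint
                        .put("protocol", "http")            // new per-repository setting
                        .put("access_key", "<access key>")
                        .put("secret_key", "<secret key>"))
                .get();
        assert response.isAcknowledged();
    }
}
```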
@@ -290,10 +291,16 @@ repositories:
remote-bucket:
bucket: <bucket in other region>
region: <region>
external-bucket:
bucket: <bucket>
access_key: <access key>
secret_key: <secret key>
endpoint: <endpoint>
protocol: <protocol>
```
Replace all occurrences of `access_key`, `secret_key`, `bucket` and `region` with your settings. Please note that the test will delete all snapshot/restore related files in the specified buckets.
Replace all occurrences of `access_key`, `secret_key`, `endpoint`, `protocol`, `bucket` and `region` with your settings. Please note that the test will delete all snapshot/restore related files in the specified buckets.
To run test:


@@ -28,7 +28,7 @@ import org.elasticsearch.common.component.LifecycleComponent;
public interface AwsS3Service extends LifecycleComponent<AwsS3Service> {
AmazonS3 client();
AmazonS3 client(String endpoint, String region, String account, String key);
AmazonS3 client(String endpoint, String protocol, String region, String account, String key);
AmazonS3 client(String endpoint, String region, String account, String key, Integer maxRetries);
AmazonS3 client(String endpoint, String protocol, String region, String account, String key, Integer maxRetries);
}
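For context, a hedged sketch of how a caller would use the widened interface; the endpoint and credentials are placeholders, and passing a null region means the explicit endpoint is used as-is:

```java
import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.cloud.aws.AwsS3Service;

// Sketch: fetch a client for an HTTP-only, S3-compatible store through the new overload.
class ClientLookupExample {
    static AmazonS3 httpClient(AwsS3Service s3Service) {
        return s3Service.client(
                "s3.example.com",  // endpoint (placeholder)
                "http",            // protocol (the new parameter)
                null,              // region: null, so the endpoint above is used directly
                "<access key>",
                "<secret key>");
    }
}
```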


@@ -60,34 +60,32 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent<AwsS3Servic
String account = componentSettings.get("access_key", settings.get("cloud.account"));
String key = componentSettings.get("secret_key", settings.get("cloud.key"));
return getClient(endpoint, account, key, null);
return getClient(endpoint, "https", account, key, null);
}
@Override
public AmazonS3 client(String endpoint, String region, String account, String key) {
return client(endpoint, region, account, key, null);
public AmazonS3 client(String endpoint, String protocol, String region, String account, String key) {
return client(endpoint, protocol, region, account, key, null);
}
@Override
public synchronized AmazonS3 client(String endpoint, String region, String account, String key, Integer maxRetries) {
if (endpoint == null) {
endpoint = getDefaultEndpoint();
}
if (region != null) {
public synchronized AmazonS3 client(String endpoint, String protocol, String region, String account, String key, Integer maxRetries) {
if (region != null && endpoint == null) {
endpoint = getEndpoint(region);
logger.debug("using s3 region [{}], with endpoint [{}]", region, endpoint);
} else if (endpoint == null) {
endpoint = getDefaultEndpoint();
}
if (account == null || key == null) {
account = componentSettings.get("access_key", settings.get("cloud.account"));
key = componentSettings.get("secret_key", settings.get("cloud.key"));
}
return getClient(endpoint, account, key, maxRetries);
return getClient(endpoint, protocol, account, key, maxRetries);
}
private synchronized AmazonS3 getClient(String endpoint, String account, String key, Integer maxRetries) {
private synchronized AmazonS3 getClient(String endpoint, String protocol, String account, String key, Integer maxRetries) {
Tuple<String, String> clientDescriptor = new Tuple<String, String>(endpoint, account);
AmazonS3Client client = clients.get(clientDescriptor);
if (client != null) {
@@ -95,8 +93,10 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent<AwsS3Servic
}
ClientConfiguration clientConfiguration = new ClientConfiguration();
String protocol = componentSettings.get("protocol", "https").toLowerCase();
protocol = componentSettings.get("s3.protocol", protocol).toLowerCase();
if (protocol == null) {
protocol = "https";
}
if ("http".equals(protocol)) {
clientConfiguration.setProtocol(Protocol.HTTP);
} else if ("https".equals(protocol)) {
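The hunk above is cut off mid-branch. For completeness, here is a hedged sketch of what the protocol handling amounts to once the setting reaches the client factory; the rejection branch and exception type are assumptions, not part of this diff:

```java
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;

// Sketch: default to HTTPS when no protocol was resolved, then map the setting
// string onto the AWS SDK enum. The unknown-value branch is assumed.
class ProtocolConfigExample {
    static ClientConfiguration withProtocol(String protocol) {
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        if (protocol == null) {
            protocol = "https";
        }
        if ("http".equals(protocol)) {
            clientConfiguration.setProtocol(Protocol.HTTP);
        } else if ("https".equals(protocol)) {
            clientConfiguration.setProtocol(Protocol.HTTPS);
        } else {
            throw new IllegalArgumentException("protocol must be 'http' or 'https', got [" + protocol + "]");
        }
        return clientConfiguration;
    }
}
```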


@@ -80,6 +80,9 @@ public class S3Repository extends BlobStoreRepository {
}
String endpoint = repositorySettings.settings().get("endpoint", componentSettings.get("endpoint"));
String protocol = componentSettings.get("protocol", "https").toLowerCase();
protocol = componentSettings.get("s3.protocol", protocol).toLowerCase();
protocol = repositorySettings.settings().get("protocol", protocol);
String region = repositorySettings.settings().get("region", componentSettings.get("region"));
if (region == null) {
@@ -126,10 +129,10 @@ public class S3Repository extends BlobStoreRepository {
this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", componentSettings.getAsBytesSize("chunk_size", new ByteSizeValue(100, ByteSizeUnit.MB)));
this.compress = repositorySettings.settings().getAsBoolean("compress", componentSettings.getAsBoolean("compress", false));
logger.debug("using bucket [{}], region [{}], endpoint [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], max_retries [{}]",
bucket, region, endpoint, chunkSize, serverSideEncryption, bufferSize, maxRetries);
logger.debug("using bucket [{}], region [{}], endpoint [{}], protocol [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], max_retries [{}]",
bucket, region, endpoint, protocol, chunkSize, serverSideEncryption, bufferSize, maxRetries);
blobStore = new S3BlobStore(settings, s3Service.client(endpoint, region, repositorySettings.settings().get("access_key"), repositorySettings.settings().get("secret_key"), maxRetries), bucket, region, serverSideEncryption, bufferSize, maxRetries);
blobStore = new S3BlobStore(settings, s3Service.client(endpoint, protocol, region, repositorySettings.settings().get("access_key"), repositorySettings.settings().get("secret_key"), maxRetries), bucket, region, serverSideEncryption, bufferSize, maxRetries);
String basePath = repositorySettings.settings().get("base_path", null);
if (Strings.hasLength(basePath)) {
BlobPath path = new BlobPath();


@@ -45,13 +45,13 @@ public class TestAwsS3Service extends InternalAwsS3Service {
}
@Override
public synchronized AmazonS3 client(String endpoint, String region, String account, String key) {
return cachedWrapper(super.client(endpoint, region, account, key));
public synchronized AmazonS3 client(String endpoint, String protocol, String region, String account, String key) {
return cachedWrapper(super.client(endpoint, protocol, region, account, key));
}
@Override
public synchronized AmazonS3 client(String region, String account, String key, Integer maxRetries) {
return cachedWrapper(super.client(region, account, key, maxRetries));
public synchronized AmazonS3 client(String endpoint, String protocol, String region, String account, String key, Integer maxRetries) {
return cachedWrapper(super.client(endpoint, protocol, region, account, key, maxRetries));
}
private AmazonS3 cachedWrapper(AmazonS3 client) {


@@ -193,6 +193,7 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest {
Settings bucket = settings.getByPrefix("repositories.s3.");
AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(
bucket.get("endpoint", settings.get("repositories.s3.endpoint")),
bucket.get("protocol", settings.get("repositories.s3.protocol")),
bucket.get("region", settings.get("repositories.s3.region")),
bucket.get("access_key", settings.get("cloud.aws.access_key")),
bucket.get("secret_key", settings.get("cloud.aws.secret_key")));
@@ -279,6 +280,22 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest {
assertRepositoryIsOperational(client, "test-repo");
}
@Test
public void testRepositoryWithCustomEndpointProtocol() {
Client client = client();
Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.external-bucket.");
logger.info("--> creating s3 repostoriy with endpoint [{}], bucket[{}] and path [{}]", bucketSettings.get("endpoint"), bucketSettings.get("bucket"), basePath);
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
.setType("s3").setSettings(ImmutableSettings.settingsBuilder()
.put("protocol", bucketSettings.get("protocol"))
.put("endpoint", bucketSettings.get("endpoint"))
.put("access_key", bucketSettings.get("access_key"))
.put("secret_key", bucketSettings.get("secret_key"))
).get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
assertRepositoryIsOperational(client, "test-repo");
}
/**
* This test verifies that the test configuration is set up in a manner that
* does not make the test {@link #testRepositoryInRemoteRegion()} pointless.
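The new `testRepositoryWithCustomEndpointProtocol()` above pulls its configuration from node settings under the `repositories.s3.external-bucket.` prefix, i.e. the flattened form of the README's `external-bucket` block. A rough sketch of those settings; all values are placeholders:

```java
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

// Sketch: the flattened node settings the new test expects to find.
class ExternalBucketSettingsExample {
    static Settings externalBucketSettings() {
        return ImmutableSettings.settingsBuilder()
                .put("repositories.s3.external-bucket.bucket", "my-bucket")
                .put("repositories.s3.external-bucket.access_key", "<access key>")
                .put("repositories.s3.external-bucket.secret_key", "<secret key>")
                .put("repositories.s3.external-bucket.endpoint", "s3.example.com")
                .put("repositories.s3.external-bucket.protocol", "http")
                .build();
    }
}
```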
@@ -432,6 +449,7 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest {
};
for (Settings bucket : buckets) {
String endpoint = bucket.get("endpoint", settings.get("repositories.s3.endpoint"));
String protocol = bucket.get("protocol", settings.get("repositories.s3.protocol"));
String region = bucket.get("region", settings.get("repositories.s3.region"));
String accessKey = bucket.get("access_key", settings.get("cloud.aws.access_key"));
String secretKey = bucket.get("secret_key", settings.get("cloud.aws.secret_key"));
@@ -440,7 +458,7 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest {
// We check that settings has been set in elasticsearch.yml integration test file
// as described in README
assertThat("Your settings in elasticsearch.yml are incorrects. Check README file.", bucketName, notNullValue());
AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(endpoint, region, accessKey, secretKey);
AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(endpoint, protocol, region, accessKey, secretKey);
try {
ObjectListing prevListing = null;
//From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html