Merge branch 'aws/base_path'

This commit is contained in:
David Pilato 2015-08-13 15:29:00 +02:00
commit 48ef9e3cfb
3 changed files with 13 additions and 6 deletions

View File

@@ -187,7 +187,7 @@ The following settings are supported:
* `region`: The region where bucket is located. Defaults to US Standard * `region`: The region where bucket is located. Defaults to US Standard
* `endpoint`: The endpoint to the S3 API. Defaults to AWS's default S3 endpoint. Note that setting a region overrides the endpoint setting. * `endpoint`: The endpoint to the S3 API. Defaults to AWS's default S3 endpoint. Note that setting a region overrides the endpoint setting.
* `protocol`: The protocol to use (`http` or `https`). Defaults to value of `cloud.aws.protocol` or `cloud.aws.s3.protocol`. * `protocol`: The protocol to use (`http` or `https`). Defaults to value of `cloud.aws.protocol` or `cloud.aws.s3.protocol`.
* `base_path`: Specifies the path within bucket to repository data. Defaults to root directory. * `base_path`: Specifies the path within bucket to repository data. Defaults to value of `repositories.s3.base_path` or to root directory if not set.
* `access_key`: The access key to use for authentication. Defaults to value of `cloud.aws.access_key`. * `access_key`: The access key to use for authentication. Defaults to value of `cloud.aws.access_key`.
* `secret_key`: The secret key to use for authentication. Defaults to value of `cloud.aws.secret_key`. * `secret_key`: The secret key to use for authentication. Defaults to value of `cloud.aws.secret_key`.
* `chunk_size`: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by using size value notation, i.e. `1g`, `10m`, `5k`. Defaults to `100m`. * `chunk_size`: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by using size value notation, i.e. `1g`, `10m`, `5k`. Defaults to `100m`.

View File

@@ -123,7 +123,7 @@ public class S3Repository extends BlobStoreRepository {
bucket, region, endpoint, protocol, chunkSize, serverSideEncryption, bufferSize, maxRetries); bucket, region, endpoint, protocol, chunkSize, serverSideEncryption, bufferSize, maxRetries);
blobStore = new S3BlobStore(settings, s3Service.client(endpoint, protocol, region, repositorySettings.settings().get("access_key"), repositorySettings.settings().get("secret_key"), maxRetries), bucket, region, serverSideEncryption, bufferSize, maxRetries); blobStore = new S3BlobStore(settings, s3Service.client(endpoint, protocol, region, repositorySettings.settings().get("access_key"), repositorySettings.settings().get("secret_key"), maxRetries), bucket, region, serverSideEncryption, bufferSize, maxRetries);
String basePath = repositorySettings.settings().get("base_path", null); String basePath = repositorySettings.settings().get("base_path", settings.get("repositories.s3.base_path"));
if (Strings.hasLength(basePath)) { if (Strings.hasLength(basePath)) {
BlobPath path = new BlobPath(); BlobPath path = new BlobPath();
for(String elem : Strings.splitStringToArray(basePath, '/')) { for(String elem : Strings.splitStringToArray(basePath, '/')) {

View File

@@ -64,6 +64,7 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest {
.put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, false) .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, false)
.put("cloud.enabled", true) .put("cloud.enabled", true)
.put("plugin.types", CloudAwsPlugin.class.getName()) .put("plugin.types", CloudAwsPlugin.class.getName())
.put("repositories.s3.base_path", basePath)
.build(); .build();
} }
@@ -85,11 +86,17 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest {
@Test @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211") @Test @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
public void testSimpleWorkflow() { public void testSimpleWorkflow() {
Client client = client(); Client client = client();
Settings.Builder settings = Settings.settingsBuilder()
.put("chunk_size", randomIntBetween(1000, 10000));
// We sometime test getting the base_path from node settings using repositories.s3.base_path
if (usually()) {
settings.put("base_path", basePath);
}
logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath); logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
.setType("s3").setSettings(Settings.settingsBuilder() .setType("s3").setSettings(settings
.put("base_path", basePath)
.put("chunk_size", randomIntBetween(1000, 10000))
).get(); ).get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
@@ -342,7 +349,7 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest {
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
.setType("s3").setSettings(Settings.settingsBuilder() .setType("s3").setSettings(Settings.settingsBuilder()
.put("base_path", basePath) .put("base_path", basePath)
).get(); ).get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
logger.info("--> restore non existing snapshot"); logger.info("--> restore non existing snapshot");