Move `throttle_retries` under `repositories.s3.` prefix or per repository

I initially put this setting under the `cloud.aws.s3.` prefix by mistake, which does not make sense. It should live in the same place as `max_retries`.

Also applied @tlrx's comments: we should set this even if `max_retries` is not set (i.e. when the default values are used).

Also added some documentation about this setting.
David Pilato 2016-05-19 16:50:37 +02:00
parent faa3c6ef3c
commit e289de6e96
7 changed files with 45 additions and 19 deletions

View File

@@ -39,7 +39,8 @@ http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
 credentials for authentication. These can be overridden by, in increasing
 order of precedence, system properties `aws.accessKeyId` and `aws.secretKey`,
 environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_KEY`, or the
-elasticsearch config using `cloud.aws.access_key` and `cloud.aws.secret_key`:
+elasticsearch config using `cloud.aws.access_key` and `cloud.aws.secret_key` or
+if you wish to set credentials specifically for s3 `cloud.aws.s3.access_key` and `cloud.aws.s3.secret_key`:
 
 [source,yaml]
 ----
@@ -106,6 +107,7 @@ cloud:
 ===== Region
 
 The `cloud.aws.region` can be set to a region and will automatically use the relevant settings for both `ec2` and `s3`.
+You can specifically set it for s3 only using `cloud.aws.s3.region`.
 The available values are:
 
 * `us-east` (`us-east-1`)
@@ -216,9 +218,13 @@ The following settings are supported:
 
     Number of retries in case of S3 errors. Defaults to `3`.
 
+`throttle_retries`::
+
+    Set to `true` if you want to throttle retries. Defaults to AWS SDK default value (`false`).
+
 `read_only`::
 
-    Makes repository read-only. coming[2.1.0] Defaults to `false`.
+    Makes repository read-only. Defaults to `false`.
 
 `canned_acl`::
@@ -236,6 +242,9 @@ The following settings are supported:
     currently supported by the plugin. For more information about the
     different classes, see http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS Storage Classes Guide]
 
+Note that you can define S3 repository settings for all S3 repositories in `elasticsearch.yml` configuration file.
+They are all prefixed with `repositories.s3.`.
+
 The S3 repositories use the same credentials as the rest of the AWS services
 provided by this plugin (`discovery`). See <<repository-s3-usage>> for details.
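To illustrate where the settings documented above end up, here is a minimal sketch, not part of this commit, that assembles the node-level keys programmatically; in practice they simply go into `elasticsearch.yml`. The values below are placeholders, and `Settings.builder()` is assumed to be the settings builder available in this codebase.

```java
import org.elasticsearch.common.settings.Settings;

public class S3NodeSettingsExample {
    // Illustrative only: node-level keys mentioned in the docs above; values are placeholders.
    static Settings nodeSettings() {
        return Settings.builder()
            .put("cloud.aws.access_key", "<your-access-key>")   // shared AWS credentials
            .put("cloud.aws.secret_key", "<your-secret-key>")
            .put("cloud.aws.s3.region", "us-west-2")             // s3-specific region override
            .put("repositories.s3.throttle_retries", true)       // node-wide default for all S3 repositories
            .build();
    }
}
```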

View File

@@ -151,11 +151,8 @@ public interface AwsS3Service extends LifecycleComponent<AwsS3Service> {
          * cloud.aws.s3.endpoint: Endpoint. If not set, endpoint will be guessed based on region setting.
          */
         Setting<String> ENDPOINT_SETTING = Setting.simpleString("cloud.aws.s3.endpoint", Property.NodeScope);
-        /**
-         * cloud.aws.s3.throttle_retries: Set to `true` if you want to throttle retries. Defaults to `true`.
-         */
-        Setting<Boolean> THROTTLE_RETRIES_SETTING = Setting.boolSetting("cloud.aws.s3.throttle_retries", true, Property.NodeScope);
     }
 
-    AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries);
+    AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries,
+                    boolean throttleRetries);
 }

View File

@@ -57,7 +57,8 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent<AwsS3Servic
     }
 
     @Override
-    public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries) {
+    public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries,
+                                        boolean throttleRetries) {
         if (Strings.isNullOrEmpty(endpoint)) {
             // We need to set the endpoint based on the region
             if (region != null) {
@@ -69,10 +70,11 @@
             }
         }
-        return getClient(endpoint, protocol, account, key, maxRetries);
+        return getClient(endpoint, protocol, account, key, maxRetries, throttleRetries);
     }
 
-    private synchronized AmazonS3 getClient(String endpoint, Protocol protocol, String account, String key, Integer maxRetries) {
+    private synchronized AmazonS3 getClient(String endpoint, Protocol protocol, String account, String key, Integer maxRetries,
+                                            boolean throttleRetries) {
         Tuple<String, String> clientDescriptor = new Tuple<>(endpoint, account);
         AmazonS3Client client = clients.get(clientDescriptor);
         if (client != null) {
@@ -101,8 +103,8 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent<AwsS3Servic
         if (maxRetries != null) {
             // If not explicitly set, default to 3 with exponential backoff policy
             clientConfiguration.setMaxErrorRetry(maxRetries);
-            clientConfiguration.setUseThrottleRetries(AwsS3Service.CLOUD_S3.THROTTLE_RETRIES_SETTING.get(settings));
         }
+        clientConfiguration.setUseThrottleRetries(throttleRetries);
 
         // #155: we might have 3rd party users using older S3 API version
         String awsSigner = CLOUD_S3.SIGNER_SETTING.get(settings);
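To spell out the behavioural change in this hunk: throttled retries are now configured on the AWS SDK `ClientConfiguration` unconditionally, whereas before they were only applied when `max_retries` had been set explicitly. A minimal sketch of that logic, using illustrative class and method names rather than the plugin's actual code:

```java
import com.amazonaws.ClientConfiguration;

public class ThrottleRetriesConfigExample {
    // Sketch of the post-change behaviour: maxRetries only overrides the SDK
    // default when explicitly set, while throttling is always applied.
    static ClientConfiguration configure(Integer maxRetries, boolean throttleRetries) {
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        if (maxRetries != null) {
            // If not explicitly set, the SDK default of 3 retries with exponential backoff applies
            clientConfiguration.setMaxErrorRetry(maxRetries);
        }
        // Previously this call lived inside the maxRetries != null branch and was skipped otherwise
        clientConfiguration.setUseThrottleRetries(throttleRetries);
        return clientConfiguration;
    }
}
```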

View File

@@ -112,7 +112,6 @@ public class S3RepositoryPlugin extends Plugin {
         settingsModule.registerSetting(AwsS3Service.CLOUD_S3.SIGNER_SETTING);
         settingsModule.registerSetting(AwsS3Service.CLOUD_S3.REGION_SETTING);
         settingsModule.registerSetting(AwsS3Service.CLOUD_S3.ENDPOINT_SETTING);
-        settingsModule.registerSetting(AwsS3Service.CLOUD_S3.THROTTLE_RETRIES_SETTING);
 
         // Register S3 repositories settings: repositories.s3
         settingsModule.registerSetting(S3Repository.Repositories.KEY_SETTING);
@@ -129,6 +128,7 @@ public class S3RepositoryPlugin extends Plugin {
         settingsModule.registerSetting(S3Repository.Repositories.STORAGE_CLASS_SETTING);
         settingsModule.registerSetting(S3Repository.Repositories.CANNED_ACL_SETTING);
         settingsModule.registerSetting(S3Repository.Repositories.BASE_PATH_SETTING);
+        settingsModule.registerSetting(S3Repository.Repositories.THROTTLE_RETRIES_SETTING);
 
         // Register S3 single repository settings
         settingsModule.registerSetting(S3Repository.Repository.KEY_SETTING);
@@ -145,6 +145,7 @@ public class S3RepositoryPlugin extends Plugin {
         settingsModule.registerSetting(S3Repository.Repository.STORAGE_CLASS_SETTING);
         settingsModule.registerSetting(S3Repository.Repository.CANNED_ACL_SETTING);
         settingsModule.registerSetting(S3Repository.Repository.BASE_PATH_SETTING);
+        settingsModule.registerSetting(S3Repository.Repository.THROTTLE_RETRIES_SETTING);
     }
 
     /**

View File

@@ -19,6 +19,7 @@
 
 package org.elasticsearch.repositories.s3;
 
+import com.amazonaws.ClientConfiguration;
 import com.amazonaws.Protocol;
 import org.elasticsearch.cloud.aws.AwsS3Service;
 import org.elasticsearch.cloud.aws.AwsS3Service.CLOUD_S3;
@@ -115,6 +116,11 @@ public class S3Repository extends BlobStoreRepository {
          * repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3.
          */
         Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, Property.NodeScope);
+        /**
+         * repositories.s3.throttle_retries: Set to `true` if you want to throttle retries. Defaults to AWS SDK default value (`false`).
+         */
+        Setting<Boolean> THROTTLE_RETRIES_SETTING = Setting.boolSetting("repositories.s3.throttle_retries",
+            ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope);
         /**
          * repositories.s3.chunk_size: Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g.
          */
@@ -196,6 +202,12 @@ public class S3Repository extends BlobStoreRepository {
          * @see Repositories#MAX_RETRIES_SETTING
          */
         Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, Property.NodeScope);
+        /**
+         * throttle_retries
+         * @see Repositories#THROTTLE_RETRIES_SETTING
+         */
+        Setting<Boolean> THROTTLE_RETRIES_SETTING = Setting.boolSetting("throttle_retries",
+            ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope);
         /**
          * chunk_size
          * @see Repositories#CHUNK_SIZE_SETTING
@@ -262,6 +274,7 @@ public class S3Repository extends BlobStoreRepository {
         boolean serverSideEncryption = getValue(repositorySettings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING);
         ByteSizeValue bufferSize = getValue(repositorySettings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING);
         Integer maxRetries = getValue(repositorySettings, Repository.MAX_RETRIES_SETTING, Repositories.MAX_RETRIES_SETTING);
+        boolean throttleRetries = getValue(repositorySettings, Repository.THROTTLE_RETRIES_SETTING, Repositories.THROTTLE_RETRIES_SETTING);
 
         this.chunkSize = getValue(repositorySettings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING);
         this.compress = getValue(repositorySettings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING);
@@ -275,13 +288,15 @@ public class S3Repository extends BlobStoreRepository {
         String storageClass = getValue(repositorySettings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING);
         String cannedACL = getValue(repositorySettings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING);
 
-        logger.debug("using bucket [{}], region [{}], endpoint [{}], protocol [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], max_retries [{}], cannedACL [{}], storageClass [{}]",
-            bucket, region, endpoint, protocol, chunkSize, serverSideEncryption, bufferSize, maxRetries, cannedACL, storageClass);
+        logger.debug("using bucket [{}], region [{}], endpoint [{}], protocol [{}], chunk_size [{}], server_side_encryption [{}], " +
+            "buffer_size [{}], max_retries [{}], throttle_retries [{}], cannedACL [{}], storageClass [{}]",
+            bucket, region, endpoint, protocol, chunkSize, serverSideEncryption, bufferSize, maxRetries, throttleRetries, cannedACL,
+            storageClass);
 
         String key = getValue(repositorySettings, Repository.KEY_SETTING, Repositories.KEY_SETTING);
         String secret = getValue(repositorySettings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING);
 
-        blobStore = new S3BlobStore(settings, s3Service.client(endpoint, protocol, region, key, secret, maxRetries),
+        blobStore = new S3BlobStore(settings, s3Service.client(endpoint, protocol, region, key, secret, maxRetries, throttleRetries),
             bucket, region, serverSideEncryption, bufferSize, maxRetries, cannedACL, storageClass);
 
         String basePath = getValue(repositorySettings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING);
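The repository resolves `throttle_retries` through the plugin's `getValue` helper, preferring the per-repository `throttle_retries` value and falling back to the node-level `repositories.s3.throttle_retries` default. A rough sketch of that two-level resolution, assuming the standard `Setting#exists`/`Setting#get` API; the names below are illustrative, not the plugin's actual helper:

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class SettingFallbackExample {
    // Sketch only: the per-repository setting wins when present, otherwise the
    // node-scoped setting (with its own default) is used.
    static <T> T getValue(Settings repositorySettings, Settings nodeSettings,
                          Setting<T> repositorySetting, Setting<T> nodeSetting) {
        if (repositorySetting.exists(repositorySettings)) {
            return repositorySetting.get(repositorySettings);
        }
        return nodeSetting.get(nodeSettings);
    }
}
```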

View File

@@ -51,8 +51,9 @@ public class TestAwsS3Service extends InternalAwsS3Service {
 
     @Override
-    public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries) {
-        return cachedWrapper(super.client(endpoint, protocol, region, account, key, maxRetries));
+    public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries,
+                                        boolean throttleRetries) {
+        return cachedWrapper(super.client(endpoint, protocol, region, account, key, maxRetries, throttleRetries));
     }
 
     private AmazonS3 cachedWrapper(AmazonS3 client) {

View File

@@ -200,7 +200,7 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
             S3Repository.Repositories.REGION_SETTING.get(settings),
             S3Repository.Repositories.KEY_SETTING.get(settings),
             S3Repository.Repositories.SECRET_SETTING.get(settings),
-            null);
+            null, randomBoolean());
 
         String bucketName = bucket.get("bucket");
         logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath);
@@ -475,7 +475,8 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
             // We check that settings has been set in elasticsearch.yml integration test file
             // as described in README
             assertThat("Your settings in elasticsearch.yml are incorrects. Check README file.", bucketName, notNullValue());
-            AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(endpoint, protocol, region, accessKey, secretKey, null);
+            AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(endpoint, protocol, region, accessKey, secretKey,
+                null, randomBoolean());
             try {
                 ObjectListing prevListing = null;
                 //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html