S3 Repository: Remove deprecated settings (#24445)
These settings are deprecated in 5.5. This change removes them for 6.0.
parent bc223b6aef
commit 0789a74055

@@ -23,9 +23,24 @@ the region of the configured bucket.
* Specifying s3 signer type has been removed, including `cloud.aws.signer` and `cloud.aws.s3.signer`.

* All `cloud.aws` and `repositories.s3` settings have been removed. Use `s3.client.*` settings instead.

* Global repositories settings have been removed. This includes `repositories.s3.bucket`,
`repositories.s3.server_side_encryption`, `repositories.s3.buffer_size`,
`repositories.s3.max_retries`, `repositories.s3.use_throttle_retries`,
`repositories.s3.chunk_size`, `repositories.s3.compress`, `repositories.s3.storage_class`,
`repositories.s3.canned_acl`, `repositories.s3.base_path`, and
`repositories.s3.path_style_access`. Instead, these settings should be set directly in the
settings per repository (see the sketch after this list).
See {plugins}/repository-s3-repository.html[S3 Repository settings].

* All repository-level client settings have been removed. Use `s3.client.*` settings instead.

* Shared client settings have been removed. This includes `cloud.aws.access_key`,
`cloud.aws.secret_key`, `cloud.aws.protocol`, `cloud.aws.proxy.host`,
`cloud.aws.proxy.port`, `cloud.aws.proxy.username`, `cloud.aws.proxy.password`,
`cloud.aws.signer`, `cloud.aws.read_timeout`, `cloud.aws.s3.access_key`,
`cloud.aws.s3.secret_key`, `cloud.aws.s3.protocol`, `cloud.aws.s3.proxy.host`,
`cloud.aws.s3.proxy.port`, `cloud.aws.s3.proxy.username`, `cloud.aws.s3.proxy.password`,
`cloud.aws.s3.signer`, `cloud.aws.s3.read_timeout`, `repositories.s3.access_key`,
`repositories.s3.secret_key`, `repositories.s3.endpoint` and `repositories.s3.protocol`.
Instead, use the new named client settings under `s3.client.CLIENT_NAME.*`.
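
A rough sketch of the new model (not part of this commit's docs: the repository name, bucket, and values are placeholders; the API calls follow the tests later in this diff):

    // Per-repository settings replace the removed repositories.s3.* node settings
    PutRepositoryResponse response = client.admin().cluster().preparePutRepository("my-s3-repo")
        .setType("s3").setSettings(Settings.builder()
            .put("bucket", "my-bucket")           // was repositories.s3.bucket
            .put("base_path", "snapshots")        // was repositories.s3.base_path
            .put("server_side_encryption", true)  // was repositories.s3.server_side_encryption
        ).get();

Client-level behaviour such as credentials, proxies, and retries moves to the named client settings, for example `s3.client.default.max_retries` (see the sketch in the client configuration hunk below).
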
==== Azure Repository plugin

@@ -24,27 +24,20 @@ import java.util.Map;
import java.util.function.Function;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.http.IdleConnectionReaper;
import com.amazonaws.internal.StaticCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.S3ClientOptions;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

import static org.elasticsearch.repositories.s3.S3Repository.getValue;

class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service {

@@ -70,33 +63,17 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service {

        S3ClientSettings clientSettings = clientsSettings.get(clientName);
        if (clientSettings == null) {
            throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. " +
                "Existing client configs: " +
            throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. Existing client configs: " +
                Strings.collectionToDelimitedString(clientsSettings.keySet(), ","));
        }

        // If the user defined a path style access setting, we rely on it,
        // otherwise we use the default value set by the SDK
        Boolean pathStyleAccess = null;
        if (S3Repository.Repository.PATH_STYLE_ACCESS_SETTING.exists(repositorySettings) ||
            S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING.exists(settings)) {
            pathStyleAccess = getValue(repositorySettings, settings,
                S3Repository.Repository.PATH_STYLE_ACCESS_SETTING,
                S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING);
        }

        logger.debug("creating S3 client with client_name [{}], endpoint [{}], path_style_access [{}]",
            clientName, clientSettings.endpoint, pathStyleAccess);
        logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint);

        AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings);
        ClientConfiguration configuration = buildConfiguration(clientSettings, repositorySettings);

        client = new AmazonS3Client(credentials, configuration);

        if (pathStyleAccess != null) {
            client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(pathStyleAccess));
        }

        if (Strings.hasText(clientSettings.endpoint)) {
            client.setEndpoint(clientSettings.endpoint);
        }

@@ -121,14 +98,8 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service {
            clientConfiguration.setProxyPassword(clientSettings.proxyPassword);
        }

        Integer maxRetries = getRepoValue(repositorySettings, S3Repository.Repository.MAX_RETRIES_SETTING, clientSettings.maxRetries);
        if (maxRetries != null) {
            // If not explicitly set, default to 3 with exponential backoff policy
            clientConfiguration.setMaxErrorRetry(maxRetries);
        }
        boolean useThrottleRetries = getRepoValue(repositorySettings,
            S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING, clientSettings.throttleRetries);
        clientConfiguration.setUseThrottleRetries(useThrottleRetries);
        clientConfiguration.setMaxErrorRetry(clientSettings.maxRetries);
        clientConfiguration.setUseThrottleRetries(clientSettings.throttleRetries);
        clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis);

        return clientConfiguration;
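
With the repository-level overrides gone, retry and throttling behaviour comes only from the named client settings. A minimal sketch (the `s3.client.default.*` keys appear verbatim in the tests at the end of this diff; the values are illustrative):

    // Node settings: retries and throttling are now configured per named client
    Settings nodeSettings = Settings.builder()
        .put("s3.client.default.max_retries", 5)
        .put("s3.client.default.use_throttle_retries", true)
        .build();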

@@ -145,14 +116,6 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service {
        }
    }

    /** Returns the value for a given setting from the repository, or returns the fallback value. */
    private static <T> T getRepoValue(Settings repositorySettings, Setting<T> repositorySetting, T fallback) {
        if (repositorySetting.exists(repositorySettings)) {
            return repositorySetting.get(repositorySettings);
        }
        return fallback;
    }

    @Override
    protected void doStart() throws ElasticsearchException {
    }

@@ -81,11 +81,11 @@ class S3ClientSettings {

    /** The number of retries to use when an s3 request fails. */
    static final Setting.AffixSetting<Integer> MAX_RETRIES_SETTING = Setting.affixKeySetting(PREFIX, "max_retries",
        key -> Setting.intSetting(key, S3Repository.Repositories.MAX_RETRIES_SETTING, 0, Property.NodeScope));
        key -> Setting.intSetting(key, ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry(), 0, Property.NodeScope));

    /** Whether retries should be throttled (ie use backoff). */
    static final Setting.AffixSetting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.affixKeySetting(PREFIX, "use_throttle_retries",
        key -> Setting.boolSetting(key, S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING, Property.NodeScope));
        key -> Setting.boolSetting(key, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope));

    /** Credentials to authenticate with s3. */
    final BasicAWSCredentials credentials;
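
These affix settings expand once per client name. A sketch of resolving a concrete key, assuming the `AffixSetting#getConcreteSettingForNamespace` helper from this codebase and the `default` client name used by the tests:

    // s3.client.<name>.max_retries resolves within the client's namespace
    Settings nodeSettings = Settings.builder()
        .put("s3.client.default.max_retries", 5)
        .build();
    int maxRetries = S3ClientSettings.MAX_RETRIES_SETTING
        .getConcreteSettingForNamespace("default")
        .get(nodeSettings); // 5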

@@ -51,156 +51,66 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 */
class S3Repository extends BlobStoreRepository {

    public static final String TYPE = "s3";
    static final String TYPE = "s3";

    /**
     * Global S3 repositories settings. Starting with: repositories.s3
     * NOTE: These are legacy settings. Use the named client config settings above.
     * Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of
     * the available memory for smaller heaps.
     */
    public interface Repositories {
        /**
         * repositories.s3.bucket: The name of the bucket to be used for snapshots.
         */
        Setting<String> BUCKET_SETTING = Setting.simpleString("repositories.s3.bucket", Property.NodeScope, Property.Deprecated);
        /**
         * repositories.s3.server_side_encryption: When set to true files are encrypted on server side using AES256 algorithm.
         * Defaults to false.
         */
        Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING =
            Setting.boolSetting("repositories.s3.server_side_encryption", false, Property.NodeScope, Property.Deprecated);
    private static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(
        Math.max(
            ByteSizeUnit.MB.toBytes(5), // minimum value
            Math.min(
                ByteSizeUnit.MB.toBytes(100),
                JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)),
        ByteSizeUnit.BYTES);

        /**
         * Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of
         * the available memory for smaller heaps.
         */
        ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(
            Math.max(
                ByteSizeUnit.MB.toBytes(5), // minimum value
                Math.min(
                    ByteSizeUnit.MB.toBytes(100),
                    JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)),
            ByteSizeUnit.BYTES);

        /**
         * repositories.s3.buffer_size: Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
         * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and
         * to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevent the
         * use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size.
         */
        Setting<ByteSizeValue> BUFFER_SIZE_SETTING =
            Setting.byteSizeSetting("repositories.s3.buffer_size", DEFAULT_BUFFER_SIZE,
                new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope, Property.Deprecated);
        /**
         * repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3.
         */
        Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, Property.NodeScope, Property.Deprecated);
        /**
         * repositories.s3.use_throttle_retries: Set to `true` if you want to throttle retries. Defaults to AWS SDK default value (`false`).
         */
        Setting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.boolSetting("repositories.s3.use_throttle_retries",
            ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope, Property.Deprecated);
        /**
         * repositories.s3.chunk_size: Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g.
         */
        Setting<ByteSizeValue> CHUNK_SIZE_SETTING =
            Setting.byteSizeSetting("repositories.s3.chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB),
                new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope, Property.Deprecated);
        /**
         * repositories.s3.compress: When set to true metadata files are stored in compressed format. This setting doesn’t affect index
         * files that are already compressed by default. Defaults to false.
         */
        Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("repositories.s3.compress", false, Property.NodeScope, Property.Deprecated);
        /**
         * repositories.s3.storage_class: Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy,
         * standard_ia. Defaults to standard.
         */
        Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("repositories.s3.storage_class", Property.NodeScope, Property.Deprecated);
        /**
         * repositories.s3.canned_acl: The S3 repository supports all S3 canned ACLs: private, public-read, public-read-write,
         * authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private.
         */
        Setting<String> CANNED_ACL_SETTING = Setting.simpleString("repositories.s3.canned_acl", Property.NodeScope, Property.Deprecated);
        /**
         * repositories.s3.base_path: Specifies the path within bucket to repository data. Defaults to root directory.
         */
        Setting<String> BASE_PATH_SETTING = Setting.simpleString("repositories.s3.base_path", Property.NodeScope, Property.Deprecated);
        /**
         * repositories.s3.path_style_access: When set to true configures the client to use path-style access for all requests.
         Amazon S3 supports virtual-hosted-style and path-style access in all Regions. The path-style syntax, however,
         requires that you use the region-specific endpoint when attempting to access a bucket.
         The default behaviour is to detect which access style to use based on the configured endpoint (an IP will result
         in path-style access) and the bucket being accessed (some buckets are not valid DNS names). Setting this flag
         will result in path-style access being used for all requests.
         */
        Setting<Boolean> PATH_STYLE_ACCESS_SETTING = Setting.boolSetting("repositories.s3.path_style_access", false,
            Property.NodeScope, Property.Deprecated);
    }
    static final Setting<String> BUCKET_SETTING = Setting.simpleString("bucket");

    /**
     * Per S3 repository specific settings. Same settings as Repositories settings but without the repositories.s3 prefix.
     * If undefined, they use the repositories.s3.xxx equivalent setting.
     * When set to true files are encrypted on server side using AES256 algorithm.
     * Defaults to false.
     */
    public interface Repository {
    static final Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false);

        Setting<String> BUCKET_SETTING = Setting.simpleString("bucket");
    /**
     * Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
     * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and
     * to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevent the
     * use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size.
     */
    static final Setting<ByteSizeValue> BUFFER_SIZE_SETTING = Setting.byteSizeSetting("buffer_size", DEFAULT_BUFFER_SIZE,
        new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));

        /**
         * server_side_encryption
         * @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING
         */
        Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false);
    /**
     * Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g.
     */
    static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB),
        new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));

        /**
         * buffer_size
         * @see Repositories#BUFFER_SIZE_SETTING
         */
        Setting<ByteSizeValue> BUFFER_SIZE_SETTING =
            Setting.byteSizeSetting("buffer_size", Repositories.DEFAULT_BUFFER_SIZE,
                new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));
        /**
         * max_retries
         * @see Repositories#MAX_RETRIES_SETTING
         */
        Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, Property.Deprecated);
        /**
         * use_throttle_retries
         * @see Repositories#USE_THROTTLE_RETRIES_SETTING
         */
        Setting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.boolSetting("use_throttle_retries",
            ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.Deprecated);
        /**
         * chunk_size
         * @see Repositories#CHUNK_SIZE_SETTING
         */
        Setting<ByteSizeValue> CHUNK_SIZE_SETTING =
            Setting.byteSizeSetting("chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB),
                new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));
        /**
         * compress
         * @see Repositories#COMPRESS_SETTING
         */
        Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false);
        /**
         * storage_class
         * @see Repositories#STORAGE_CLASS_SETTING
         */
        Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("storage_class");
        /**
         * canned_acl
         * @see Repositories#CANNED_ACL_SETTING
         */
        Setting<String> CANNED_ACL_SETTING = Setting.simpleString("canned_acl");
        /**
         * base_path
         * @see Repositories#BASE_PATH_SETTING
         */
        Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path");
        /**
         * path_style_access
         * @see Repositories#PATH_STYLE_ACCESS_SETTING
         */
        Setting<Boolean> PATH_STYLE_ACCESS_SETTING = Setting.boolSetting("path_style_access", false, Property.Deprecated);
    }
    /**
     * When set to true metadata files are stored in compressed format. This setting doesn’t affect index
     * files that are already compressed by default. Defaults to false.
     */
    static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false);

    /**
     * Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy,
     * standard_ia. Defaults to standard.
     */
    static final Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("storage_class");

    /**
     * The S3 repository supports all S3 canned ACLs: private, public-read, public-read-write,
     * authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private.
     */
    static final Setting<String> CANNED_ACL_SETTING = Setting.simpleString("canned_acl");

    /**
     * Specifies the path within bucket to repository data. Defaults to root directory.
     */
    static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path");

    private final S3BlobStore blobStore;
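
For concreteness, a worked example of the DEFAULT_BUFFER_SIZE formula above, max(5 MB, min(100 MB, heap/20)):

    // 1 GB max heap: heap/20 ≈ 51 MB  -> buffer_size defaults to ≈ 51 MB
    // 4 GB max heap: heap/20 ≈ 205 MB -> capped at the 100 MB ceiling
    // 100 MB max heap or less: heap/20 <= 5 MB -> raised to the 5 MB floor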

@@ -217,25 +127,25 @@ class S3Repository extends BlobStoreRepository {
                 NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) throws IOException {
        super(metadata, settings, namedXContentRegistry);

        String bucket = getValue(metadata.settings(), settings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING);
        String bucket = BUCKET_SETTING.get(metadata.settings());
        if (bucket == null) {
            throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway");
        }

        boolean serverSideEncryption = getValue(metadata.settings(), settings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING);
        ByteSizeValue bufferSize = getValue(metadata.settings(), settings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING);
        this.chunkSize = getValue(metadata.settings(), settings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING);
        this.compress = getValue(metadata.settings(), settings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING);
        boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
        ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
        this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
        this.compress = COMPRESS_SETTING.get(metadata.settings());

        // We make sure that chunkSize is greater than or equal to bufferSize
        if (this.chunkSize.getBytes() < bufferSize.getBytes()) {
            throw new RepositoryException(metadata.name(), Repository.CHUNK_SIZE_SETTING.getKey() + " (" + this.chunkSize +
                ") can't be lower than " + Repository.BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ").");
            throw new RepositoryException(metadata.name(), CHUNK_SIZE_SETTING.getKey() + " (" + this.chunkSize +
                ") can't be lower than " + BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ").");
        }

        // Parse and validate the user's S3 Storage Class setting
        String storageClass = getValue(metadata.settings(), settings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING);
        String cannedACL = getValue(metadata.settings(), settings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING);
        String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
        String cannedACL = CANNED_ACL_SETTING.get(metadata.settings());

        logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " +
            "buffer_size [{}], cannedACL [{}], storageClass [{}]",

@@ -244,13 +154,8 @@ class S3Repository extends BlobStoreRepository {
        AmazonS3 client = s3Service.client(metadata.settings());
        blobStore = new S3BlobStore(settings, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);

        String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING);
        String basePath = BASE_PATH_SETTING.get(metadata.settings());
        if (Strings.hasLength(basePath)) {
            if (basePath.startsWith("/")) {
                basePath = basePath.substring(1);
                deprecationLogger.deprecated("S3 repository base_path trimming the leading `/`, and " +
                    "leading `/` will not be supported for the S3 repository in future releases");
            }
            this.basePath = new BlobPath().add(basePath);
        } else {
            this.basePath = BlobPath.cleanPath();
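
The effect of this base_path handling, illustrated (matching the assertions in S3RepositoryTests at the end of this diff):

    // "/foo/bar" -> deprecation warning, leading '/' trimmed, path built as "foo/bar/"
    // ""         -> BlobPath.cleanPath(), i.e. the repository writes at the bucket root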

@@ -276,15 +181,4 @@ class S3Repository extends BlobStoreRepository {
    protected ByteSizeValue chunkSize() {
        return chunkSize;
    }

    public static <T> T getValue(Settings repositorySettings,
                                 Settings globalSettings,
                                 Setting<T> repositorySetting,
                                 Setting<T> repositoriesSetting) {
        if (repositorySetting.exists(repositorySettings)) {
            return repositorySetting.get(repositorySettings);
        } else {
            return repositoriesSetting.get(globalSettings);
        }
    }
}
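
The removed getValue helper was the heart of the old two-level resolution; the new model is a plain lookup. A sketch of the contrast (variable names chosen for illustration):

    // Before: per-repository value, falling back to the repositories.s3.* node setting
    String bucket = getValue(repositorySettings, nodeSettings,
        Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING);

    // After: the repository's own settings are the only source
    String bucket2 = S3Repository.BUCKET_SETTING.get(repositorySettings);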

@@ -79,7 +79,6 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
    @Override
    public List<Setting<?>> getSettings() {
        return Arrays.asList(

            // named s3 client configuration settings
            S3ClientSettings.ACCESS_KEY_SETTING,
            S3ClientSettings.SECRET_KEY_SETTING,

@@ -91,19 +90,6 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
            S3ClientSettings.PROXY_PASSWORD_SETTING,
            S3ClientSettings.READ_TIMEOUT_SETTING,
            S3ClientSettings.MAX_RETRIES_SETTING,
            S3ClientSettings.USE_THROTTLE_RETRIES_SETTING,

            // Register S3 repositories settings: repositories.s3
            S3Repository.Repositories.BUCKET_SETTING,
            S3Repository.Repositories.SERVER_SIDE_ENCRYPTION_SETTING,
            S3Repository.Repositories.BUFFER_SIZE_SETTING,
            S3Repository.Repositories.MAX_RETRIES_SETTING,
            S3Repository.Repositories.CHUNK_SIZE_SETTING,
            S3Repository.Repositories.COMPRESS_SETTING,
            S3Repository.Repositories.STORAGE_CLASS_SETTING,
            S3Repository.Repositories.CANNED_ACL_SETTING,
            S3Repository.Repositories.BASE_PATH_SETTING,
            S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING,
            S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING);
            S3ClientSettings.USE_THROTTLE_RETRIES_SETTING);
    }
}

@@ -52,43 +52,29 @@ import static org.hamcrest.Matchers.notNullValue;
@ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0, transportClientRatio = 0.0)
public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {

    @Override
    public Settings nodeSettings(int nodeOrdinal) {
        // nodeSettings is called before `wipeBefore()` so we need to define basePath here
        globalBasePath = "repo-" + randomInt();
        return Settings.builder().put(super.nodeSettings(nodeOrdinal))
            .put(S3Repository.Repositories.BASE_PATH_SETTING.getKey(), globalBasePath)
            .build();
    }

    private String basePath;
    private String globalBasePath;

    @Before
    public final void wipeBefore() {
        wipeRepositories();
        basePath = "repo-" + randomInt();
        cleanRepositoryFiles(basePath);
        cleanRepositoryFiles(globalBasePath);
    }

    @After
    public final void wipeAfter() {
        wipeRepositories();
        cleanRepositoryFiles(basePath);
        cleanRepositoryFiles(globalBasePath);
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
    public void testSimpleWorkflow() {
        Client client = client();
        Settings.Builder settings = Settings.builder()
            .put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000));
            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000));

        // We sometimes test getting the base_path from node settings using repositories.s3.base_path
        if (usually()) {
            settings.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath);
        }
        settings.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath);

        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")

@@ -163,10 +149,9 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);

        Settings repositorySettings = Settings.builder()
            .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
            .put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000))
            .put(S3Repository.Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true)
            .put(S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING.getKey(), randomBoolean())
            .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000))
            .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true)
            .build();

        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")

@@ -257,8 +242,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        try {
            client.admin().cluster().preparePutRepository("test-repo")
                .setType("s3").setSettings(Settings.builder()
                    .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
                    .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                    .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
                    .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                ).get();
            fail("repository verification should have raised an exception!");
        } catch (RepositoryVerificationException e) {

@@ -269,7 +254,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        Client client = client();
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

@@ -282,8 +267,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

@@ -297,8 +282,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        logger.info("--> creating s3 repository with endpoint [{}], bucket[{}] and path [{}]", bucketSettings.get("endpoint"), bucketSettings.get("bucket"), basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
        assertRepositoryIsOperational(client, "test-repo");

@@ -315,8 +300,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        try {
            client.admin().cluster().preparePutRepository("test-repo")
                .setType("s3").setSettings(Settings.builder()
                    .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
                    .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                    .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
                    .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                    // Below setting intentionally omitted to assert bucket is not available in default region.
                    // .put("region", privateBucketSettings.get("region"))
                ).get();

@@ -333,8 +318,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

@@ -349,7 +334,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

@@ -370,7 +355,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        logger.info("--> creating s3 repository without any path");
        PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

@@ -460,8 +445,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
        // We check that settings have been set in the elasticsearch.yml integration test file
        // as described in README
        assertThat("Your settings in elasticsearch.yml are incorrect. Check README file.", bucketName, notNullValue());
        AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(
            Settings.builder().put(S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING.getKey(), randomBoolean()).build());
        AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY);
        try {
            ObjectListing prevListing = null;
            // From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html

@@ -87,17 +87,6 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            "aws_proxy_password", 3, false, 10000);
    }

    public void testGlobalMaxRetriesBackcompat() {
        Settings settings = Settings.builder()
            .put(S3Repository.Repositories.MAX_RETRIES_SETTING.getKey(), 10)
            .build();
        launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null,
            null, 10, false, 50000);
        assertSettingDeprecationsAndWarnings(new Setting<?>[]{
            S3Repository.Repositories.MAX_RETRIES_SETTING
        });
    }

    public void testRepositoryMaxRetries() {
        Settings settings = Settings.builder()
            .put("s3.client.default.max_retries", 5)

@@ -106,31 +95,6 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            null, 5, false, 50000);
    }

    public void testRepositoryMaxRetriesBackcompat() {
        Settings repositorySettings = Settings.builder()
            .put(S3Repository.Repository.MAX_RETRIES_SETTING.getKey(), 20).build();
        Settings settings = Settings.builder()
            .put(S3Repository.Repositories.MAX_RETRIES_SETTING.getKey(), 10)
            .build();
        launchAWSConfigurationTest(settings, repositorySettings, Protocol.HTTPS, null, -1, null,
            null, 20, false, 50000);
        assertSettingDeprecationsAndWarnings(new Setting<?>[]{
            S3Repository.Repositories.MAX_RETRIES_SETTING,
            S3Repository.Repository.MAX_RETRIES_SETTING
        });
    }

    public void testGlobalThrottleRetriesBackcompat() {
        Settings settings = Settings.builder()
            .put(S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING.getKey(), true)
            .build();
        launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null,
            null, 3, true, 50000);
        assertSettingDeprecationsAndWarnings(new Setting<?>[]{
            S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING
        });
    }

    public void testRepositoryThrottleRetries() {
        Settings settings = Settings.builder()
            .put("s3.client.default.use_throttle_retries", true)

@@ -139,20 +103,6 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            null, 3, true, 50000);
    }

    public void testRepositoryThrottleRetriesBackcompat() {
        Settings repositorySettings = Settings.builder()
            .put(S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING.getKey(), true).build();
        Settings settings = Settings.builder()
            .put(S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING.getKey(), false)
            .build();
        launchAWSConfigurationTest(settings, repositorySettings, Protocol.HTTPS, null, -1, null,
            null, 3, true, 50000);
        assertSettingDeprecationsAndWarnings(new Setting<?>[]{
            S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING,
            S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING
        });
    }

    private void launchAWSConfigurationTest(Settings settings,
                                            Settings singleRepositorySettings,
                                            Protocol expectedProtocol,

@@ -19,12 +19,12 @@

package org.elasticsearch.repositories.s3;

import java.io.IOException;

import com.amazonaws.services.s3.AbstractAmazonS3;
import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

@@ -33,11 +33,6 @@ import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;

import java.io.IOException;

import static org.elasticsearch.repositories.s3.S3Repository.Repositories;
import static org.elasticsearch.repositories.s3.S3Repository.Repository;
import static org.elasticsearch.repositories.s3.S3Repository.getValue;
import static org.hamcrest.Matchers.containsString;

public class S3RepositoryTests extends ESTestCase {

@@ -82,15 +77,15 @@ public class S3RepositoryTests extends ESTestCase {

    private void assertValidBuffer(long bufferMB, long chunkMB) throws IOException {
        RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
            .put(Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB))
            .put(Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build());
            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB))
            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build());
        new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service());
    }

    private void assertInvalidBuffer(int bufferMB, int chunkMB, Class<? extends Exception> clazz, String msg) throws IOException {
        RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
            .put(Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB))
            .put(Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build());
            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB))
            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build());

        Exception e = expectThrows(clazz, () -> new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY,
            new DummyS3Service()));

@@ -99,26 +94,14 @@ public class S3RepositoryTests extends ESTestCase {

    public void testBasePathSetting() throws IOException {
        RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
            .put(Repository.BASE_PATH_SETTING.getKey(), "/foo/bar").build());
            .put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build());
        S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service());
        assertEquals("foo/bar/", s3repo.basePath().buildAsString()); // make sure leading `/` is removed and trailing is added
        assertWarnings("S3 repository base_path" +
            " trimming the leading `/`, and leading `/` will not be supported for the S3 repository in future releases");
        metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY);
        Settings settings = Settings.builder().put(Repositories.BASE_PATH_SETTING.getKey(), "/foo/bar").build();
        s3repo = new S3Repository(metadata, settings, NamedXContentRegistry.EMPTY, new DummyS3Service());
        assertEquals("foo/bar/", s3repo.basePath().buildAsString()); // make sure leading `/` is removed and trailing is added
        assertSettingDeprecationsAndWarnings(new Setting<?>[] { Repositories.BASE_PATH_SETTING },
            "S3 repository base_path" +
            " trimming the leading `/`, and leading `/` will not be supported for the S3 repository in future releases");
        assertEquals("foo/bar/", s3repo.basePath().buildAsString());
    }

    public void testDefaultBufferSize() {
        ByteSizeValue defaultBufferSize = S3Repository.Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY);
        ByteSizeValue defaultBufferSize = S3Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY);
        assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(new ByteSizeValue(100, ByteSizeUnit.MB)));
        assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(new ByteSizeValue(5, ByteSizeUnit.MB)));

        ByteSizeValue defaultNodeBufferSize = S3Repository.Repositories.BUFFER_SIZE_SETTING.get(Settings.EMPTY);
        assertEquals(defaultBufferSize, defaultNodeBufferSize);
    }
}