Reload secure settings for plugins (#31383)
Adds the ability to reread and decrypt the local node keystore. Ordinarily, the contents of the keystore backing the `SecureSettings` are not retrievable except during node initialization. This changes that by adding a new API which broadcasts a password to every node. The password is used to decrypt the local keystore, which is then used to populate a `Settings` object that is passed to all plugins implementing the `ReloadablePlugin` interface. Each plugin is then responsible for doing whatever "reload" means in its case. When the `reload` handler returns, the keystore is closed and its contents are no longer retrievable. The password is never stored persistently on any node. Plugins adapted in this commit are: `repository-azure`, `repository-s3`, `repository-gcs` and `discovery-ec2`.
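For orientation, here is a minimal sketch of a plugin implementing `ReloadablePlugin` under the contract described above. The `ReloadablePlugin` interface and the `reload(Settings)` hook come from this commit; the plugin class and the `mycloud.api_key` setting are invented for illustration only:

    package org.example.myplugin; // hypothetical plugin, for illustration only

    import org.elasticsearch.common.settings.SecureSetting;
    import org.elasticsearch.common.settings.SecureString;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.plugins.Plugin;
    import org.elasticsearch.plugins.ReloadablePlugin;

    public class MyCloudPlugin extends Plugin implements ReloadablePlugin {

        // a secure setting, readable only while the keystore is open
        static final Setting<SecureString> API_KEY_SETTING = SecureSetting.secureString("mycloud.api_key", null);

        private volatile String apiKey; // a real plugin would rebuild a client here

        @Override
        public void reload(Settings settings) {
            // invoked with the decrypted keystore contents backing `settings`;
            // once this method returns, the keystore is closed again
            try (SecureString key = API_KEY_SETTING.get(settings)) {
                this.apiKey = key.toString();
            }
        }
    }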
Commit 3378240b29 (parent 16fa6b270f)
AmazonEc2Reference.java (new file)
@@ -0,0 +1,61 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.discovery.ec2;

import com.amazonaws.services.ec2.AmazonEC2;

import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;

/**
 * Handles the shutdown of the wrapped {@link AmazonEC2} using reference
 * counting.
 */
public class AmazonEc2Reference extends AbstractRefCounted implements Releasable {

    private final AmazonEC2 client;

    AmazonEc2Reference(AmazonEC2 client) {
        super("AWS_EC2_CLIENT");
        this.client = client;
    }

    /**
     * Call when the client is not needed anymore.
     */
    @Override
    public void close() {
        decRef();
    }

    /**
     * Returns the underlying `AmazonEC2` client. All method calls are permitted BUT
     * NOT shutdown. Shutdown is called when reference count reaches 0.
     */
    public AmazonEC2 client() {
        return client;
    }

    @Override
    protected void closeInternal() {
        client.shutdown();
    }

}
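Since the reference is `Releasable`, call sites acquire the client with try-with-resources, mirroring what `AwsEc2UnicastHostsProvider` does further down in this diff. A short usage sketch (assuming `ec2Service` is an `AwsEc2Service` instance as defined in this commit):

    try (AmazonEc2Reference clientReference = ec2Service.client()) {
        // the wrapped client cannot be shut down while the reference is held
        clientReference.client().describeInstances();
    } // close() decrements the count; shutdown() runs when it reaches 0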
AwsEc2Service.java
@@ -19,22 +19,17 @@

 package org.elasticsearch.discovery.ec2;

-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.services.ec2.AmazonEC2;
-import org.elasticsearch.common.settings.SecureSetting;
-import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.unit.TimeValue;

+import java.io.Closeable;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-import java.util.Locale;
 import java.util.function.Function;

-interface AwsEc2Service {
+interface AwsEc2Service extends Closeable {
     Setting<Boolean> AUTO_ATTRIBUTE_SETTING = Setting.boolSetting("cloud.node.auto_attributes", false, Property.NodeScope);

     class HostType {
@@ -45,36 +40,6 @@ interface AwsEc2Service {
         public static final String TAG_PREFIX = "tag:";
     }

-    /** The access key (ie login id) for connecting to ec2. */
-    Setting<SecureString> ACCESS_KEY_SETTING = SecureSetting.secureString("discovery.ec2.access_key", null);
-
-    /** The secret key (ie password) for connecting to ec2. */
-    Setting<SecureString> SECRET_KEY_SETTING = SecureSetting.secureString("discovery.ec2.secret_key", null);
-
-    /** An override for the ec2 endpoint to connect to. */
-    Setting<String> ENDPOINT_SETTING = new Setting<>("discovery.ec2.endpoint", "",
-            s -> s.toLowerCase(Locale.ROOT), Property.NodeScope);
-
-    /** The protocol to use to connect to ec2. */
-    Setting<Protocol> PROTOCOL_SETTING = new Setting<>("discovery.ec2.protocol", "https",
-            s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope);
-
-    /** The host name of a proxy to connect to ec2 through. */
-    Setting<String> PROXY_HOST_SETTING = Setting.simpleString("discovery.ec2.proxy.host", Property.NodeScope);
-
-    /** The port of a proxy to connect to ec2 through. */
-    Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1 << 16, Property.NodeScope);
-
-    /** The username of a proxy to connect to ec2 through. */
-    Setting<SecureString> PROXY_USERNAME_SETTING = SecureSetting.secureString("discovery.ec2.proxy.username", null);
-
-    /** The password of a proxy to connect to ec2 through. */
-    Setting<SecureString> PROXY_PASSWORD_SETTING = SecureSetting.secureString("discovery.ec2.proxy.password", null);
-
-    /** The socket timeout for connecting to ec2. */
-    Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout",
-            TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope);

     /**
      * discovery.ec2.host_type: The type of host to use to communicate with other instances.
      * Can be one of private_ip, public_ip, private_dns, public_dns or tag:XXXX where
@@ -87,26 +52,24 @@ interface AwsEc2Service {
      * discovery.ec2.any_group: If set to false, will require all security groups to be present for the instance to be used for the
      * discovery. Defaults to true.
      */
-    Setting<Boolean> ANY_GROUP_SETTING =
-        Setting.boolSetting("discovery.ec2.any_group", true, Property.NodeScope);
+    Setting<Boolean> ANY_GROUP_SETTING = Setting.boolSetting("discovery.ec2.any_group", true, Property.NodeScope);
     /**
      * discovery.ec2.groups: Either a comma separated list or array based list of (security) groups. Only instances with the provided
      * security groups will be used in the cluster discovery. (NOTE: You could provide either group NAME or group ID.)
      */
-    Setting<List<String>> GROUPS_SETTING =
-        Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(), Property.NodeScope);
+    Setting<List<String>> GROUPS_SETTING = Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(),
+            Property.NodeScope);
     /**
      * discovery.ec2.availability_zones: Either a comma separated list or array based list of availability zones. Only instances within
      * the provided availability zones will be used in the cluster discovery.
      */
-    Setting<List<String>> AVAILABILITY_ZONES_SETTING =
-        Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(), s -> s.toString(),
-            Property.NodeScope);
+    Setting<List<String>> AVAILABILITY_ZONES_SETTING = Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(),
+            s -> s.toString(), Property.NodeScope);
     /**
      * discovery.ec2.node_cache_time: How long the list of hosts is cached to prevent further requests to the AWS API. Defaults to 10s.
      */
-    Setting<TimeValue> NODE_CACHE_TIME_SETTING =
-        Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10), Property.NodeScope);
+    Setting<TimeValue> NODE_CACHE_TIME_SETTING = Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10),
+            Property.NodeScope);

     /**
      * discovery.ec2.tag.*: The ec2 discovery can filter machines to include in the cluster based on tags (and not just groups).
@@ -115,7 +78,22 @@ interface AwsEc2Service {
      * instance to be included.
      */
     Setting.AffixSetting<List<String>> TAG_SETTING = Setting.prefixKeySetting("discovery.ec2.tag.",
-        key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope));
+            key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope));

+    /**
+     * Builds then caches an {@code AmazonEC2} client using the current client
+     * settings. Returns an {@code AmazonEc2Reference} wrapper which should be
+     * released as soon as it is not required anymore.
+     */
+    AmazonEc2Reference client();
+
+    /**
+     * Updates the settings for building the client and releases the cached one.
+     * Future client requests will use the new settings to lazily build the new
+     * client.
+     *
+     * @param clientSettings the new refreshed settings
+     */
+    void refreshAndClearCache(Ec2ClientSettings clientSettings);

-    AmazonEC2 client();
 }
AwsEc2ServiceImpl.java
@@ -19,12 +19,9 @@

 package org.elasticsearch.discovery.ec2;

-import java.io.Closeable;
-import java.io.IOException;
 import java.util.Random;
+import java.util.concurrent.atomic.AtomicReference;

-import com.amazonaws.AmazonClientException;
-import com.amazonaws.AmazonWebServiceRequest;
 import com.amazonaws.ClientConfiguration;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.BasicAWSCredentials;
@@ -35,112 +32,117 @@ import com.amazonaws.retry.RetryPolicy;
 import com.amazonaws.services.ec2.AmazonEC2;
 import com.amazonaws.services.ec2.AmazonEC2Client;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.LazyInitializable;

-class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service, Closeable {
+class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service {

     public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/";

-    private AmazonEC2Client client;
+    private final AtomicReference<LazyInitializable<AmazonEc2Reference, ElasticsearchException>> lazyClientReference =
+            new AtomicReference<>();

     AwsEc2ServiceImpl(Settings settings) {
         super(settings);
     }

-    @Override
-    public synchronized AmazonEC2 client() {
-        if (client != null) {
-            return client;
+    private AmazonEC2 buildClient(Ec2ClientSettings clientSettings) {
+        final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings);
+        final ClientConfiguration configuration = buildConfiguration(logger, clientSettings);
+        final AmazonEC2 client = buildClient(credentials, configuration);
+        if (Strings.hasText(clientSettings.endpoint)) {
+            logger.debug("using explicit ec2 endpoint [{}]", clientSettings.endpoint);
+            client.setEndpoint(clientSettings.endpoint);
         }
-
-        this.client = new AmazonEC2Client(buildCredentials(logger, settings), buildConfiguration(logger, settings));
-        String endpoint = findEndpoint(logger, settings);
-        if (endpoint != null) {
-            client.setEndpoint(endpoint);
-        }
-
-        return this.client;
+        return client;
     }

-    protected static AWSCredentialsProvider buildCredentials(Logger logger, Settings settings) {
-        AWSCredentialsProvider credentials;
-
-        try (SecureString key = ACCESS_KEY_SETTING.get(settings);
-             SecureString secret = SECRET_KEY_SETTING.get(settings)) {
-            if (key.length() == 0 && secret.length() == 0) {
-                logger.debug("Using either environment variables, system properties or instance profile credentials");
-                credentials = new DefaultAWSCredentialsProviderChain();
-            } else {
-                logger.debug("Using basic key/secret credentials");
-                credentials = new StaticCredentialsProvider(new BasicAWSCredentials(key.toString(), secret.toString()));
-            }
-        }
-
-        return credentials;
+    // proxy for testing
+    AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
+        final AmazonEC2 client = new AmazonEC2Client(credentials, configuration);
+        return client;
     }

-    protected static ClientConfiguration buildConfiguration(Logger logger, Settings settings) {
-        ClientConfiguration clientConfiguration = new ClientConfiguration();
+    // pkg private for tests
+    static ClientConfiguration buildConfiguration(Logger logger, Ec2ClientSettings clientSettings) {
+        final ClientConfiguration clientConfiguration = new ClientConfiguration();
         // the response metadata cache is only there for diagnostics purposes,
         // but can force objects from every response to the old generation.
         clientConfiguration.setResponseMetadataCacheSize(0);
-        clientConfiguration.setProtocol(PROTOCOL_SETTING.get(settings));
-
-        if (PROXY_HOST_SETTING.exists(settings)) {
-            String proxyHost = PROXY_HOST_SETTING.get(settings);
-            Integer proxyPort = PROXY_PORT_SETTING.get(settings);
-            try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings);
-                 SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)) {
-
-                clientConfiguration
-                    .withProxyHost(proxyHost)
-                    .withProxyPort(proxyPort)
-                    .withProxyUsername(proxyUsername.toString())
-                    .withProxyPassword(proxyPassword.toString());
-            }
+        clientConfiguration.setProtocol(clientSettings.protocol);
+        if (Strings.hasText(clientSettings.proxyHost)) {
+            // TODO: remove this leniency, these settings should exist together and be validated
+            clientConfiguration.setProxyHost(clientSettings.proxyHost);
+            clientConfiguration.setProxyPort(clientSettings.proxyPort);
+            clientConfiguration.setProxyUsername(clientSettings.proxyUsername);
+            clientConfiguration.setProxyPassword(clientSettings.proxyPassword);
         }
-
         // Increase the number of retries in case of 5xx API responses
         final Random rand = Randomness.get();
-        RetryPolicy retryPolicy = new RetryPolicy(
+        final RetryPolicy retryPolicy = new RetryPolicy(
             RetryPolicy.RetryCondition.NO_RETRY_CONDITION,
-            new RetryPolicy.BackoffStrategy() {
-                @Override
-                public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest,
-                                                 AmazonClientException exception,
-                                                 int retriesAttempted) {
-                    // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000)
-                    logger.warn("EC2 API request failed, retry again. Reason was:", exception);
-                    return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble()));
-                }
-            },
+            (originalRequest, exception, retriesAttempted) -> {
+                // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000)
+                logger.warn("EC2 API request failed, retry again. Reason was:", exception);
+                return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble()));
+            },
             10,
             false);
         clientConfiguration.setRetryPolicy(retryPolicy);
-        clientConfiguration.setSocketTimeout((int) READ_TIMEOUT_SETTING.get(settings).millis());
-
+        clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis);
         return clientConfiguration;
     }

-    protected static String findEndpoint(Logger logger, Settings settings) {
-        String endpoint = null;
-        if (ENDPOINT_SETTING.exists(settings)) {
-            endpoint = ENDPOINT_SETTING.get(settings);
-            logger.debug("using explicit ec2 endpoint [{}]", endpoint);
+    // pkg private for tests
+    static AWSCredentialsProvider buildCredentials(Logger logger, Ec2ClientSettings clientSettings) {
+        final BasicAWSCredentials credentials = clientSettings.credentials;
+        if (credentials == null) {
+            logger.debug("Using either environment variables, system properties or instance profile credentials");
+            return new DefaultAWSCredentialsProviderChain();
+        } else {
+            logger.debug("Using basic key/secret credentials");
+            return new StaticCredentialsProvider(credentials);
         }
-        return endpoint;
     }

     @Override
-    public void close() throws IOException {
-        if (client != null) {
-            client.shutdown();
+    public AmazonEc2Reference client() {
+        final LazyInitializable<AmazonEc2Reference, ElasticsearchException> clientReference = this.lazyClientReference.get();
+        if (clientReference == null) {
+            throw new IllegalStateException("Missing ec2 client configs");
         }
+        return clientReference.getOrCompute();
+    }

-        // Ensure that IdleConnectionReaper is shutdown
+    /**
+     * Refreshes the settings for the AmazonEC2 client. The new client will be built
+     * using these new settings. The old client is usable until released. On release it
+     * will be destroyed instead of being returned to the cache.
+     */
+    @Override
+    public void refreshAndClearCache(Ec2ClientSettings clientSettings) {
+        final LazyInitializable<AmazonEc2Reference, ElasticsearchException> newClient = new LazyInitializable<>(
+                () -> new AmazonEc2Reference(buildClient(clientSettings)), clientReference -> clientReference.incRef(),
+                clientReference -> clientReference.decRef());
+        final LazyInitializable<AmazonEc2Reference, ElasticsearchException> oldClient = this.lazyClientReference.getAndSet(newClient);
+        if (oldClient != null) {
+            oldClient.reset();
+        }
+    }
+
+    @Override
+    public void close() {
+        final LazyInitializable<AmazonEc2Reference, ElasticsearchException> clientReference = this.lazyClientReference.getAndSet(null);
+        if (clientReference != null) {
+            clientReference.reset();
+        }
+        // shutdown IdleConnectionReaper background thread
+        // it will be restarted on new client usage
         IdleConnectionReaper.shutdown();
     }

 }
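The lazy-swap pattern above is worth spelling out: the service keeps an `AtomicReference` to a lazily built, ref-counted client holder; a settings refresh atomically swaps in a fresh holder, while `reset()` releases the old client once all in-flight references are closed. The same idea in a self-contained sketch, with simplified stand-in types (not the Elasticsearch `LazyInitializable` API):

    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Supplier;

    // simplified stand-in for a lazily built client holder (illustration only)
    final class Lazy<T> {
        private final Supplier<T> builder;
        private volatile T value; // built at most once per settings generation

        Lazy(Supplier<T> builder) { this.builder = builder; }

        synchronized T getOrCompute() {
            if (value == null) {
                value = builder.get(); // first acquisition builds the client
            }
            return value;
        }

        synchronized void reset() {
            value = null; // a real implementation would decRef/release here
        }
    }

    final class SwappableClient {
        private final AtomicReference<Lazy<String>> ref = new AtomicReference<>();

        // mirrors refreshAndClearCache: swap in a new holder, reset the old one
        void refresh(String settings) {
            final Lazy<String> newClient = new Lazy<>(() -> "client built from " + settings);
            final Lazy<String> oldClient = ref.getAndSet(newClient);
            if (oldClient != null) {
                oldClient.reset();
            }
        }

        // mirrors client(): fails if never configured, otherwise builds lazily
        String client() {
            final Lazy<String> current = ref.get();
            if (current == null) {
                throw new IllegalStateException("not configured yet");
            }
            return current.getOrCompute();
        }
    }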
AwsEc2UnicastHostsProvider.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.discovery.ec2;

 import com.amazonaws.AmazonClientException;
-import com.amazonaws.services.ec2.AmazonEC2;
 import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
 import com.amazonaws.services.ec2.model.DescribeInstancesResult;
 import com.amazonaws.services.ec2.model.Filter;
@@ -59,7 +58,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {

     private final TransportService transportService;

-    private final AmazonEC2 client;
+    private final AwsEc2Service awsEc2Service;

     private final boolean bindAnyGroup;
@@ -76,7 +75,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
     AwsEc2UnicastHostsProvider(Settings settings, TransportService transportService, AwsEc2Service awsEc2Service) {
         super(settings);
         this.transportService = transportService;
-        this.client = awsEc2Service.client();
+        this.awsEc2Service = awsEc2Service;

         this.hostType = AwsEc2Service.HOST_TYPE_SETTING.get(settings);
         this.discoNodes = new DiscoNodesCache(AwsEc2Service.NODE_CACHE_TIME_SETTING.get(settings));
@@ -103,31 +102,31 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {

     protected List<DiscoveryNode> fetchDynamicNodes() {

-        List<DiscoveryNode> discoNodes = new ArrayList<>();
+        final List<DiscoveryNode> discoNodes = new ArrayList<>();

-        DescribeInstancesResult descInstances;
-        try {
+        final DescribeInstancesResult descInstances;
+        try (AmazonEc2Reference clientReference = awsEc2Service.client()) {
             // Query EC2 API based on AZ, instance state, and tag.

             // NOTE: we don't filter by security group during the describe instances request for two reasons:
             // 1. differences in VPCs require different parameters during query (ID vs Name)
             // 2. We want to use two different strategies: (all security groups vs. any security groups)
-            descInstances = SocketAccess.doPrivileged(() -> client.describeInstances(buildDescribeInstancesRequest()));
-        } catch (AmazonClientException e) {
+            descInstances = SocketAccess.doPrivileged(() -> clientReference.client().describeInstances(buildDescribeInstancesRequest()));
+        } catch (final AmazonClientException e) {
             logger.info("Exception while retrieving instance list from AWS API: {}", e.getMessage());
             logger.debug("Full exception:", e);
             return discoNodes;
         }

         logger.trace("building dynamic unicast discovery nodes...");
-        for (Reservation reservation : descInstances.getReservations()) {
-            for (Instance instance : reservation.getInstances()) {
+        for (final Reservation reservation : descInstances.getReservations()) {
+            for (final Instance instance : reservation.getInstances()) {
                 // lets see if we can filter based on groups
                 if (!groups.isEmpty()) {
-                    List<GroupIdentifier> instanceSecurityGroups = instance.getSecurityGroups();
-                    List<String> securityGroupNames = new ArrayList<>(instanceSecurityGroups.size());
-                    List<String> securityGroupIds = new ArrayList<>(instanceSecurityGroups.size());
-                    for (GroupIdentifier sg : instanceSecurityGroups) {
+                    final List<GroupIdentifier> instanceSecurityGroups = instance.getSecurityGroups();
+                    final List<String> securityGroupNames = new ArrayList<>(instanceSecurityGroups.size());
+                    final List<String> securityGroupIds = new ArrayList<>(instanceSecurityGroups.size());
+                    for (final GroupIdentifier sg : instanceSecurityGroups) {
                         securityGroupNames.add(sg.getGroupName());
                         securityGroupIds.add(sg.getGroupId());
                     }
@@ -162,10 +161,10 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
                     address = instance.getPublicIpAddress();
                 } else if (hostType.startsWith(TAG_PREFIX)) {
                     // Reading the node host from its metadata
-                    String tagName = hostType.substring(TAG_PREFIX.length());
+                    final String tagName = hostType.substring(TAG_PREFIX.length());
                     logger.debug("reading hostname from [{}] instance tag", tagName);
-                    List<Tag> tags = instance.getTags();
-                    for (Tag tag : tags) {
+                    final List<Tag> tags = instance.getTags();
+                    for (final Tag tag : tags) {
                         if (tag.getKey().equals(tagName)) {
                             address = tag.getValue();
                             logger.debug("using [{}] as the instance address", address);
@@ -177,13 +176,13 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
                 if (address != null) {
                     try {
                         // we only limit to 1 port per address, makes no sense to ping 100 ports
-                        TransportAddress[] addresses = transportService.addressesFromString(address, 1);
+                        final TransportAddress[] addresses = transportService.addressesFromString(address, 1);
                         for (int i = 0; i < addresses.length; i++) {
                             logger.trace("adding {}, address {}, transport_address {}", instance.getInstanceId(), address, addresses[i]);
                             discoNodes.add(new DiscoveryNode(instance.getInstanceId(), "#cloud-" + instance.getInstanceId() + "-" + i,
                                     addresses[i], emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()));
                         }
-                    } catch (Exception e) {
+                    } catch (final Exception e) {
                         final String finalAddress = address;
                         logger.warn(
                             (Supplier<?>)
@@ -201,12 +200,12 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
     }

     private DescribeInstancesRequest buildDescribeInstancesRequest() {
-        DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest()
+        final DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest()
             .withFilters(
                 new Filter("instance-state-name").withValues("running", "pending")
             );

-        for (Map.Entry<String, List<String>> tagFilter : tags.entrySet()) {
+        for (final Map.Entry<String, List<String>> tagFilter : tags.entrySet()) {
             // for a given tag key, OR relationship for multiple different values
             describeInstancesRequest.withFilters(
                 new Filter("tag:" + tagFilter.getKey()).withValues(tagFilter.getValue())
@@ -238,7 +237,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {

         @Override
         protected List<DiscoveryNode> refresh() {
-            List<DiscoveryNode> nodes = fetchDynamicNodes();
+            final List<DiscoveryNode> nodes = fetchDynamicNodes();
             empty = nodes.isEmpty();
             return nodes;
         }
Ec2ClientSettings.java (new file)
@@ -0,0 +1,145 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.discovery.ec2;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.BasicAWSCredentials;

import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Locale;

/**
 * A container for settings used to create an EC2 client.
 */
final class Ec2ClientSettings {

    /** The access key (ie login id) for connecting to ec2. */
    static final Setting<SecureString> ACCESS_KEY_SETTING = SecureSetting.secureString("discovery.ec2.access_key", null);

    /** The secret key (ie password) for connecting to ec2. */
    static final Setting<SecureString> SECRET_KEY_SETTING = SecureSetting.secureString("discovery.ec2.secret_key", null);

    /** The host name of a proxy to connect to ec2 through. */
    static final Setting<String> PROXY_HOST_SETTING = Setting.simpleString("discovery.ec2.proxy.host", Property.NodeScope);

    /** The port of a proxy to connect to ec2 through. */
    static final Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1 << 16, Property.NodeScope);

    /** An override for the ec2 endpoint to connect to. */
    static final Setting<String> ENDPOINT_SETTING = new Setting<>("discovery.ec2.endpoint", "", s -> s.toLowerCase(Locale.ROOT),
            Property.NodeScope);

    /** The protocol to use to connect to ec2. */
    static final Setting<Protocol> PROTOCOL_SETTING = new Setting<>("discovery.ec2.protocol", "https",
            s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope);

    /** The username of a proxy to connect to ec2 through. */
    static final Setting<SecureString> PROXY_USERNAME_SETTING = SecureSetting.secureString("discovery.ec2.proxy.username", null);

    /** The password of a proxy to connect to ec2 through. */
    static final Setting<SecureString> PROXY_PASSWORD_SETTING = SecureSetting.secureString("discovery.ec2.proxy.password", null);

    /** The socket timeout for connecting to ec2. */
    static final Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout",
            TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope);

    /** Credentials to authenticate with ec2. */
    final BasicAWSCredentials credentials;

    /**
     * The ec2 endpoint the client should talk to, or empty string to use the
     * default.
     */
    final String endpoint;

    /** The protocol to use to talk to ec2. Defaults to https. */
    final Protocol protocol;

    /** An optional proxy host that requests to ec2 should be made through. */
    final String proxyHost;

    /** The port number the proxy host should be connected on. */
    final int proxyPort;

    // these should be "secure" yet the api for the ec2 client only takes String,
    // so storing them as SecureString here won't really help with anything

    /** An optional username for the proxy host, for basic authentication. */
    final String proxyUsername;

    /** An optional password for the proxy host, for basic authentication. */
    final String proxyPassword;

    /** The read timeout for the ec2 client. */
    final int readTimeoutMillis;

    protected Ec2ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort,
            String proxyUsername, String proxyPassword, int readTimeoutMillis) {
        this.credentials = credentials;
        this.endpoint = endpoint;
        this.protocol = protocol;
        this.proxyHost = proxyHost;
        this.proxyPort = proxyPort;
        this.proxyUsername = proxyUsername;
        this.proxyPassword = proxyPassword;
        this.readTimeoutMillis = readTimeoutMillis;
    }

    static BasicAWSCredentials loadCredentials(Settings settings) {
        try (SecureString accessKey = ACCESS_KEY_SETTING.get(settings);
             SecureString secretKey = SECRET_KEY_SETTING.get(settings)) {
            if (accessKey.length() != 0) {
                if (secretKey.length() != 0) {
                    return new BasicAWSCredentials(accessKey.toString(), secretKey.toString());
                } else {
                    throw new IllegalArgumentException("Missing secret key for ec2 client.");
                }
            } else if (secretKey.length() != 0) {
                throw new IllegalArgumentException("Missing access key for ec2 client.");
            }
            return null;
        }
    }

    // pkg private for tests
    /** Parse settings for a single client. */
    static Ec2ClientSettings getClientSettings(Settings settings) {
        final BasicAWSCredentials credentials = loadCredentials(settings);
        try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings);
             SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)) {
            return new Ec2ClientSettings(
                    credentials,
                    ENDPOINT_SETTING.get(settings),
                    PROTOCOL_SETTING.get(settings),
                    PROXY_HOST_SETTING.get(settings),
                    PROXY_PORT_SETTING.get(settings),
                    proxyUsername.toString(),
                    proxyPassword.toString(),
                    (int) READ_TIMEOUT_SETTING.get(settings).millis());
        }
    }

}
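A quick illustration of the both-or-neither credential validation in `loadCredentials` above (a sketch in test scope, using `MockSecureSettings` as the secure-settings source, as the tests below also do):

    final MockSecureSettings secure = new MockSecureSettings();
    secure.setString("discovery.ec2.access_key", "ec2_access");
    secure.setString("discovery.ec2.secret_key", "ec2_secret");
    final Settings settings = Settings.builder().setSecureSettings(secure).build();
    final BasicAWSCredentials credentials = Ec2ClientSettings.loadCredentials(settings);
    assert credentials.getAWSAccessKeyId().equals("ec2_access");
    // with exactly one of the two keys set, loadCredentials throws
    // IllegalArgumentException; with neither set, it returns null and the
    // default AWS provider chain is used instead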
Ec2DiscoveryPlugin.java
@@ -21,8 +21,6 @@ package org.elasticsearch.discovery.ec2;

 import com.amazonaws.util.json.Jackson;
 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.core.internal.io.IOUtils;
-import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.logging.Loggers;
@@ -33,10 +31,10 @@ import org.elasticsearch.discovery.zen.UnicastHostsProvider;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.plugins.DiscoveryPlugin;
 import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.ReloadablePlugin;
 import org.elasticsearch.transport.TransportService;

 import java.io.BufferedReader;
-import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -52,7 +50,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.function.Supplier;

-public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
+public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, ReloadablePlugin {

     private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class);
     public static final String EC2 = "ec2";
@@ -68,22 +66,27 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
             // ClientConfiguration clinit has some classloader problems
             // TODO: fix that
             Class.forName("com.amazonaws.ClientConfiguration");
-        } catch (ClassNotFoundException e) {
+        } catch (final ClassNotFoundException e) {
             throw new RuntimeException(e);
         }
         return null;
         });
     }

-    private Settings settings;
-    // stashed when created in order to properly close
-    private final SetOnce<AwsEc2ServiceImpl> ec2Service = new SetOnce<>();
+    private final Settings settings;
+    // protected for testing
+    protected final AwsEc2Service ec2Service;

     public Ec2DiscoveryPlugin(Settings settings) {
-        this.settings = settings;
+        this(settings, new AwsEc2ServiceImpl(settings));
     }

+    protected Ec2DiscoveryPlugin(Settings settings, AwsEc2ServiceImpl ec2Service) {
+        this.settings = settings;
+        this.ec2Service = ec2Service;
+        // eagerly load client settings when secure settings are accessible
+        reload(settings);
+    }
+
     @Override
     public NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) {
@@ -94,25 +97,22 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
     @Override
     public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(TransportService transportService,
                                                                             NetworkService networkService) {
-        return Collections.singletonMap(EC2, () -> {
-            ec2Service.set(new AwsEc2ServiceImpl(settings));
-            return new AwsEc2UnicastHostsProvider(settings, transportService, ec2Service.get());
-        });
+        return Collections.singletonMap(EC2, () -> new AwsEc2UnicastHostsProvider(settings, transportService, ec2Service));
     }

     @Override
     public List<Setting<?>> getSettings() {
         return Arrays.asList(
             // Register EC2 discovery settings: discovery.ec2
-            AwsEc2Service.ACCESS_KEY_SETTING,
-            AwsEc2Service.SECRET_KEY_SETTING,
-            AwsEc2Service.ENDPOINT_SETTING,
-            AwsEc2Service.PROTOCOL_SETTING,
-            AwsEc2Service.PROXY_HOST_SETTING,
-            AwsEc2Service.PROXY_PORT_SETTING,
-            AwsEc2Service.PROXY_USERNAME_SETTING,
-            AwsEc2Service.PROXY_PASSWORD_SETTING,
-            AwsEc2Service.READ_TIMEOUT_SETTING,
+            Ec2ClientSettings.ACCESS_KEY_SETTING,
+            Ec2ClientSettings.SECRET_KEY_SETTING,
+            Ec2ClientSettings.ENDPOINT_SETTING,
+            Ec2ClientSettings.PROTOCOL_SETTING,
+            Ec2ClientSettings.PROXY_HOST_SETTING,
+            Ec2ClientSettings.PROXY_PORT_SETTING,
+            Ec2ClientSettings.PROXY_USERNAME_SETTING,
+            Ec2ClientSettings.PROXY_PASSWORD_SETTING,
+            Ec2ClientSettings.READ_TIMEOUT_SETTING,
             AwsEc2Service.HOST_TYPE_SETTING,
             AwsEc2Service.ANY_GROUP_SETTING,
             AwsEc2Service.GROUPS_SETTING,
@@ -125,10 +125,10 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {

     @Override
     public Settings additionalSettings() {
-        Settings.Builder builder = Settings.builder();
+        final Settings.Builder builder = Settings.builder();

         // Adds a node attribute for the ec2 availability zone
-        String azMetadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone";
+        final String azMetadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone";
         builder.put(getAvailabilityZoneNodeAttributes(settings, azMetadataUrl));
         return builder.build();
     }
@@ -139,7 +139,7 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
         if (AwsEc2Service.AUTO_ATTRIBUTE_SETTING.get(settings) == false) {
             return Settings.EMPTY;
         }
-        Settings.Builder attrs = Settings.builder();
+        final Settings.Builder attrs = Settings.builder();

         final URL url;
         final URLConnection urlConnection;
@@ -148,7 +148,7 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
             logger.debug("obtaining ec2 [placement/availability-zone] from ec2 meta-data url {}", url);
             urlConnection = SocketAccess.doPrivilegedIOException(url::openConnection);
             urlConnection.setConnectTimeout(2000);
-        } catch (IOException e) {
+        } catch (final IOException e) {
             // should not happen, we know the url is not malformed, and openConnection does not actually hit network
             throw new UncheckedIOException(e);
         }
@@ -156,13 +156,13 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {
         try (InputStream in = SocketAccess.doPrivilegedIOException(urlConnection::getInputStream);
              BufferedReader urlReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {

-            String metadataResult = urlReader.readLine();
-            if (metadataResult == null || metadataResult.length() == 0) {
+            final String metadataResult = urlReader.readLine();
+            if ((metadataResult == null) || (metadataResult.length() == 0)) {
                 throw new IllegalStateException("no ec2 metadata returned from " + url);
             } else {
                 attrs.put(Node.NODE_ATTRIBUTES.getKey() + "aws_availability_zone", metadataResult);
             }
-        } catch (IOException e) {
+        } catch (final IOException e) {
             // this is lenient so the plugin does not fail when installed outside of ec2
             logger.error("failed to get metadata for [placement/availability-zone]", e);
         }
@@ -172,6 +172,13 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {

     @Override
     public void close() throws IOException {
-        IOUtils.close(ec2Service.get());
+        ec2Service.close();
     }
+
+    @Override
+    public void reload(Settings settings) {
+        // secure settings should be readable
+        final Ec2ClientSettings clientSettings = Ec2ClientSettings.getClientSettings(settings);
+        ec2Service.refreshAndClearCache(clientSettings);
+    }
 }
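The `reload` override above is the whole integration point for this plugin: once the node-level reload API has decrypted the keystore, the plugin re-parses its client settings and swaps the cached client. A condensed sketch of the sequence, using names from this diff (`keystoreSettings` is a stand-in for the `Settings` object the node builds from the decrypted keystore, and `ec2Service` is only package/subclass accessible, as in the tests below):

    final Settings keystoreSettings = Settings.EMPTY; // stand-in for decrypted keystore contents
    final Ec2DiscoveryPlugin plugin = new Ec2DiscoveryPlugin(Settings.EMPTY);

    plugin.reload(keystoreSettings);
    // -> Ec2ClientSettings.getClientSettings(keystoreSettings)  reads the secure settings
    // -> ec2Service.refreshAndClearCache(clientSettings)        swaps the lazy client holder

    // the next acquisition lazily builds a client from the new settings
    try (AmazonEc2Reference ref = plugin.ec2Service.client()) {
        ref.client().describeInstances();
    }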
AmazonEC2Mock.java
@@ -22,7 +22,9 @@ package org.elasticsearch.discovery.ec2;
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
 import com.amazonaws.AmazonWebServiceRequest;
+import com.amazonaws.ClientConfiguration;
 import com.amazonaws.ResponseMetadata;
+import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.regions.Region;
 import com.amazonaws.services.ec2.AmazonEC2;
 import com.amazonaws.services.ec2.model.AcceptVpcPeeringConnectionRequest;
@@ -528,9 +530,12 @@ public class AmazonEC2Mock implements AmazonEC2 {
     public static final String PREFIX_PRIVATE_DNS = "mock-ip-";
     public static final String SUFFIX_PRIVATE_DNS = ".ec2.internal";

-    List<Instance> instances = new ArrayList<>();
+    final List<Instance> instances = new ArrayList<>();
+    String endpoint;
+    final AWSCredentialsProvider credentials;
+    final ClientConfiguration configuration;

-    public AmazonEC2Mock(int nodes, List<List<Tag>> tagsList) {
+    public AmazonEC2Mock(int nodes, List<List<Tag>> tagsList, AWSCredentialsProvider credentials, ClientConfiguration configuration) {
         if (tagsList != null) {
             assert tagsList.size() == nodes;
         }
@@ -552,7 +557,8 @@ public class AmazonEC2Mock implements AmazonEC2 {

             instances.add(instance);
         }
-
+        this.credentials = credentials;
+        this.configuration = configuration;
     }

     @Override
@@ -642,7 +648,7 @@ public class AmazonEC2Mock implements AmazonEC2 {

     @Override
     public void setEndpoint(String endpoint) throws IllegalArgumentException {
-        throw new UnsupportedOperationException("Not supported in mock");
+        this.endpoint = endpoint;
     }

     @Override
@@ -2110,7 +2116,6 @@ public class AmazonEC2Mock implements AmazonEC2 {

     @Override
     public void shutdown() {
-        throw new UnsupportedOperationException("Not supported in mock");
     }

     @Override
AwsEc2ServiceImplTests.java
@@ -26,31 +26,31 @@ import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
 import org.elasticsearch.common.settings.MockSecureSettings;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.ec2.AwsEc2Service;
-import org.elasticsearch.discovery.ec2.AwsEc2ServiceImpl;
 import org.elasticsearch.test.ESTestCase;

 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.nullValue;

 public class AwsEc2ServiceImplTests extends ESTestCase {

     public void testAWSCredentialsWithSystemProviders() {
-        AWSCredentialsProvider credentialsProvider = AwsEc2ServiceImpl.buildCredentials(logger, Settings.EMPTY);
+        final AWSCredentialsProvider credentialsProvider = AwsEc2ServiceImpl.buildCredentials(logger,
+                Ec2ClientSettings.getClientSettings(Settings.EMPTY));
         assertThat(credentialsProvider, instanceOf(DefaultAWSCredentialsProviderChain.class));
     }

     public void testAWSCredentialsWithElasticsearchAwsSettings() {
-        MockSecureSettings secureSettings = new MockSecureSettings();
+        final MockSecureSettings secureSettings = new MockSecureSettings();
         secureSettings.setString("discovery.ec2.access_key", "aws_key");
         secureSettings.setString("discovery.ec2.secret_key", "aws_secret");
-        Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
+        final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
         launchAWSCredentialsWithElasticsearchSettingsTest(settings, "aws_key", "aws_secret");
     }

     protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings settings, String expectedKey, String expectedSecret) {
-        AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, settings).getCredentials();
+        final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, Ec2ClientSettings.getClientSettings(settings))
+                .getCredentials();
         assertThat(credentials.getAWSAccessKeyId(), is(expectedKey));
         assertThat(credentials.getAWSSecretKey(), is(expectedSecret));
     }
@@ -61,10 +61,10 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
     }

     public void testAWSConfigurationWithAwsSettings() {
-        MockSecureSettings secureSettings = new MockSecureSettings();
+        final MockSecureSettings secureSettings = new MockSecureSettings();
         secureSettings.setString("discovery.ec2.proxy.username", "aws_proxy_username");
         secureSettings.setString("discovery.ec2.proxy.password", "aws_proxy_password");
-        Settings settings = Settings.builder()
+        final Settings settings = Settings.builder()
             .put("discovery.ec2.protocol", "http")
             .put("discovery.ec2.proxy.host", "aws_proxy_host")
             .put("discovery.ec2.proxy.port", 8080)
@@ -81,7 +81,8 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
                                                  String expectedProxyUsername,
                                                  String expectedProxyPassword,
                                                  int expectedReadTimeout) {
-        ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(logger, settings);
+        final ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(logger,
+                Ec2ClientSettings.getClientSettings(settings));

         assertThat(configuration.getResponseMetadataCacheSize(), is(0));
         assertThat(configuration.getProtocol(), is(expectedProtocol));
@@ -92,16 +93,4 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
         assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout));
     }

-    public void testDefaultEndpoint() {
-        String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, Settings.EMPTY);
-        assertThat(endpoint, nullValue());
-    }
-
-    public void testSpecificEndpoint() {
-        Settings settings = Settings.builder()
-                .put(AwsEc2Service.ENDPOINT_SETTING.getKey(), "ec2.endpoint")
-                .build();
-        String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, settings);
-        assertThat(endpoint, is("ec2.endpoint"));
-    }
 }
AwsEc2ServiceMock.java
@@ -19,18 +19,19 @@

 package org.elasticsearch.discovery.ec2;

+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.ec2.AmazonEC2;
 import com.amazonaws.services.ec2.model.Tag;
-import org.elasticsearch.common.component.AbstractLifecycleComponent;

 import org.elasticsearch.common.settings.Settings;

 import java.util.List;

-public class AwsEc2ServiceMock extends AbstractLifecycleComponent implements AwsEc2Service {
+public class AwsEc2ServiceMock extends AwsEc2ServiceImpl {

-    private int nodes;
-    private List<List<Tag>> tagsList;
-    private AmazonEC2 client;
+    private final int nodes;
+    private final List<List<Tag>> tagsList;

     public AwsEc2ServiceMock(Settings settings, int nodes, List<List<Tag>> tagsList) {
         super(settings);
@@ -39,26 +40,8 @@ public class AwsEc2ServiceMock extends AbstractLifecycleComponent implements AwsEc2Service {
     }

     @Override
-    public synchronized AmazonEC2 client() {
-        if (client == null) {
-            client = new AmazonEC2Mock(nodes, tagsList);
-        }
-
-        return client;
+    AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
+        return new AmazonEC2Mock(nodes, tagsList, credentials, configuration);
     }

-    @Override
-    protected void doStart() {
-
-    }
-
-    @Override
-    protected void doStop() {
-
-    }
-
-    @Override
-    protected void doClose() {
-
-    }
 }
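Note how the mock plugs into the package-private `buildClient(credentials, configuration)` seam added to `AwsEc2ServiceImpl` above: the lazy construction and reference counting are exercised for real while only the AWS client itself is faked. The seam pattern in miniature (a generic sketch, not the Elasticsearch classes):

    class Service {
        Object client() {
            return buildClient(); // production path goes through the seam
        }

        // package-private seam; production builds the real client
        Object buildClient() {
            return new Object();
        }
    }

    class ServiceMock extends Service {
        @Override
        Object buildClient() {
            return "mock client"; // tests substitute a fake at the seam
        }
    }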
Ec2DiscoveryPluginMock.java (replaces AzureServiceDisableException.java in this diff pairing)
@@ -17,14 +17,22 @@
  * under the License.
  */

-package org.elasticsearch.repositories.azure;
+package org.elasticsearch.discovery.ec2;

-public class AzureServiceDisableException extends IllegalStateException {
-    public AzureServiceDisableException(String msg) {
-        super(msg);
+import com.amazonaws.services.ec2.model.Tag;
+
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.List;
+
+public class Ec2DiscoveryPluginMock extends Ec2DiscoveryPlugin {
+
+    Ec2DiscoveryPluginMock(Settings settings) {
+        this(settings, 1, null);
     }

-    public AzureServiceDisableException(String msg, Throwable cause) {
-        super(msg, cause);
+    public Ec2DiscoveryPluginMock(Settings settings, int nodes, List<List<Tag>> tagsList) {
+        super(settings, new AwsEc2ServiceMock(settings, nodes, tagsList));
     }

 }
Ec2DiscoveryPluginTests.java
@@ -19,12 +19,17 @@

 package org.elasticsearch.discovery.ec2;

+import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.Arrays;

 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;

-import org.elasticsearch.discovery.ec2.AwsEc2Service;
+import org.elasticsearch.common.settings.MockSecureSettings;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.ec2.Ec2DiscoveryPlugin;
 import org.elasticsearch.node.Node;
@@ -33,14 +38,14 @@ import org.elasticsearch.test.ESTestCase;
 public class Ec2DiscoveryPluginTests extends ESTestCase {

     private Settings getNodeAttributes(Settings settings, String url) {
-        Settings realSettings = Settings.builder()
+        final Settings realSettings = Settings.builder()
             .put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), true)
             .put(settings).build();
         return Ec2DiscoveryPlugin.getAvailabilityZoneNodeAttributes(realSettings, url);
     }

     private void assertNodeAttributes(Settings settings, String url, String expected) {
-        Settings additional = getNodeAttributes(settings, url);
+        final Settings additional = getNodeAttributes(settings, url);
         if (expected == null) {
             assertTrue(additional.isEmpty());
         } else {
@ -49,36 +54,106 @@ public class Ec2DiscoveryPluginTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testNodeAttributesDisabled() {
|
||||
Settings settings = Settings.builder()
|
||||
final Settings settings = Settings.builder()
|
||||
.put(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), false).build();
|
||||
assertNodeAttributes(settings, "bogus", null);
|
||||
}
|
||||
|
||||
public void testNodeAttributes() throws Exception {
|
||||
Path zoneUrl = createTempFile();
|
||||
final Path zoneUrl = createTempFile();
|
||||
Files.write(zoneUrl, Arrays.asList("us-east-1c"));
|
||||
assertNodeAttributes(Settings.EMPTY, zoneUrl.toUri().toURL().toString(), "us-east-1c");
|
||||
}
|
||||
|
||||
public void testNodeAttributesBogusUrl() {
|
||||
UncheckedIOException e = expectThrows(UncheckedIOException.class, () ->
|
||||
final UncheckedIOException e = expectThrows(UncheckedIOException.class, () ->
|
||||
getNodeAttributes(Settings.EMPTY, "bogus")
|
||||
);
|
||||
assertNotNull(e.getCause());
|
||||
String msg = e.getCause().getMessage();
|
||||
final String msg = e.getCause().getMessage();
|
||||
assertTrue(msg, msg.contains("no protocol: bogus"));
|
||||
}
|
||||
|
||||
public void testNodeAttributesEmpty() throws Exception {
|
||||
Path zoneUrl = createTempFile();
|
||||
IllegalStateException e = expectThrows(IllegalStateException.class, () ->
|
||||
final Path zoneUrl = createTempFile();
|
||||
final IllegalStateException e = expectThrows(IllegalStateException.class, () ->
|
||||
getNodeAttributes(Settings.EMPTY, zoneUrl.toUri().toURL().toString())
|
||||
);
|
||||
assertTrue(e.getMessage(), e.getMessage().contains("no ec2 metadata returned"));
|
||||
}
|
||||
|
||||
public void testNodeAttributesErrorLenient() throws Exception {
|
||||
Path dne = createTempDir().resolve("dne");
|
||||
final Path dne = createTempDir().resolve("dne");
|
||||
assertNodeAttributes(Settings.EMPTY, dne.toUri().toURL().toString(), null);
|
||||
}
|
||||
|
||||
public void testDefaultEndpoint() throws IOException {
|
||||
try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY)) {
|
||||
final String endpoint = ((AmazonEC2Mock) plugin.ec2Service.client().client()).endpoint;
|
||||
assertThat(endpoint, nullValue());
|
||||
}
|
||||
}
|
||||
|
||||
public void testSpecificEndpoint() throws IOException {
|
||||
final Settings settings = Settings.builder().put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2.endpoint").build();
|
||||
try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(settings)) {
|
||||
final String endpoint = ((AmazonEC2Mock) plugin.ec2Service.client().client()).endpoint;
|
||||
assertThat(endpoint, is("ec2.endpoint"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testClientSettingsReInit() throws IOException {
final MockSecureSettings mockSecure1 = new MockSecureSettings();
mockSecure1.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "ec2_access_1");
mockSecure1.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret_1");
mockSecure1.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_1");
mockSecure1.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_1");
final Settings settings1 = Settings.builder()
.put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_1")
.put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 881)
.put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_1")
.setSecureSettings(mockSecure1)
.build();
final MockSecureSettings mockSecure2 = new MockSecureSettings();
mockSecure2.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "ec2_access_2");
mockSecure2.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret_2");
mockSecure2.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_2");
mockSecure2.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_2");
final Settings settings2 = Settings.builder()
.put(Ec2ClientSettings.PROXY_HOST_SETTING.getKey(), "proxy_host_2")
.put(Ec2ClientSettings.PROXY_PORT_SETTING.getKey(), 882)
.put(Ec2ClientSettings.ENDPOINT_SETTING.getKey(), "ec2_endpoint_2")
.setSecureSettings(mockSecure2)
.build();
try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(settings1)) {
try (AmazonEc2Reference clientReference = plugin.ec2Service.client()) {
assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881));
assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1"));
// reload with the second set of secure settings
plugin.reload(settings2);
// the held client reference has not been released, so it still uses the old settings
assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881));
assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1"));
}
try (AmazonEc2Reference clientReference = plugin.ec2Service.client()) {
assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_2"));
assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_2"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_2"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_2"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_2"));
assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(882));
assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_2"));
}
}
}
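The assertions above pin down the reload contract: a client reference acquired before reload() keeps its original credentials until it is closed, and only references acquired afterwards see the new settings. A minimal sketch of that contract, reusing the test's plugin and settings2 (the DescribeInstancesRequest call is illustrative only, not part of this commit):

    try (AmazonEc2Reference reference = plugin.ec2Service.client()) { // acquiring bumps the reference count
        plugin.reload(settings2); // future client() calls will be built from settings2
        reference.client().describeInstances(new DescribeInstancesRequest()); // still served by the old client
    } // close() releases the reference; the superseded client shuts down once its count reaches zero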
@ -39,6 +39,7 @@ import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;

import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
@ -91,11 +92,15 @@ public class Ec2DiscoveryTests extends ESTestCase {
}

protected List<DiscoveryNode> buildDynamicNodes(Settings nodeSettings, int nodes, List<List<Tag>> tagsList) {
AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(nodeSettings, nodes, tagsList);
AwsEc2UnicastHostsProvider provider = new AwsEc2UnicastHostsProvider(nodeSettings, transportService, awsEc2Service);
List<DiscoveryNode> discoveryNodes = provider.buildDynamicNodes();
logger.debug("--> nodes found: {}", discoveryNodes);
return discoveryNodes;
try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY, nodes, tagsList)) {
AwsEc2UnicastHostsProvider provider = new AwsEc2UnicastHostsProvider(nodeSettings, transportService, plugin.ec2Service);
List<DiscoveryNode> discoveryNodes = provider.buildDynamicNodes();
logger.debug("--> nodes found: {}", discoveryNodes);
return discoveryNodes;
} catch (IOException e) {
fail("Unexpected IOException");
return null;
}
}

public void testDefaultSettings() throws InterruptedException {
@ -315,22 +320,23 @@ public class Ec2DiscoveryTests extends ESTestCase {
public void testGetNodeListCached() throws Exception {
Settings.Builder builder = Settings.builder()
.put(AwsEc2Service.NODE_CACHE_TIME_SETTING.getKey(), "500ms");
AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(Settings.EMPTY, 1, null);
DummyEc2HostProvider provider = new DummyEc2HostProvider(builder.build(), transportService, awsEc2Service) {
@Override
protected List<DiscoveryNode> fetchDynamicNodes() {
fetchCount++;
return Ec2DiscoveryTests.this.buildDynamicNodes(Settings.EMPTY, 1);
try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(Settings.EMPTY)) {
DummyEc2HostProvider provider = new DummyEc2HostProvider(builder.build(), transportService, plugin.ec2Service) {
@Override
protected List<DiscoveryNode> fetchDynamicNodes() {
fetchCount++;
return Ec2DiscoveryTests.this.buildDynamicNodes(Settings.EMPTY, 1);
}
};
for (int i=0; i<3; i++) {
provider.buildDynamicNodes();
}
};
for (int i=0; i<3; i++) {
provider.buildDynamicNodes();
assertThat(provider.fetchCount, is(1));
Thread.sleep(1_000L); // wait for cache to expire
for (int i=0; i<3; i++) {
provider.buildDynamicNodes();
}
assertThat(provider.fetchCount, is(2));
}
assertThat(provider.fetchCount, is(1));
Thread.sleep(1_000L); // wait for cache to expire
for (int i=0; i<3; i++) {
provider.buildDynamicNodes();
}
assertThat(provider.fetchCount, is(2));
}
}
@ -20,46 +20,44 @@
package org.elasticsearch.repositories.azure;

import com.microsoft.azure.storage.LocationMode;

import com.microsoft.azure.storage.StorageException;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.nio.file.FileAlreadyExistsException;
import java.util.Locale;
import java.util.Map;

import static java.util.Collections.emptyMap;

import static org.elasticsearch.repositories.azure.AzureRepository.Repository;

public class AzureBlobStore extends AbstractComponent implements BlobStore {

private final AzureStorageService client;
private final AzureStorageService service;

private final String clientName;
private final LocationMode locMode;
private final String container;
private final LocationMode locationMode;

public AzureBlobStore(RepositoryMetaData metadata, Settings settings,
AzureStorageService client) throws URISyntaxException, StorageException {
public AzureBlobStore(RepositoryMetaData metadata, Settings settings, AzureStorageService service)
throws URISyntaxException, StorageException {
super(settings);
this.client = client;
this.container = Repository.CONTAINER_SETTING.get(metadata.settings());
this.clientName = Repository.CLIENT_NAME.get(metadata.settings());

String modeStr = Repository.LOCATION_MODE_SETTING.get(metadata.settings());
if (Strings.hasLength(modeStr)) {
this.locMode = LocationMode.valueOf(modeStr.toUpperCase(Locale.ROOT));
} else {
this.locMode = LocationMode.PRIMARY_ONLY;
}
this.service = service;
// locationMode is set per repository, not per client
this.locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings());
final Map<String, AzureStorageSettings> prevSettings = this.service.refreshAndClearCache(emptyMap());
final Map<String, AzureStorageSettings> newSettings = AzureStorageSettings.overrideLocationMode(prevSettings, this.locationMode);
this.service.refreshAndClearCache(newSettings);
}

@Override
@ -71,7 +69,11 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore {
* Gets the configured {@link LocationMode} for the Azure storage requests.
*/
public LocationMode getLocationMode() {
return locMode;
return locationMode;
}

public String getClientName() {
return clientName;
}

@Override
@ -80,12 +82,13 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore {
}

@Override
public void delete(BlobPath path) {
String keyPath = path.buildAsString();
public void delete(BlobPath path) throws IOException {
final String keyPath = path.buildAsString();
try {
this.client.deleteFiles(this.clientName, this.locMode, container, keyPath);
service.deleteFiles(clientName, container, keyPath);
} catch (URISyntaxException | StorageException e) {
logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage());
logger.warn("cannot access [{}] in container {{}}: {}", keyPath, container, e.getMessage());
throw new IOException(e);
}
}

@ -93,30 +96,29 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore {
public void close() {
}

public boolean doesContainerExist()
{
return this.client.doesContainerExist(this.clientName, this.locMode, container);
public boolean containerExist() throws URISyntaxException, StorageException {
return service.doesContainerExist(clientName, container);
}

public boolean blobExists(String blob) throws URISyntaxException, StorageException {
return this.client.blobExists(this.clientName, this.locMode, container, blob);
return service.blobExists(clientName, container, blob);
}

public void deleteBlob(String blob) throws URISyntaxException, StorageException {
this.client.deleteBlob(this.clientName, this.locMode, container, blob);
service.deleteBlob(clientName, container, blob);
}

public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException {
return this.client.getInputStream(this.clientName, this.locMode, container, blob);
return service.getInputStream(clientName, container, blob);
}

public Map<String, BlobMetaData> listBlobsByPrefix(String keyPath, String prefix)
throws URISyntaxException, StorageException {
return this.client.listBlobsByPrefix(this.clientName, this.locMode, container, keyPath, prefix);
return service.listBlobsByPrefix(clientName, container, keyPath, prefix);
}

public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException,
FileAlreadyExistsException {
this.client.writeBlob(this.clientName, this.locMode, container, blobName, inputStream, blobSize);
service.writeBlob(this.clientName, container, blobName, inputStream, blobSize);
}
}
@ -21,6 +21,8 @@ package org.elasticsearch.repositories.azure;

import com.microsoft.azure.storage.LocationMode;
import com.microsoft.azure.storage.StorageException;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.Strings;
@ -33,6 +35,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import org.elasticsearch.snapshots.SnapshotCreationException;
import org.elasticsearch.snapshots.SnapshotId;

import java.io.IOException;
@ -60,19 +63,19 @@ public class AzureRepository extends BlobStoreRepository {
public static final String TYPE = "azure";

public static final class Repository {

@Deprecated // Replaced by client
public static final Setting<String> ACCOUNT_SETTING = new Setting<>("account", "default", Function.identity(),
Property.NodeScope, Property.Deprecated);
public static final Setting<String> CLIENT_NAME = new Setting<>("client", ACCOUNT_SETTING, Function.identity());

public static final Setting<String> CONTAINER_SETTING =
new Setting<>("container", "elasticsearch-snapshots", Function.identity(), Property.NodeScope);
public static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path", Property.NodeScope);
public static final Setting<String> LOCATION_MODE_SETTING = Setting.simpleString("location_mode", Property.NodeScope);
public static final Setting<LocationMode> LOCATION_MODE_SETTING = new Setting<>("location_mode",
s -> LocationMode.PRIMARY_ONLY.toString(), s -> LocationMode.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope);
public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING =
Setting.byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope);
public static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope);
public static final Setting<Boolean> READONLY_SETTING = Setting.boolSetting("readonly", false, Property.NodeScope);
}

private final AzureBlobStore blobStore;
@ -81,45 +84,32 @@ public class AzureRepository extends BlobStoreRepository {
private final boolean compress;
private final boolean readonly;

public AzureRepository(RepositoryMetaData metadata, Environment environment,
NamedXContentRegistry namedXContentRegistry, AzureStorageService storageService)
throws IOException, URISyntaxException, StorageException {
public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry,
AzureStorageService storageService) throws IOException, URISyntaxException, StorageException {
super(metadata, environment.settings(), namedXContentRegistry);

blobStore = new AzureBlobStore(metadata, environment.settings(), storageService);
String container = Repository.CONTAINER_SETTING.get(metadata.settings());
this.blobStore = new AzureBlobStore(metadata, environment.settings(), storageService);
this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings());
this.compress = Repository.COMPRESS_SETTING.get(metadata.settings());
String modeStr = Repository.LOCATION_MODE_SETTING.get(metadata.settings());
Boolean forcedReadonly = metadata.settings().getAsBoolean("readonly", null);
// If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting.
// For secondary_only setting, the repository should be read only
if (forcedReadonly == null) {
if (Strings.hasLength(modeStr)) {
LocationMode locationMode = LocationMode.valueOf(modeStr.toUpperCase(Locale.ROOT));
this.readonly = locationMode == LocationMode.SECONDARY_ONLY;
} else {
this.readonly = false;
}
if (Repository.READONLY_SETTING.exists(metadata.settings())) {
this.readonly = Repository.READONLY_SETTING.get(metadata.settings());
} else {
readonly = forcedReadonly;
this.readonly = this.blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY;
}

String basePath = Repository.BASE_PATH_SETTING.get(metadata.settings());

final String basePath = Strings.trimLeadingCharacter(Repository.BASE_PATH_SETTING.get(metadata.settings()), '/');
if (Strings.hasLength(basePath)) {
// Remove starting / if any
basePath = Strings.trimLeadingCharacter(basePath, '/');
BlobPath path = new BlobPath();
for(String elem : basePath.split("/")) {
for(final String elem : basePath.split("/")) {
path = path.add(elem);
}
this.basePath = path;
} else {
this.basePath = BlobPath.cleanPath();
}
logger.debug("using container [{}], chunk_size [{}], compress [{}], base_path [{}]",
container, chunkSize, compress, basePath);
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, compress, basePath));
}

/**
@ -153,9 +143,13 @@ public class AzureRepository extends BlobStoreRepository {

@Override
public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData clusterMetadata) {
if (blobStore.doesContainerExist() == false) {
throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. Please create it before " +
" creating an azure snapshot repository backed by it.");
try {
if (blobStore.containerExist() == false) {
throw new IllegalArgumentException("The bucket [" + blobStore + "] does not exist. Please create it before "
+ " creating an azure snapshot repository backed by it.");
}
} catch (URISyntaxException | StorageException e) {
throw new SnapshotCreationException(metadata.name(), snapshotId, e);
}
super.initializeSnapshot(snapshotId, indices, clusterMetadata);
}

@ -21,12 +21,13 @@ package org.elasticsearch.repositories.azure;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ReloadablePlugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.repositories.Repository;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@ -35,24 +36,20 @@ import java.util.Map;
/**
* A plugin to add a repository type that writes to and from the Azure cloud storage service.
*/
public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin {
public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin {

private final Map<String, AzureStorageSettings> clientsSettings;

// overridable for tests
protected AzureStorageService createStorageService(Settings settings) {
return new AzureStorageServiceImpl(settings, clientsSettings);
}
// protected for testing
final AzureStorageService azureStoreService;

public AzureRepositoryPlugin(Settings settings) {
// eagerly load client settings so that secure settings are read
clientsSettings = AzureStorageSettings.load(settings);
this.azureStoreService = new AzureStorageServiceImpl(settings);
}

@Override
public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
return Collections.singletonMap(AzureRepository.TYPE,
(metadata) -> new AzureRepository(metadata, env, namedXContentRegistry, createStorageService(env.settings())));
(metadata) -> new AzureRepository(metadata, env, namedXContentRegistry, azureStoreService));
}

@Override
@ -67,4 +64,14 @@ public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin {
AzureStorageSettings.PROXY_PORT_SETTING
);
}

@Override
public void reload(Settings settings) {
// secure settings should be readable
final Map<String, AzureStorageSettings> clientsSettings = AzureStorageSettings.load(settings);
if (clientsSettings.isEmpty()) {
throw new SettingsException("If you want to use an azure repository, you need to define a client configuration.");
}
azureStoreService.refreshAndClearCache(clientsSettings);
}
}
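For context, a caller of reload is expected to pass a Settings object whose secure settings are readable at that moment, as the EC2 test earlier in this commit does. A hedged sketch (client name and values illustrative):

    final MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setString("azure.client.default.account", "my_account"); // illustrative account
    secureSettings.setString("azure.client.default.key", "bXlfa2V5"); // illustrative base64 key
    azurePlugin.reload(Settings.builder().setSecureSettings(secureSettings).build());
    // clients created after this point are built from the refreshed settings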
@ -19,9 +19,12 @@

package org.elasticsearch.repositories.azure;

import com.microsoft.azure.storage.LocationMode;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlobClient;

import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

@ -30,6 +33,7 @@ import java.io.InputStream;
import java.net.URISyntaxException;
import java.nio.file.FileAlreadyExistsException;
import java.util.Map;
import java.util.function.Supplier;

/**
* Azure Storage Service interface
@ -37,29 +41,46 @@ import java.util.Map;
*/
public interface AzureStorageService {

/**
* Creates a {@code CloudBlobClient} on each invocation using the current client
* settings. CloudBlobClient is not thread safe and the settings can change,
* therefore the instance is not cache-able and should only be reused inside a
* thread for logically coupled ops. The {@code OperationContext} is used to
* specify the proxy, but a new context is *required* for each call.
*/
Tuple<CloudBlobClient, Supplier<OperationContext>> client(String clientName);

/**
* Updates settings for building clients. Any client cache is cleared. Future
* client requests will use the new refreshed settings.
*
* @param clientsSettings the settings for new clients
* @return the old settings
*/
Map<String, AzureStorageSettings> refreshAndClearCache(Map<String, AzureStorageSettings> clientsSettings);

ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB);

boolean doesContainerExist(String account, LocationMode mode, String container);
boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException;

void removeContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException;
void removeContainer(String account, String container) throws URISyntaxException, StorageException;

void createContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException;
void createContainer(String account, String container) throws URISyntaxException, StorageException;

void deleteFiles(String account, LocationMode mode, String container, String path) throws URISyntaxException, StorageException;
void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException;

boolean blobExists(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException;
boolean blobExists(String account, String container, String blob) throws URISyntaxException, StorageException;

void deleteBlob(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException;
void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException;

InputStream getInputStream(String account, LocationMode mode, String container, String blob)
throws URISyntaxException, StorageException, IOException;
InputStream getInputStream(String account, String container, String blob) throws URISyntaxException, StorageException, IOException;

Map<String,BlobMetaData> listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix)
throws URISyntaxException, StorageException;
Map<String, BlobMetaData> listBlobsByPrefix(String account, String container, String keyPath, String prefix)
throws URISyntaxException, StorageException;

void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) throws
URISyntaxException, StorageException, FileAlreadyExistsException;
void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize)
throws URISyntaxException, StorageException, FileAlreadyExistsException;

static InputStream giveSocketPermissionsToStream(InputStream stream) {
return new InputStream() {
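A sketch of how a caller consumes the Tuple returned by client(String): v1() is the short-lived CloudBlobClient, and v2().get() must be invoked once per request so every call gets its own proxy-aware OperationContext (the service variable and container name are illustrative):

    final Tuple<CloudBlobClient, Supplier<OperationContext>> client = service.client("default");
    final CloudBlobContainer blobContainer = client.v1().getContainerReference("my-container");
    // a fresh OperationContext per request, as the javadoc above requires
    final boolean exists = blobContainer.exists(null, null, client.v2().get());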
@ -21,7 +21,6 @@ package org.elasticsearch.repositories.azure;

import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.LocationMode;
import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.RetryExponentialRetry;
import com.microsoft.azure.storage.RetryPolicy;
@ -36,164 +35,133 @@ import com.microsoft.azure.storage.blob.CloudBlockBlob;
import com.microsoft.azure.storage.blob.DeleteSnapshotsOption;
import com.microsoft.azure.storage.blob.ListBlobItem;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.repositories.RepositoryException;

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.InvalidKeyException;
import java.nio.file.FileAlreadyExistsException;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

import static java.util.Collections.emptyMap;

public class AzureStorageServiceImpl extends AbstractComponent implements AzureStorageService {

final Map<String, AzureStorageSettings> storageSettings;
final Map<String, CloudBlobClient> clients;
// 'package' for testing
volatile Map<String, AzureStorageSettings> storageSettings = emptyMap();

public AzureStorageServiceImpl(Settings settings, Map<String, AzureStorageSettings> storageSettings) {
public AzureStorageServiceImpl(Settings settings) {
super(settings);
if (storageSettings.isEmpty()) {
// If someone did not register any settings, they basically can't use the plugin
throw new IllegalArgumentException("If you want to use an azure repository, you need to define a client configuration.");
}
this.storageSettings = storageSettings;
this.clients = createClients(storageSettings);
// eagerly load client settings so that secure settings are read
final Map<String, AzureStorageSettings> clientsSettings = AzureStorageSettings.load(settings);
refreshAndClearCache(clientsSettings);
}

private Map<String, CloudBlobClient> createClients(final Map<String, AzureStorageSettings> storageSettings) {
final Map<String, CloudBlobClient> clients = new HashMap<>();
for (Map.Entry<String, AzureStorageSettings> azureStorageEntry : storageSettings.entrySet()) {
final String clientName = azureStorageEntry.getKey();
final AzureStorageSettings clientSettings = azureStorageEntry.getValue();
try {
logger.trace("creating new Azure storage client with name [{}]", clientName);
String storageConnectionString =
"DefaultEndpointsProtocol=https;"
+ "AccountName=" + clientSettings.getAccount() + ";"
+ "AccountKey=" + clientSettings.getKey();

final String endpointSuffix = clientSettings.getEndpointSuffix();
if (Strings.hasLength(endpointSuffix)) {
storageConnectionString += ";EndpointSuffix=" + endpointSuffix;
}
// Retrieve storage account from connection-string.
CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString);

// Create the blob client.
CloudBlobClient client = storageAccount.createCloudBlobClient();

// Register the client
clients.put(clientSettings.getAccount(), client);
} catch (Exception e) {
logger.error(() -> new ParameterizedMessage("Can not create azure storage client [{}]", clientName), e);
}
}
return Collections.unmodifiableMap(clients);
}

CloudBlobClient getSelectedClient(String clientName, LocationMode mode) {
logger.trace("selecting a client named [{}], mode [{}]", clientName, mode.name());
AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName);
@Override
public Tuple<CloudBlobClient, Supplier<OperationContext>> client(String clientName) {
final AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName);
if (azureStorageSettings == null) {
throw new IllegalArgumentException("Unable to find client with name [" + clientName + "]");
throw new SettingsException("Unable to find client with name [" + clientName + "]");
}

CloudBlobClient client = this.clients.get(azureStorageSettings.getAccount());
if (client == null) {
throw new IllegalArgumentException("No account defined for client with name [" + clientName + "]");
try {
return new Tuple<>(buildClient(azureStorageSettings), () -> buildOperationContext(azureStorageSettings));
} catch (InvalidKeyException | URISyntaxException | IllegalArgumentException e) {
throw new SettingsException("Invalid azure client settings with name [" + clientName + "]", e);
}
}

// NOTE: for now, just set the location mode in case it is different;
// only one mode per storage clientName can be active at a time
client.getDefaultRequestOptions().setLocationMode(mode);

// Set timeout option if the user sets cloud.azure.storage.timeout or cloud.azure.storage.xxx.timeout (it's negative by default)
if (azureStorageSettings.getTimeout().getSeconds() > 0) {
try {
int timeout = (int) azureStorageSettings.getTimeout().getMillis();
client.getDefaultRequestOptions().setTimeoutIntervalInMs(timeout);
} catch (ClassCastException e) {
throw new IllegalArgumentException("Can not convert [" + azureStorageSettings.getTimeout() +
"]. It can not be longer than 2,147,483,647ms.");
protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException {
final CloudBlobClient client = createClient(azureStorageSettings);
// Set timeout option if the user sets cloud.azure.storage.timeout or
// cloud.azure.storage.xxx.timeout (it's negative by default)
final long timeout = azureStorageSettings.getTimeout().getMillis();
if (timeout > 0) {
if (timeout > Integer.MAX_VALUE) {
throw new IllegalArgumentException("Timeout [" + azureStorageSettings.getTimeout() + "] exceeds 2,147,483,647ms.");
}
client.getDefaultRequestOptions().setTimeoutIntervalInMs((int) timeout);
}

// We define a default exponential retry policy
client.getDefaultRequestOptions().setRetryPolicyFactory(
new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries()));

client.getDefaultRequestOptions()
.setRetryPolicyFactory(new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries()));
client.getDefaultRequestOptions().setLocationMode(azureStorageSettings.getLocationMode());
return client;
}

private OperationContext generateOperationContext(String clientName) {
OperationContext context = new OperationContext();
AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName);

if (azureStorageSettings.getProxy() != null) {
context.setProxy(azureStorageSettings.getProxy());
}
protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException {
final String connectionString = azureStorageSettings.buildConnectionString();
return CloudStorageAccount.parse(connectionString).createCloudBlobClient();
}

protected OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) {
final OperationContext context = new OperationContext();
context.setProxy(azureStorageSettings.getProxy());
return context;
}

@Override
public boolean doesContainerExist(String account, LocationMode mode, String container) {
try {
CloudBlobClient client = this.getSelectedClient(account, mode);
CloudBlobContainer blobContainer = client.getContainerReference(container);
return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account)));
} catch (Exception e) {
logger.error("can not access container [{}]", container);
}
return false;
public Map<String, AzureStorageSettings> refreshAndClearCache(Map<String, AzureStorageSettings> clientsSettings) {
final Map<String, AzureStorageSettings> prevSettings = this.storageSettings;
this.storageSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap();
// clients are built lazily by {@link client(String)}
return prevSettings;
}

@Override
public void removeContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException {
CloudBlobClient client = this.getSelectedClient(account, mode);
CloudBlobContainer blobContainer = client.getContainerReference(container);
logger.trace("removing container [{}]", container);
SocketAccess.doPrivilegedException(() -> blobContainer.deleteIfExists(null, null, generateOperationContext(account)));
public boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException {
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get()));
}

@Override
public void createContainer(String account, LocationMode mode, String container) throws URISyntaxException, StorageException {
public void removeContainer(String account, String container) throws URISyntaxException, StorageException {
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
logger.trace(() -> new ParameterizedMessage("removing container [{}]", container));
SocketAccess.doPrivilegedException(() -> blobContainer.deleteIfExists(null, null, client.v2().get()));
}

@Override
public void createContainer(String account, String container) throws URISyntaxException, StorageException {
try {
CloudBlobClient client = this.getSelectedClient(account, mode);
CloudBlobContainer blobContainer = client.getContainerReference(container);
logger.trace("creating container [{}]", container);
SocketAccess.doPrivilegedException(() -> blobContainer.createIfNotExists(null, null, generateOperationContext(account)));
} catch (IllegalArgumentException e) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("fails creating container [{}]", container), e);
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
logger.trace(() -> new ParameterizedMessage("creating container [{}]", container));
SocketAccess.doPrivilegedException(() -> blobContainer.createIfNotExists(null, null, client.v2().get()));
} catch (final IllegalArgumentException e) {
logger.trace(() -> new ParameterizedMessage("failed creating container [{}]", container), e);
throw new RepositoryException(container, e.getMessage(), e);
}
}

@Override
public void deleteFiles(String account, LocationMode mode, String container, String path) throws URISyntaxException, StorageException {
logger.trace("delete files container [{}], path [{}]", container, path);

// Container name must be lower case.
CloudBlobClient client = this.getSelectedClient(account, mode);
CloudBlobContainer blobContainer = client.getContainerReference(container);
public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException {
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
// container name must be lower case.
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
logger.trace(() -> new ParameterizedMessage("delete files container [{}], path [{}]", container, path));
SocketAccess.doPrivilegedVoidException(() -> {
if (blobContainer.exists()) {
// We list the blobs using a flat blob listing mode
for (ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null,
generateOperationContext(account))) {
String blobName = blobNameFromUri(blobItem.getUri());
logger.trace("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri());
deleteBlob(account, mode, container, blobName);
// list the blobs using a flat blob listing mode
for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null,
client.v2().get())) {
final String blobName = blobNameFromUri(blobItem.getUri());
logger.trace(() -> new ParameterizedMessage("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri()));
// don't call {@code #deleteBlob}, use the same client
final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blobName);
azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get());
}
}
});
@ -205,85 +173,82 @@ public class AzureStorageServiceImpl extends AbstractComponent implements AzureS
* @param uri URI to parse
* @return The blob name relative to the container
*/
public static String blobNameFromUri(URI uri) {
String path = uri.getPath();

static String blobNameFromUri(URI uri) {
final String path = uri.getPath();
// We remove the container name from the path
// The 3 magic number comes from the fact that if the path is /container/path/to/myfile
// the first occurrence is the empty string before the leading "/"
// the second occurrence is "container"
// the last part contains "path/to/myfile", which is what we want to get
String[] splits = path.split("/", 3);

final String[] splits = path.split("/", 3);
// We return the remaining end of the string
return splits[2];
}

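// Worked example (editor's sketch, not part of this commit): for uri.getPath() == "/container/path/to/myfile",
// path.split("/", 3) yields ["", "container", "path/to/myfile"], so blobNameFromUri returns "path/to/myfile".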
@Override
public boolean blobExists(String account, LocationMode mode, String container, String blob)
throws URISyntaxException, StorageException {
public boolean blobExists(String account, String container, String blob)
throws URISyntaxException, StorageException {
// Container name must be lower case.
CloudBlobClient client = this.getSelectedClient(account, mode);
CloudBlobContainer blobContainer = client.getContainerReference(container);
if (SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account)))) {
CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob);
return SocketAccess.doPrivilegedException(() -> azureBlob.exists(null, null, generateOperationContext(account)));
}

return false;
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
return SocketAccess.doPrivilegedException(() -> {
if (blobContainer.exists(null, null, client.v2().get())) {
final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob);
return azureBlob.exists(null, null, client.v2().get());
}
return false;
});
}

@Override
public void deleteBlob(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException {
logger.trace("delete blob for container [{}], blob [{}]", container, blob);

public void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException {
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
// Container name must be lower case.
CloudBlobClient client = this.getSelectedClient(account, mode);
CloudBlobContainer blobContainer = client.getContainerReference(container);
if (SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, generateOperationContext(account)))) {
logger.trace("container [{}]: blob [{}] found. removing.", container, blob);
CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob);
SocketAccess.doPrivilegedVoidException(() -> azureBlob.delete(DeleteSnapshotsOption.NONE, null, null,
generateOperationContext(account)));
}
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
logger.trace(() -> new ParameterizedMessage("delete blob for container [{}], blob [{}]", container, blob));
SocketAccess.doPrivilegedVoidException(() -> {
if (blobContainer.exists(null, null, client.v2().get())) {
final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob);
logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] found. removing.", container, blob));
azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get());
}
});
}

@Override
public InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException,
public InputStream getInputStream(String account, String container, String blob) throws URISyntaxException,
StorageException {
logger.trace("reading container [{}], blob [{}]", container, blob);
CloudBlobClient client = this.getSelectedClient(account, mode);
CloudBlockBlob blockBlobReference = client.getContainerReference(container).getBlockBlobReference(blob);
BlobInputStream is = SocketAccess.doPrivilegedException(() ->
blockBlobReference.openInputStream(null, null, generateOperationContext(account)));
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
final CloudBlockBlob blockBlobReference = client.v1().getContainerReference(container).getBlockBlobReference(blob);
logger.trace(() -> new ParameterizedMessage("reading container [{}], blob [{}]", container, blob));
final BlobInputStream is = SocketAccess.doPrivilegedException(() ->
blockBlobReference.openInputStream(null, null, client.v2().get()));
return AzureStorageService.giveSocketPermissionsToStream(is);
}

@Override
public Map<String, BlobMetaData> listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix)
public Map<String, BlobMetaData> listBlobsByPrefix(String account, String container, String keyPath, String prefix)
throws URISyntaxException, StorageException {
// NOTE: this should be here: if (prefix == null) prefix = "";
// however, this is really inefficient since deleteBlobsByPrefix enumerates everything and
// then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix!

logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix);
MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
EnumSet<BlobListingDetails> enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA);
CloudBlobClient client = this.getSelectedClient(account, mode);
CloudBlobContainer blobContainer = client.getContainerReference(container);
final MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
final EnumSet<BlobListingDetails> enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA);
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
logger.trace(() -> new ParameterizedMessage("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix));
SocketAccess.doPrivilegedVoidException(() -> {
if (blobContainer.exists()) {
for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix), false,
enumBlobListingDetails, null, generateOperationContext(account))) {
URI uri = blobItem.getUri();
logger.trace("blob url [{}]", uri);

for (final ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix), false,
enumBlobListingDetails, null, client.v2().get())) {
final URI uri = blobItem.getUri();
logger.trace(() -> new ParameterizedMessage("blob url [{}]", uri));
// uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/
// this requires 1 + container.length() + 1, with each 1 corresponding to one of the /
String blobPath = uri.getPath().substring(1 + container.length() + 1);
BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties();
String name = blobPath.substring(keyPath.length());
logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength());
final String blobPath = uri.getPath().substring(1 + container.length() + 1);
final BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties();
final String name = blobPath.substring(keyPath.length());
logger.trace(() -> new ParameterizedMessage("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength()));
blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength()));
}
}
@ -292,22 +257,23 @@ public class AzureStorageServiceImpl extends AbstractComponent implements AzureS
}

@Override
public void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize)
public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize)
throws URISyntaxException, StorageException, FileAlreadyExistsException {
logger.trace("writeBlob({}, stream, {})", blobName, blobSize);
CloudBlobClient client = this.getSelectedClient(account, mode);
CloudBlobContainer blobContainer = client.getContainerReference(container);
CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName);
logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize));
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
final CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName);
try {
SocketAccess.doPrivilegedVoidException(() -> blob.upload(inputStream, blobSize, AccessCondition.generateIfNotExistsCondition(),
null, generateOperationContext(account)));
} catch (StorageException se) {
null, client.v2().get()));
} catch (final StorageException se) {
if (se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT &&
StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) {
throw new FileAlreadyExistsException(blobName, null, se.getMessage());
}
throw se;
}
logger.trace("writeBlob({}, stream, {}) - done", blobName, blobSize);
logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {}) - done", blobName, blobSize));
}

}
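The IfNotExists access condition is what turns a concurrent duplicate upload into an HTTP 409 with error code BlobAlreadyExists, which the catch block above rethrows as a standard java.nio exception. A hedged caller-side sketch (arguments illustrative):

    try {
        service.writeBlob("default", "my-container", "snap-0.dat", inputStream, blobSize);
    } catch (FileAlreadyExistsException e) {
        // another writer created the blob first; the data can be treated as already present
    }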
@ -19,8 +19,10 @@

package org.elasticsearch.repositories.azure;

import com.microsoft.azure.storage.LocationMode;
import com.microsoft.azure.storage.RetryPolicy;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
@ -29,7 +31,6 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.unit.TimeValue;

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Proxy;
@ -39,7 +40,7 @@ import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public final class AzureStorageSettings {
final class AzureStorageSettings {

// prefix for azure client settings
private static final String AZURE_CLIENT_PREFIX_KEY = "azure.client.";
@ -86,22 +87,33 @@ public final class AzureStorageSettings {
private final TimeValue timeout;
private final int maxRetries;
private final Proxy proxy;
private final LocationMode locationMode;

// copy-constructor
private AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries, Proxy proxy,
LocationMode locationMode) {
this.account = account;
this.key = key;
this.endpointSuffix = endpointSuffix;
this.timeout = timeout;
this.maxRetries = maxRetries;
this.proxy = proxy;
this.locationMode = locationMode;
}

public AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries,
AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries,
Proxy.Type proxyType, String proxyHost, Integer proxyPort) {
this.account = account;
this.key = key;
this.endpointSuffix = endpointSuffix;
this.timeout = timeout;
this.maxRetries = maxRetries;

// Register the proxy if we have any
// Validate proxy settings
if (proxyType.equals(Proxy.Type.DIRECT) && (proxyPort != 0 || Strings.hasText(proxyHost))) {
if (proxyType.equals(Proxy.Type.DIRECT) && ((proxyPort != 0) || Strings.hasText(proxyHost))) {
throw new SettingsException("Azure Proxy port or host have been set but proxy type is not defined.");
}
if (proxyType.equals(Proxy.Type.DIRECT) == false && (proxyPort == 0 || Strings.isEmpty(proxyHost))) {
if ((proxyType.equals(Proxy.Type.DIRECT) == false) && ((proxyPort == 0) || Strings.isEmpty(proxyHost))) {
throw new SettingsException("Azure Proxy type has been set but proxy host or port is not defined.");
}

@ -110,10 +122,11 @@ public final class AzureStorageSettings {
} else {
try {
proxy = new Proxy(proxyType, new InetSocketAddress(InetAddress.getByName(proxyHost), proxyPort));
} catch (UnknownHostException e) {
} catch (final UnknownHostException e) {
throw new SettingsException("Azure proxy host is unknown.", e);
}
}
this.locationMode = LocationMode.PRIMARY_ONLY;
}

public String getKey() {
@ -140,37 +153,55 @@ public final class AzureStorageSettings {
return proxy;
}

public String buildConnectionString() {
final StringBuilder connectionStringBuilder = new StringBuilder();
connectionStringBuilder.append("DefaultEndpointsProtocol=https")
.append(";AccountName=")
.append(account)
.append(";AccountKey=")
.append(key);
if (Strings.hasText(endpointSuffix)) {
connectionStringBuilder.append(";EndpointSuffix=").append(endpointSuffix);
}
return connectionStringBuilder.toString();
}

public LocationMode getLocationMode() {
return locationMode;
}

@Override
public String toString() {
final StringBuilder sb = new StringBuilder("AzureStorageSettings{");
sb.append(", account='").append(account).append('\'');
sb.append("account='").append(account).append('\'');
sb.append(", key='").append(key).append('\'');
sb.append(", timeout=").append(timeout);
sb.append(", endpointSuffix='").append(endpointSuffix).append('\'');
sb.append(", maxRetries=").append(maxRetries);
sb.append(", proxy=").append(proxy);
sb.append(", locationMode='").append(locationMode).append('\'');
sb.append('}');
return sb.toString();
}

/**
* Parses settings and read all settings available under azure.client.*
* Parse and read all settings available under the azure.client.* namespace
* @param settings settings to parse
* @return All the named configurations
*/
public static Map<String, AzureStorageSettings> load(Settings settings) {
// Get the list of existing named configurations
Map<String, AzureStorageSettings> storageSettings = new HashMap<>();
for (String clientName : ACCOUNT_SETTING.getNamespaces(settings)) {
final Map<String, AzureStorageSettings> storageSettings = new HashMap<>();
for (final String clientName : ACCOUNT_SETTING.getNamespaces(settings)) {
storageSettings.put(clientName, getClientSettings(settings, clientName));
}

if (storageSettings.containsKey("default") == false && storageSettings.isEmpty() == false) {
if (false == storageSettings.containsKey("default") && false == storageSettings.isEmpty()) {
// in case no setting named "default" has been set, let's define our "default"
// as the first named config we get
AzureStorageSettings defaultSettings = storageSettings.values().iterator().next();
final AzureStorageSettings defaultSettings = storageSettings.values().iterator().next();
storageSettings.put("default", defaultSettings);
}
assert storageSettings.containsKey("default") || storageSettings.isEmpty() : "always have 'default' if any";
return Collections.unmodifiableMap(storageSettings);
}

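For a client configured with account my_account, key bXlfa2V5 and endpoint suffix core.chinacloudapi.cn (all illustrative values), buildConnectionString above would yield:

    DefaultEndpointsProtocol=https;AccountName=my_account;AccountKey=bXlfa2V5;EndpointSuffix=core.chinacloudapi.cn

Note also the fallback in load: when clients are configured but none is named "default", the first parsed configuration is registered under "default" as well, so lookups by the default client name keep working.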
|
@@ -191,13 +222,25 @@ public final class AzureStorageSettings {

    private static <T> T getConfigValue(Settings settings, String clientName,
                                        Setting.AffixSetting<T> clientSetting) {
-       Setting<T> concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName);
+       final Setting<T> concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName);
        return concreteSetting.get(settings);
    }

    public static <T> T getValue(Settings settings, String groupName, Setting<T> setting) {
-       Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey();
-       String fullKey = k.toConcreteKey(groupName).toString();
+       final Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey();
+       final String fullKey = k.toConcreteKey(groupName).toString();
        return setting.getConcreteSetting(fullKey).get(settings);
    }

    static Map<String, AzureStorageSettings> overrideLocationMode(Map<String, AzureStorageSettings> clientsSettings,
                                                                  LocationMode locationMode) {
        final MapBuilder<String, AzureStorageSettings> mapBuilder = new MapBuilder<>();
        for (final Map.Entry<String, AzureStorageSettings> entry : clientsSettings.entrySet()) {
            final AzureStorageSettings azureSettings = new AzureStorageSettings(entry.getValue().account, entry.getValue().key,
                entry.getValue().endpointSuffix, entry.getValue().timeout, entry.getValue().maxRetries, entry.getValue().proxy,
                locationMode);
            mapBuilder.put(entry.getKey(), azureSettings);
        }
        return mapBuilder.immutableMap();
    }
}
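A side note on `getValue` and `getConfigValue` above: both lean on affix settings, where the concrete key embeds the client name between a fixed prefix and suffix. A sketch of that resolution (this `TIMEOUT_SETTING` declaration is hypothetical, shown only to illustrate the key shape; the real declarations live elsewhere in AzureStorageSettings):

    // Hypothetical affix setting, for illustration only.
    Setting.AffixSetting<TimeValue> TIMEOUT_SETTING = Setting.affixKeySetting(
            "azure.client.", "timeout",
            key -> Setting.timeSetting(key, TimeValue.timeValueSeconds(30), Setting.Property.NodeScope));

    // getValue(settings, "azure1", TIMEOUT_SETTING) effectively resolves:
    Setting<TimeValue> concrete = TIMEOUT_SETTING.getConcreteSettingForNamespace("azure1");
    // concrete.getKey() == "azure.client.azure1.timeout"
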
@@ -34,6 +34,7 @@ import java.io.IOException;
import java.net.URISyntaxException;

import static org.hamcrest.Matchers.is;
+import static org.mockito.Mockito.mock;

public class AzureRepositorySettingsTests extends ESTestCase {

@@ -44,7 +45,7 @@ public class AzureRepositorySettingsTests extends ESTestCase {
            .put(settings)
            .build();
        return new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings),
-           TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, null);
+           TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, mock(AzureStorageService.class));
    }

    public void testReadonlyDefault() throws StorageException, IOException, URISyntaxException {

@@ -19,9 +19,7 @@

package org.elasticsearch.repositories.azure;

import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.microsoft.azure.storage.LocationMode;
import com.microsoft.azure.storage.StorageException;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;

@@ -77,9 +75,9 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        return Settings.builder().setSecureSettings(generateMockSecureSettings());
    }

    @SuppressWarnings("resource")
    private static AzureStorageService getAzureStorageService() {
-       return new AzureStorageServiceImpl(generateMockSettings().build(),
-           AzureStorageSettings.load(generateMockSettings().build()));
+       return new AzureRepositoryPlugin(generateMockSettings().build()).azureStoreService;
    }

    @Override

@@ -94,7 +92,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
     * there mustn't be a hyphen between the 2 concatenated numbers
     * (can't have 2 consecutives hyphens on Azure containers)
     */
-       String testName = "snapshot-itest-"
+       final String testName = "snapshot-itest-"
            .concat(RandomizedTest.getContext().getRunnerSeedAsString().toLowerCase(Locale.ROOT));
        return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName;
    }
@@ -123,7 +121,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        // It could happen that we run this test really close to a previous one
        // so we might need some time to be able to create the container
        assertBusy(() -> {
-           getAzureStorageService().createContainer("default", LocationMode.PRIMARY_ONLY, containerName);
+           getAzureStorageService().createContainer("default", containerName);
        }, 30, TimeUnit.SECONDS);
    }

@@ -132,7 +130,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
     * @param containerName container name to use
     */
    private static void removeTestContainer(String containerName) throws URISyntaxException, StorageException {
-       getAzureStorageService().removeContainer("default", LocationMode.PRIMARY_ONLY, containerName);
+       getAzureStorageService().removeContainer("default", containerName);
    }

    @Override

@@ -141,7 +139,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
    }

    private String getRepositoryPath() {
-       String testName = "it-" + getTestName();
+       final String testName = "it-" + getTestName();
        return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName;
    }

@@ -159,21 +157,21 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
    public final void wipeAzureRepositories() {
        try {
            client().admin().cluster().prepareDeleteRepository("*").get();
-       } catch (RepositoryMissingException ignored) {
+       } catch (final RepositoryMissingException ignored) {
        }
    }

    public void testMultipleRepositories() {
-       Client client = client();
+       final Client client = client();
        logger.info("--> creating azure repository with path [{}]", getRepositoryPath());
-       PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1")
+       final PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1")
            .setType("azure").setSettings(Settings.builder()
                .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-1"))
                .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath())
                .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES)
            ).get();
        assertThat(putRepositoryResponse1.isAcknowledged(), equalTo(true));
-       PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2")
+       final PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2")
            .setType("azure").setSettings(Settings.builder()
                .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-2"))
                .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath())

@@ -194,14 +192,14 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

        logger.info("--> snapshot 1");
-       CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot("test-repo1", "test-snap")
+       final CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot("test-repo1", "test-snap")
            .setWaitForCompletion(true).setIndices("test-idx-1").get();
        assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), greaterThan(0));
        assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(),
            equalTo(createSnapshotResponse1.getSnapshotInfo().totalShards()));

        logger.info("--> snapshot 2");
-       CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot("test-repo2", "test-snap")
+       final CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot("test-repo2", "test-snap")
            .setWaitForCompletion(true).setIndices("test-idx-2").get();
        assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0));
        assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(),

@@ -216,7 +214,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        logger.info("--> delete indices");
        cluster().wipeIndices("test-idx-1", "test-idx-2");
        logger.info("--> restore one index after deletion from snapshot 1");
-       RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap")
+       final RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap")
            .setWaitForCompletion(true).setIndices("test-idx-1").get();
        assertThat(restoreSnapshotResponse1.getRestoreInfo().totalShards(), greaterThan(0));
        ensureGreen();

@@ -226,7 +224,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));

        logger.info("--> restore other index after deletion from snapshot 2");
-       RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin().cluster().prepareRestoreSnapshot("test-repo2", "test-snap")
+       final RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin().cluster().prepareRestoreSnapshot("test-repo2", "test-snap")
            .setWaitForCompletion(true).setIndices("test-idx-2").get();
        assertThat(restoreSnapshotResponse2.getRestoreInfo().totalShards(), greaterThan(0));
        ensureGreen();

@@ -252,7 +250,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        }
        refresh();

-       ClusterAdminClient client = client().admin().cluster();
+       final ClusterAdminClient client = client().admin().cluster();
        logger.info("--> creating azure repository without any path");
        PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure")
            .setSettings(Settings.builder()

@@ -300,9 +298,9 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
     */
    public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISyntaxException {
        final String repositoryName="test-repo-28";
-       ClusterAdminClient client = client().admin().cluster();
+       final ClusterAdminClient client = client().admin().cluster();
        logger.info("--> creating azure repository without any path");
-       PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure")
+       final PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure")
            .setSettings(Settings.builder()
                .put(Repository.CONTAINER_SETTING.getKey(), getContainerName())
            ).get();

@@ -311,14 +309,14 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        try {
            client.prepareGetSnapshots(repositoryName).addSnapshots("nonexistingsnapshotname").get();
            fail("Shouldn't be here");
-       } catch (SnapshotMissingException ex) {
+       } catch (final SnapshotMissingException ex) {
            // Expected
        }

        try {
            client.prepareDeleteSnapshot(repositoryName, "nonexistingsnapshotname").get();
            fail("Shouldn't be here");
-       } catch (SnapshotMissingException ex) {
+       } catch (final SnapshotMissingException ex) {
            // Expected
        }
    }

@@ -328,9 +326,9 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
     */
    public void testNonExistingRepo_23() {
        final String repositoryName = "test-repo-test23";
-       Client client = client();
+       final Client client = client();
        logger.info("--> creating azure repository with path [{}]", getRepositoryPath());
-       PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName)
+       final PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName)
            .setType("azure").setSettings(Settings.builder()
                .put(Repository.CONTAINER_SETTING.getKey(), getContainerName())
                .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath())

@@ -342,7 +340,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        try {
            client.admin().cluster().prepareRestoreSnapshot(repositoryName, "no-existing-snapshot").setWaitForCompletion(true).get();
            fail("Shouldn't be here");
-       } catch (SnapshotRestoreException ex) {
+       } catch (final SnapshotRestoreException ex) {
            // Expected
        }
    }

@@ -356,7 +354,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        createTestContainer(container);
        removeTestContainer(container);

-       ClusterAdminClient client = client().admin().cluster();
+       final ClusterAdminClient client = client().admin().cluster();
        logger.info("--> creating azure repository while container is being removed");
        try {
            client.preparePutRepository("test-repo").setType("azure")

@@ -364,7 +362,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
                .put(Repository.CONTAINER_SETTING.getKey(), container)
            ).get();
            fail("we should get a RepositoryVerificationException");
-       } catch (RepositoryVerificationException e) {
+       } catch (final RepositoryVerificationException e) {
            // Fine we expect that
        }
    }

@@ -378,9 +376,9 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
     * @throws Exception If anything goes wrong
     */
    public void testGeoRedundantStorage() throws Exception {
-       Client client = client();
+       final Client client = client();
        logger.info("--> creating azure primary repository");
-       PutRepositoryResponse putRepositoryResponsePrimary = client.admin().cluster().preparePutRepository("primary")
+       final PutRepositoryResponse putRepositoryResponsePrimary = client.admin().cluster().preparePutRepository("primary")
            .setType("azure").setSettings(Settings.builder()
                .put(Repository.CONTAINER_SETTING.getKey(), getContainerName())
            ).get();

@@ -394,7 +392,7 @@ public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase
        assertThat(endWait - startWait, lessThanOrEqualTo(30000L));

        logger.info("--> creating azure secondary repository");
-       PutRepositoryResponse putRepositoryResponseSecondary = client.admin().cluster().preparePutRepository("secondary")
+       final PutRepositoryResponse putRepositoryResponseSecondary = client.admin().cluster().preparePutRepository("secondary")
            .setType("azure").setSettings(Settings.builder()
                .put(Repository.CONTAINER_SETTING.getKey(), getContainerName())
                .put(Repository.LOCATION_MODE_SETTING.getKey(), "secondary_only")

@@ -19,11 +19,14 @@

package org.elasticsearch.repositories.azure;

-import com.microsoft.azure.storage.LocationMode;
+import com.microsoft.azure.storage.OperationContext;
import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlobClient;

import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.internal.io.Streams;

@@ -40,6 +43,9 @@ import java.security.AccessController;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Supplier;

+import static java.util.Collections.emptyMap;

/**
 * In memory storage for unit tests

@@ -53,44 +59,44 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureStorageService
    }

    @Override
-   public boolean doesContainerExist(String account, LocationMode mode, String container) {
+   public boolean doesContainerExist(String account, String container) {
        return true;
    }

    @Override
-   public void removeContainer(String account, LocationMode mode, String container) {
+   public void removeContainer(String account, String container) {
    }

    @Override
-   public void createContainer(String account, LocationMode mode, String container) {
+   public void createContainer(String account, String container) {
    }

    @Override
-   public void deleteFiles(String account, LocationMode mode, String container, String path) {
-       final Map<String, BlobMetaData> blobs = listBlobsByPrefix(account, mode, container, path, null);
-       blobs.keySet().forEach(key -> deleteBlob(account, mode, container, key));
+   public void deleteFiles(String account, String container, String path) {
+       final Map<String, BlobMetaData> blobs = listBlobsByPrefix(account, container, path, null);
+       blobs.keySet().forEach(key -> deleteBlob(account, container, key));
    }

    @Override
-   public boolean blobExists(String account, LocationMode mode, String container, String blob) {
+   public boolean blobExists(String account, String container, String blob) {
        return blobs.containsKey(blob);
    }

    @Override
-   public void deleteBlob(String account, LocationMode mode, String container, String blob) {
+   public void deleteBlob(String account, String container, String blob) {
        blobs.remove(blob);
    }

    @Override
-   public InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws IOException {
-       if (!blobExists(account, mode, container, blob)) {
+   public InputStream getInputStream(String account, String container, String blob) throws IOException {
+       if (!blobExists(account, container, blob)) {
            throw new NoSuchFileException("missing blob [" + blob + "]");
        }
        return AzureStorageService.giveSocketPermissionsToStream(new PermissionRequiringInputStream(blobs.get(blob).toByteArray()));
    }

    @Override
-   public Map<String, BlobMetaData> listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) {
+   public Map<String, BlobMetaData> listBlobsByPrefix(String account, String container, String keyPath, String prefix) {
        MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
        blobs.forEach((String blobName, ByteArrayOutputStream bos) -> {
            final String checkBlob;

@@ -108,7 +114,7 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureStorageService
    }

    @Override
-   public void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize)
+   public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize)
        throws URISyntaxException, StorageException, FileAlreadyExistsException {
        if (blobs.containsKey(blobName)) {
            throw new FileAlreadyExistsException(blobName);

@@ -168,4 +174,14 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureStorageService
            return super.read(b, off, len);
        }
    }

+   @Override
+   public Tuple<CloudBlobClient, Supplier<OperationContext>> client(String clientName) {
+       return null;
+   }

+   @Override
+   public Map<String, AzureStorageSettings> refreshAndClearCache(Map<String, AzureStorageSettings> clientsSettings) {
+       return emptyMap();
+   }
}

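The mock above tracks an interface change worth calling out: `client(String)` now returns the blob client paired with a supplier of per-request `OperationContext`s, replacing the old `getSelectedClient(name, locationMode)` lookup, and the `LocationMode` parameter disappears from every storage operation because location mode now travels with the per-repository settings (see `overrideLocationMode` above). A sketch of how a caller consumes the new signature, with `azureStorageService` assumed in scope as in the tests below:

    // Sketch: consuming the new client() signature.
    Tuple<CloudBlobClient, Supplier<OperationContext>> clientTuple = azureStorageService.client("azure1");
    CloudBlobClient blobClient = clientTuple.v1();      // the cached, shared client
    OperationContext context = clientTuple.v2().get();  // a fresh context per request
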
@@ -19,7 +19,6 @@

package org.elasticsearch.repositories.azure;

-import com.microsoft.azure.storage.LocationMode;
import com.microsoft.azure.storage.RetryExponentialRetry;
import com.microsoft.azure.storage.blob.CloudBlobClient;
import com.microsoft.azure.storage.core.Base64;

@@ -28,6 +27,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.test.ESTestCase;

+import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Proxy;

@@ -35,7 +35,6 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
-import java.util.Collections;
import java.util.Map;

import static org.elasticsearch.repositories.azure.AzureStorageServiceImpl.blobNameFromUri;

@@ -50,17 +49,10 @@ import static org.hamcrest.Matchers.nullValue;
public class AzureStorageServiceTests extends ESTestCase {

    public void testReadSecuredSettings() {
-       MockSecureSettings secureSettings = new MockSecureSettings();
-       secureSettings.setString("azure.client.azure1.account", "myaccount1");
-       secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1"));
-       secureSettings.setString("azure.client.azure2.account", "myaccount2");
-       secureSettings.setString("azure.client.azure2.key", encodeKey("mykey2"));
-       secureSettings.setString("azure.client.azure3.account", "myaccount3");
-       secureSettings.setString("azure.client.azure3.key", encodeKey("mykey3"));
-       Settings settings = Settings.builder().setSecureSettings(secureSettings)
+       final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings())
            .put("azure.client.azure3.endpoint_suffix", "my_endpoint_suffix").build();

-       Map<String, AzureStorageSettings> loadedSettings = AzureStorageSettings.load(settings);
+       final Map<String, AzureStorageSettings> loadedSettings = AzureStorageSettings.load(settings);
        assertThat(loadedSettings.keySet(), containsInAnyOrder("azure1","azure2","azure3","default"));

        assertThat(loadedSettings.get("azure1").getEndpointSuffix(), isEmptyString());

@@ -68,95 +60,161 @@ public class AzureStorageServiceTests extends ESTestCase {
        assertThat(loadedSettings.get("azure3").getEndpointSuffix(), equalTo("my_endpoint_suffix"));
    }

-   public void testCreateClientWithEndpointSuffix() {
-       MockSecureSettings secureSettings = new MockSecureSettings();
-       secureSettings.setString("azure.client.azure1.account", "myaccount1");
-       secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1"));
-       secureSettings.setString("azure.client.azure2.account", "myaccount2");
-       secureSettings.setString("azure.client.azure2.key", encodeKey("mykey2"));
-       Settings settings = Settings.builder().setSecureSettings(secureSettings)
+   public void testCreateClientWithEndpointSuffix() throws IOException {
+       final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings())
            .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build();
-       AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings));
-       CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
-       assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix"));
-
-       CloudBlobClient client2 = azureStorageService.getSelectedClient("azure2", LocationMode.PRIMARY_ONLY);
-       assertThat(client2.getEndpoint().toString(), equalTo("https://myaccount2.blob.core.windows.net"));
+       try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) {
+           final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService;
+           final CloudBlobClient client1 = azureStorageService.client("azure1").v1();
+           assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix"));
+           final CloudBlobClient client2 = azureStorageService.client("azure2").v1();
+           assertThat(client2.getEndpoint().toString(), equalTo("https://myaccount2.blob.core.windows.net"));
+       }
    }

-   public void testGetSelectedClientWithNoPrimaryAndSecondary() {
-       try {
-           new AzureStorageServiceImpl(Settings.EMPTY, Collections.emptyMap());
-           fail("we should have raised an IllegalArgumentException");
-       } catch (IllegalArgumentException e) {
+   public void testReinitClientSettings() throws IOException {
+       final MockSecureSettings secureSettings1 = new MockSecureSettings();
+       secureSettings1.setString("azure.client.azure1.account", "myaccount11");
+       secureSettings1.setString("azure.client.azure1.key", encodeKey("mykey11"));
+       secureSettings1.setString("azure.client.azure2.account", "myaccount12");
+       secureSettings1.setString("azure.client.azure2.key", encodeKey("mykey12"));
+       final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build();
+       final MockSecureSettings secureSettings2 = new MockSecureSettings();
+       secureSettings2.setString("azure.client.azure1.account", "myaccount21");
+       secureSettings2.setString("azure.client.azure1.key", encodeKey("mykey21"));
+       secureSettings2.setString("azure.client.azure3.account", "myaccount23");
+       secureSettings2.setString("azure.client.azure3.key", encodeKey("mykey23"));
+       final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build();
+       try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) {
+           final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService;
+           final CloudBlobClient client11 = azureStorageService.client("azure1").v1();
+           assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net"));
+           final CloudBlobClient client12 = azureStorageService.client("azure2").v1();
+           assertThat(client12.getEndpoint().toString(), equalTo("https://myaccount12.blob.core.windows.net"));
+           // client 3 is missing
+           final SettingsException e1 = expectThrows(SettingsException.class, () -> azureStorageService.client("azure3"));
+           assertThat(e1.getMessage(), is("Unable to find client with name [azure3]"));
+           // update client settings
+           plugin.reload(settings2);
+           // old client 1 not changed
+           assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net"));
+           // new client 1 is changed
+           final CloudBlobClient client21 = azureStorageService.client("azure1").v1();
+           assertThat(client21.getEndpoint().toString(), equalTo("https://myaccount21.blob.core.windows.net"));
+           // old client 2 not changed
+           assertThat(client12.getEndpoint().toString(), equalTo("https://myaccount12.blob.core.windows.net"));
+           // new client2 is gone
+           final SettingsException e2 = expectThrows(SettingsException.class, () -> azureStorageService.client("azure2"));
+           assertThat(e2.getMessage(), is("Unable to find client with name [azure2]"));
+           // client 3 emerged
+           final CloudBlobClient client23 = azureStorageService.client("azure3").v1();
+           assertThat(client23.getEndpoint().toString(), equalTo("https://myaccount23.blob.core.windows.net"));
+       }
+   }

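testReinitClientSettings pins down the reload contract: client references obtained before `reload` keep working with their old credentials, while lookups made after it see the new keystore contents. The plugin side of that contract is small; a sketch of the interface the commit message names (the exact signature is an assumption, inferred from how `plugin.reload(settings2)` is invoked above):

    // Sketch of the ReloadablePlugin contract exercised by the test above.
    // The Settings argument carries the re-opened keystore; once reload
    // returns, the keystore is closed and secure settings become unreadable.
    public interface ReloadablePlugin {
        void reload(Settings settings) throws Exception;
    }
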
+   public void testReinitClientEmptySettings() throws IOException {
+       final MockSecureSettings secureSettings = new MockSecureSettings();
+       secureSettings.setString("azure.client.azure1.account", "myaccount1");
+       secureSettings.setString("azure.client.azure1.key", encodeKey("mykey11"));
+       final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
+       try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) {
+           final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService;
+           final CloudBlobClient client11 = azureStorageService.client("azure1").v1();
+           assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net"));
+           // reinit with empty settings
+           final SettingsException e = expectThrows(SettingsException.class, () -> plugin.reload(Settings.EMPTY));
+           assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration."));
+           // existing client untouched
+           assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net"));
+           // new client also untouched
+           final CloudBlobClient client21 = azureStorageService.client("azure1").v1();
+           assertThat(client21.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net"));
+       }
+   }

+   public void testReinitClientWrongSettings() throws IOException {
+       final MockSecureSettings secureSettings1 = new MockSecureSettings();
+       secureSettings1.setString("azure.client.azure1.account", "myaccount1");
+       secureSettings1.setString("azure.client.azure1.key", encodeKey("mykey11"));
+       final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build();
+       final MockSecureSettings secureSettings2 = new MockSecureSettings();
+       secureSettings2.setString("azure.client.azure1.account", "myaccount1");
+       // missing key
+       final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build();
+       try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) {
+           final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService;
+           final CloudBlobClient client11 = azureStorageService.client("azure1").v1();
+           assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net"));
+           plugin.reload(settings2);
+           // existing client untouched
+           assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net"));
+           final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure1"));
+           assertThat(e.getMessage(), is("Invalid azure client settings with name [azure1]"));
+       }
+   }

    public void testGetSelectedClientNonExisting() {
-       AzureStorageServiceImpl azureStorageService = createAzureService(buildSettings());
-       IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
-           azureStorageService.getSelectedClient("azure4", LocationMode.PRIMARY_ONLY);
-       });
+       final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings());
+       final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure4"));
        assertThat(e.getMessage(), is("Unable to find client with name [azure4]"));
    }

    public void testGetSelectedClientDefaultTimeout() {
-       Settings timeoutSettings = Settings.builder()
+       final Settings timeoutSettings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure3.timeout", "30s")
            .build();
-       AzureStorageServiceImpl azureStorageService = createAzureService(timeoutSettings);
-       CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
+       final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(timeoutSettings);
+       final CloudBlobClient client1 = azureStorageService.client("azure1").v1();
        assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue());
-       CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY);
+       final CloudBlobClient client3 = azureStorageService.client("azure3").v1();
        assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000));
    }

    public void testGetSelectedClientNoTimeout() {
-       AzureStorageServiceImpl azureStorageService = createAzureService(buildSettings());
-       CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
+       final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings());
+       final CloudBlobClient client1 = azureStorageService.client("azure1").v1();
        assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue()));
    }

    public void testGetSelectedClientBackoffPolicy() {
-       AzureStorageServiceImpl azureStorageService = createAzureService(buildSettings());
-       CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
+       final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings());
+       final CloudBlobClient client1 = azureStorageService.client("azure1").v1();
        assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue()));
        assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class));
    }

    public void testGetSelectedClientBackoffPolicyNbRetries() {
-       Settings timeoutSettings = Settings.builder()
+       final Settings timeoutSettings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure1.max_retries", 7)
            .build();

-       AzureStorageServiceImpl azureStorageService = createAzureService(timeoutSettings);
-       CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
+       final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(timeoutSettings);
+       final CloudBlobClient client1 = azureStorageService.client("azure1").v1();
        assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue()));
        assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class));
    }

    public void testNoProxy() {
-       Settings settings = Settings.builder()
+       final Settings settings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .build();
-       AzureStorageServiceImpl mock = createAzureService(settings);
+       final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings);
        assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue());
        assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue());
        assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue());
    }

    public void testProxyHttp() throws UnknownHostException {
-       Settings settings = Settings.builder()
+       final Settings settings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure1.proxy.host", "127.0.0.1")
            .put("azure.client.azure1.proxy.port", 8080)
            .put("azure.client.azure1.proxy.type", "http")
            .build();
-       AzureStorageServiceImpl mock = createAzureService(settings);
-       Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy();
+       final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings);
+       final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy();

        assertThat(azure1Proxy, notNullValue());
        assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP));

@@ -166,7 +224,7 @@ public class AzureStorageServiceTests extends ESTestCase {
    }

    public void testMultipleProxies() throws UnknownHostException {
-       Settings settings = Settings.builder()
+       final Settings settings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure1.proxy.host", "127.0.0.1")
            .put("azure.client.azure1.proxy.port", 8080)

@@ -175,12 +233,12 @@ public class AzureStorageServiceTests extends ESTestCase {
            .put("azure.client.azure2.proxy.port", 8081)
            .put("azure.client.azure2.proxy.type", "http")
            .build();
-       AzureStorageServiceImpl mock = createAzureService(settings);
-       Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy();
+       final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings);
+       final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy();
        assertThat(azure1Proxy, notNullValue());
        assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP));
        assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080)));
-       Proxy azure2Proxy = mock.storageSettings.get("azure2").getProxy();
+       final Proxy azure2Proxy = mock.storageSettings.get("azure2").getProxy();
        assertThat(azure2Proxy, notNullValue());
        assertThat(azure2Proxy.type(), is(Proxy.Type.HTTP));
        assertThat(azure2Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8081)));

@@ -188,14 +246,14 @@ public class AzureStorageServiceTests extends ESTestCase {
    }

    public void testProxySocks() throws UnknownHostException {
-       Settings settings = Settings.builder()
+       final Settings settings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure1.proxy.host", "127.0.0.1")
            .put("azure.client.azure1.proxy.port", 8080)
            .put("azure.client.azure1.proxy.type", "socks")
            .build();
-       AzureStorageServiceImpl mock = createAzureService(settings);
-       Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy();
+       final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings);
+       final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy();
        assertThat(azure1Proxy, notNullValue());
        assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS));
        assertThat(azure1Proxy.address(), is(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 8080)));

@@ -204,47 +262,46 @@ public class AzureStorageServiceTests extends ESTestCase {
    }

    public void testProxyNoHost() {
-       Settings settings = Settings.builder()
+       final Settings settings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure1.proxy.port", 8080)
            .put("azure.client.azure1.proxy.type", randomFrom("socks", "http"))
            .build();

-       SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings));
+       final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings));
        assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage());
    }

    public void testProxyNoPort() {
-       Settings settings = Settings.builder()
+       final Settings settings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure1.proxy.host", "127.0.0.1")
            .put("azure.client.azure1.proxy.type", randomFrom("socks", "http"))
            .build();

-       SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings));
+       final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings));
        assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage());
    }

    public void testProxyNoType() {
-       Settings settings = Settings.builder()
+       final Settings settings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure1.proxy.host", "127.0.0.1")
            .put("azure.client.azure1.proxy.port", 8080)
            .build();

-       SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings));
+       final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings));
        assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage());
    }

    public void testProxyWrongHost() {
-       Settings settings = Settings.builder()
+       final Settings settings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure1.proxy.type", randomFrom("socks", "http"))
            .put("azure.client.azure1.proxy.host", "thisisnotavalidhostorwehavebeensuperunlucky")
            .put("azure.client.azure1.proxy.port", 8080)
            .build();

-       SettingsException e = expectThrows(SettingsException.class, () -> createAzureService(settings));
+       final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings));
        assertEquals("Azure proxy host is unknown.", e.getMessage());
    }

@@ -260,7 +317,7 @@ public class AzureStorageServiceTests extends ESTestCase {
    }

    private static MockSecureSettings buildSecureSettings() {
-       MockSecureSettings secureSettings = new MockSecureSettings();
+       final MockSecureSettings secureSettings = new MockSecureSettings();
        secureSettings.setString("azure.client.azure1.account", "myaccount1");
        secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1"));
        secureSettings.setString("azure.client.azure2.account", "myaccount2");

@@ -274,10 +331,6 @@ public class AzureStorageServiceTests extends ESTestCase {
        return Settings.builder().setSecureSettings(buildSecureSettings()).build();
    }

-   private static AzureStorageServiceImpl createAzureService(final Settings settings) {
-       return new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings));
-   }

    private static String encodeKey(final String value) {
        return Base64.encode(value.getBytes(StandardCharsets.UTF_8));
    }

@@ -64,18 +64,24 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
    // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload
    private static final int LARGE_BLOB_THRESHOLD_BYTE_SIZE = 5 * 1024 * 1024;

-   private final Storage storage;
-   private final String bucket;
+   private final String bucketName;
+   private final String clientName;
+   private final GoogleCloudStorageService storageService;

-   GoogleCloudStorageBlobStore(Settings settings, String bucket, Storage storage) {
+   GoogleCloudStorageBlobStore(Settings settings, String bucketName, String clientName, GoogleCloudStorageService storageService) {
        super(settings);
-       this.bucket = bucket;
-       this.storage = storage;
-       if (doesBucketExist(bucket) == false) {
-           throw new BlobStoreException("Bucket [" + bucket + "] does not exist");
+       this.bucketName = bucketName;
+       this.clientName = clientName;
+       this.storageService = storageService;
+       if (doesBucketExist(bucketName) == false) {
+           throw new BlobStoreException("Bucket [" + bucketName + "] does not exist");
        }
    }

+   private Storage client() throws IOException {
+       return storageService.client(clientName);
+   }

    @Override
    public BlobContainer blobContainer(BlobPath path) {
        return new GoogleCloudStorageBlobContainer(path, this);

@@ -91,14 +97,14 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
    }

    /**
-    * Return true if the given bucket exists
+    * Return true iff the given bucket exists
     *
     * @param bucketName name of the bucket
-    * @return true if the bucket exists, false otherwise
+    * @return true iff the bucket exists
     */
    boolean doesBucketExist(String bucketName) {
        try {
-           final Bucket bucket = SocketAccess.doPrivilegedIOException(() -> storage.get(bucketName));
+           final Bucket bucket = SocketAccess.doPrivilegedIOException(() -> client().get(bucketName));
            return bucket != null;
        } catch (final Exception e) {
            throw new BlobStoreException("Unable to check if bucket [" + bucketName + "] exists", e);

@@ -106,10 +112,9 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
    }

    /**
-    * List blobs in the bucket under the specified path. The path root is removed.
+    * List blobs in the specific bucket under the specified path. The path root is removed.
     *
-    * @param path
-    *            base path of the blobs to list
+    * @param path base path of the blobs to list
     * @return a map of blob names and their metadata
     */
    Map<String, BlobMetaData> listBlobs(String path) throws IOException {

@@ -117,20 +122,19 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
    }

    /**
-    * List all blobs in the bucket which have a prefix
+    * List all blobs in the specific bucket with names prefixed
     *
     * @param path
     *            base path of the blobs to list. This path is removed from the
     *            names of the blobs returned.
-    * @param prefix
-    *            prefix of the blobs to list.
+    * @param prefix prefix of the blobs to list.
     * @return a map of blob names and their metadata.
     */
    Map<String, BlobMetaData> listBlobsByPrefix(String path, String prefix) throws IOException {
        final String pathPrefix = buildKey(path, prefix);
        final MapBuilder<String, BlobMetaData> mapBuilder = MapBuilder.newMapBuilder();
        SocketAccess.doPrivilegedVoidIOException(() -> {
-           storage.get(bucket).list(BlobListOption.prefix(pathPrefix)).iterateAll().forEach(blob -> {
+           client().get(bucketName).list(BlobListOption.prefix(pathPrefix)).iterateAll().forEach(blob -> {
                assert blob.getName().startsWith(path);
                final String suffixName = blob.getName().substring(path.length());
                mapBuilder.put(suffixName, new PlainBlobMetaData(suffixName, blob.getSize()));

@@ -140,26 +144,26 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
    }

    /**
-    * Returns true if the blob exists in the bucket
+    * Returns true if the blob exists in the specific bucket
     *
     * @param blobName name of the blob
-    * @return true if the blob exists, false otherwise
+    * @return true iff the blob exists
     */
    boolean blobExists(String blobName) throws IOException {
-       final BlobId blobId = BlobId.of(bucket, blobName);
-       final Blob blob = SocketAccess.doPrivilegedIOException(() -> storage.get(blobId));
+       final BlobId blobId = BlobId.of(bucketName, blobName);
+       final Blob blob = SocketAccess.doPrivilegedIOException(() -> client().get(blobId));
        return blob != null;
    }

    /**
-    * Returns an {@link java.io.InputStream} for a given blob
+    * Returns an {@link java.io.InputStream} for the given blob name
     *
     * @param blobName name of the blob
-    * @return an InputStream
+    * @return the InputStream used to read the blob's content
     */
    InputStream readBlob(String blobName) throws IOException {
-       final BlobId blobId = BlobId.of(bucket, blobName);
-       final Blob blob = SocketAccess.doPrivilegedIOException(() -> storage.get(blobId));
+       final BlobId blobId = BlobId.of(bucketName, blobName);
+       final Blob blob = SocketAccess.doPrivilegedIOException(() -> client().get(blobId));
        if (blob == null) {
            throw new NoSuchFileException("Blob [" + blobName + "] does not exit");
        }

@@ -184,13 +188,13 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
    }

    /**
-    * Writes a blob in the bucket.
+    * Writes a blob in the specific bucket
     *
     * @param inputStream content of the blob to be written
     * @param blobSize expected size of the blob to be written
     */
    void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
-       final BlobInfo blobInfo = BlobInfo.newBuilder(bucket, blobName).build();
+       final BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build();
        if (blobSize > LARGE_BLOB_THRESHOLD_BYTE_SIZE) {
            writeBlobResumable(blobInfo, inputStream);
        } else {

@@ -208,8 +212,8 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
     */
    private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream) throws IOException {
        try {
-           final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException(
-               () -> storage.writer(blobInfo, Storage.BlobWriteOption.doesNotExist()));
+           final WriteChannel writeChannel = SocketAccess
+               .doPrivilegedIOException(() -> client().writer(blobInfo, Storage.BlobWriteOption.doesNotExist()));
            Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() {
                @Override
                public boolean isOpen() {

@@ -227,7 +231,7 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
                    return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src));
                }
            }));
-       } catch (StorageException se) {
+       } catch (final StorageException se) {
            if (se.getCode() == HTTP_PRECON_FAILED) {
                throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage());
            }

@@ -249,45 +253,43 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
        assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method";
        final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize));
        Streams.copy(inputStream, baos);
-       SocketAccess.doPrivilegedVoidIOException(
-           () -> {
-               try {
-                   storage.create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist());
-               } catch (StorageException se) {
-                   if (se.getCode() == HTTP_PRECON_FAILED) {
-                       throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage());
-                   }
-                   throw se;
-               }
-           });
+       try {
+           SocketAccess.doPrivilegedVoidIOException(
+               () -> client().create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist()));
+       } catch (final StorageException se) {
+           if (se.getCode() == HTTP_PRECON_FAILED) {
+               throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage());
+           }
+           throw se;
+       }
    }

    /**
-    * Deletes a blob in the bucket
+    * Deletes the blob from the specific bucket
     *
     * @param blobName name of the blob
     */
    void deleteBlob(String blobName) throws IOException {
-       final BlobId blobId = BlobId.of(bucket, blobName);
-       final boolean deleted = SocketAccess.doPrivilegedIOException(() -> storage.delete(blobId));
+       final BlobId blobId = BlobId.of(bucketName, blobName);
+       final boolean deleted = SocketAccess.doPrivilegedIOException(() -> client().delete(blobId));
        if (deleted == false) {
            throw new NoSuchFileException("Blob [" + blobName + "] does not exist");
        }
    }

    /**
-    * Deletes multiple blobs in the bucket that have a given prefix
+    * Deletes multiple blobs from the specific bucket all of which have prefixed names
     *
-    * @param prefix prefix of the buckets to delete
+    * @param prefix prefix of the blobs to delete
     */
    void deleteBlobsByPrefix(String prefix) throws IOException {
        deleteBlobs(listBlobsByPrefix("", prefix).keySet());
    }

    /**
-    * Deletes multiple blobs in the given bucket (uses a batch request to perform this)
+    * Deletes multiple blobs from the specific bucket using a batch request
     *
-    * @param blobNames names of the bucket to delete
+    * @param blobNames names of the blobs to delete
     */
    void deleteBlobs(Collection<String> blobNames) throws IOException {
        if (blobNames.isEmpty()) {

@@ -298,13 +300,13 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
            deleteBlob(blobNames.iterator().next());
            return;
        }
-       final List<BlobId> blobIdsToDelete = blobNames.stream().map(blobName -> BlobId.of(bucket, blobName)).collect(Collectors.toList());
-       final List<Boolean> deletedStatuses = SocketAccess.doPrivilegedIOException(() -> storage.delete(blobIdsToDelete));
+       final List<BlobId> blobIdsToDelete = blobNames.stream().map(blob -> BlobId.of(bucketName, blob)).collect(Collectors.toList());
+       final List<Boolean> deletedStatuses = SocketAccess.doPrivilegedIOException(() -> client().delete(blobIdsToDelete));
        assert blobIdsToDelete.size() == deletedStatuses.size();
        boolean failed = false;
        for (int i = 0; i < blobIdsToDelete.size(); i++) {
            if (deletedStatuses.get(i) == false) {
-               logger.error("Failed to delete blob [{}] in bucket [{}]", blobIdsToDelete.get(i).getName(), bucket);
+               logger.error("Failed to delete blob [{}] in bucket [{}]", blobIdsToDelete.get(i).getName(), bucketName);
                failed = true;
            }
        }

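Note the pattern running through this file: every operation now goes through the private `client()` accessor instead of a `Storage` field captured at construction time, so a reload only has to swap the service's cache and in-flight blob stores pick up the new credentials on their very next call. Construction accordingly passes coordinates rather than a live client; a sketch mirroring the repository change further down (the literal bucket and client names are illustrative):

    // Sketch: the blob store no longer receives a live Storage client, only
    // the names needed to fetch one lazily from the (reloadable) service.
    GoogleCloudStorageBlobStore blobStore =
            new GoogleCloudStorageBlobStore(settings, "my-bucket", "default", storageService);
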
@@ -24,35 +24,34 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.ReloadablePlugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.repositories.Repository;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

-public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin {
+public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin {

-   private final Map<String, GoogleCloudStorageClientSettings> clientsSettings;
+   // package-private for tests
+   final GoogleCloudStorageService storageService;

    public GoogleCloudStoragePlugin(final Settings settings) {
-       clientsSettings = GoogleCloudStorageClientSettings.load(settings);
-   }
-
-   protected Map<String, GoogleCloudStorageClientSettings> getClientsSettings() {
-       return clientsSettings;
+       this.storageService = createStorageService(settings);
+       // eagerly load client settings so that secure settings are readable (not closed)
+       reload(settings);
    }

    // overridable for tests
-   protected GoogleCloudStorageService createStorageService(Environment environment) {
-       return new GoogleCloudStorageService(environment, clientsSettings);
+   protected GoogleCloudStorageService createStorageService(Settings settings) {
+       return new GoogleCloudStorageService(settings);
    }

    @Override
    public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
        return Collections.singletonMap(GoogleCloudStorageRepository.TYPE,
-           (metadata) -> new GoogleCloudStorageRepository(metadata, env, namedXContentRegistry, createStorageService(env)));
+           (metadata) -> new GoogleCloudStorageRepository(metadata, env, namedXContentRegistry, this.storageService));
    }

    @Override

@@ -66,4 +65,15 @@ public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin
        GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING,
        GoogleCloudStorageClientSettings.TOKEN_URI_SETTING);
    }

+   @Override
+   public void reload(Settings settings) {
+       // Secure settings should be readable inside this method. Duplicate client
+       // settings in a format (`GoogleCloudStorageClientSettings`) that does not
+       // require for the `SecureSettings` to be open. Pass that around (the
+       // `GoogleCloudStorageClientSettings` instance) instead of the `Settings`
+       // instance.
+       final Map<String, GoogleCloudStorageClientSettings> clientsSettings = GoogleCloudStorageClientSettings.load(settings);
+       this.storageService.refreshAndClearCache(clientsSettings);
+   }
}

|
|
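Taken together, the plugin above shows the reload contract end to end: secure settings are decrypted and readable only while `reload` runs, so the plugin copies what it needs into plain objects before the keystore closes again. A minimal sketch of another plugin honoring the same contract (the class name and the `myplugin.secret` setting are hypothetical, for illustration only):

```java
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ReloadablePlugin;

public class MyReloadablePlugin extends Plugin implements ReloadablePlugin {

    // hypothetical secure setting, registered the usual way via getSettings()
    static final Setting<SecureString> SECRET = SecureSetting.secureString("myplugin.secret", null);

    private volatile String cachedSecret;

    public MyReloadablePlugin(final Settings settings) {
        // eagerly read the secure setting while the keystore is still open
        reload(settings);
    }

    @Override
    public void reload(Settings settings) {
        // the keystore backing `settings` is readable only for the duration of
        // this call, so copy the decrypted value into a plain field
        try (SecureString secret = SECRET.get(settings)) {
            this.cachedSecret = secret.toString();
        }
    }
}
```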
@@ -38,8 +38,6 @@ import static org.elasticsearch.common.settings.Setting.boolSetting;
import static org.elasticsearch.common.settings.Setting.byteSizeSetting;
import static org.elasticsearch.common.settings.Setting.simpleString;

import com.google.cloud.storage.Storage;

class GoogleCloudStorageRepository extends BlobStoreRepository {

    // package private for testing

@@ -86,8 +84,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository {

        logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress);

        Storage client = SocketAccess.doPrivilegedIOException(() -> storageService.createClient(clientName));
        this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, client);
        this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService);
    }

@@ -28,11 +28,13 @@ import com.google.cloud.http.HttpTransportOptions;
import com.google.cloud.storage.Storage;
import com.google.cloud.storage.StorageOptions;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.common.util.LazyInitializable;

import java.io.IOException;
import java.net.HttpURLConnection;

@@ -40,30 +42,74 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

import static java.util.Collections.emptyMap;

public class GoogleCloudStorageService extends AbstractComponent {

    /** Clients settings identified by client name. */
    private final Map<String, GoogleCloudStorageClientSettings> clientsSettings;
    /**
     * Dictionary of client instances. Client instances are built lazily from the
     * latest settings.
     */
    private final AtomicReference<Map<String, LazyInitializable<Storage, IOException>>> clientsCache = new AtomicReference<>(emptyMap());

    public GoogleCloudStorageService(final Environment environment, final Map<String, GoogleCloudStorageClientSettings> clientsSettings) {
        super(environment.settings());
        this.clientsSettings = clientsSettings;
    public GoogleCloudStorageService(final Settings settings) {
        super(settings);
    }

    /**
     * Creates a client that can be used to manage Google Cloud Storage objects.
     * Refreshes the client settings and clears the client cache. Subsequent calls to
     * {@code GoogleCloudStorageService#client} will return new clients constructed
     * using the parameter settings.
     *
     * @param clientsSettings the new settings used for building clients for subsequent requests
     */
    public synchronized void refreshAndClearCache(Map<String, GoogleCloudStorageClientSettings> clientsSettings) {
        // build the new lazy clients
        final MapBuilder<String, LazyInitializable<Storage, IOException>> newClientsCache = MapBuilder.newMapBuilder();
        for (final Map.Entry<String, GoogleCloudStorageClientSettings> entry : clientsSettings.entrySet()) {
            newClientsCache.put(entry.getKey(),
                    new LazyInitializable<Storage, IOException>(() -> createClient(entry.getKey(), entry.getValue())));
        }
        // make the new clients available
        final Map<String, LazyInitializable<Storage, IOException>> oldClientCache = clientsCache.getAndSet(newClientsCache.immutableMap());
        // release old clients
        oldClientCache.values().forEach(LazyInitializable::reset);
    }

    /**
     * Attempts to retrieve a client from the cache. If the client does not exist it
     * will be created from the latest settings and will populate the cache. The
     * returned instance should not be cached by the calling code. Instead, for each
     * use, the (possibly updated) instance should be requested by calling this
     * method.
     *
     * @param clientName name of the client settings used to create the client
     * @return a cached client storage instance that can be used to manage objects
     *         (blobs)
     */
    public Storage client(final String clientName) throws IOException {
        final LazyInitializable<Storage, IOException> lazyClient = clientsCache.get().get(clientName);
        if (lazyClient == null) {
            throw new IllegalArgumentException("Unknown client name [" + clientName + "]. Existing client configs: "
                    + Strings.collectionToDelimitedString(clientsCache.get().keySet(), ","));
        }
        return lazyClient.getOrCompute();
    }

    /**
     * Creates a client that can be used to manage Google Cloud Storage objects. The client is thread-safe.
     *
     * @param clientName name of client settings to use, including secure settings
     * @return a Client instance that can be used to manage Storage objects
     * @param clientSettings the client settings to use, including secure settings
     * @return a new client storage instance that can be used to manage objects
     *         (blobs)
     */
    public Storage createClient(final String clientName) throws Exception {
        final GoogleCloudStorageClientSettings clientSettings = clientsSettings.get(clientName);
        if (clientSettings == null) {
            throw new IllegalArgumentException("Unknown client name [" + clientName + "]. Existing client configs: "
                    + Strings.collectionToDelimitedString(clientsSettings.keySet(), ","));
        }
        final HttpTransport httpTransport = createHttpTransport(clientSettings.getHost());
    private Storage createClient(final String clientName, final GoogleCloudStorageClientSettings clientSettings) throws IOException {
        logger.debug(() -> new ParameterizedMessage("creating GCS client with client_name [{}], endpoint [{}]", clientName,
                clientSettings.getHost()));
        final HttpTransport httpTransport = SocketAccess.doPrivilegedIOException(() -> createHttpTransport(clientSettings.getHost()));
        final HttpTransportOptions httpTransportOptions = HttpTransportOptions.newBuilder()
                .setConnectTimeout(toTimeout(clientSettings.getConnectTimeout()))
                .setReadTimeout(toTimeout(clientSettings.getReadTimeout()))

@@ -114,6 +160,9 @@ public class GoogleCloudStorageService extends AbstractComponent {
        builder.trustCertificates(GoogleUtils.getCertificateTrustStore());
        if (Strings.hasLength(endpoint)) {
            final URL endpointUrl = URI.create(endpoint).toURL();
            // it is crucial to open a connection for each URL (see {@code
            // DefaultConnectionFactory#openConnection}) instead of reusing connections,
            // because the storage instance has to be thread-safe as it is cached.
            builder.setConnectionFactory(new DefaultConnectionFactory() {
                @Override
                public HttpURLConnection openConnection(final URL originalUrl) throws IOException {

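The cache swap above hinges on two properties: the map inside the `AtomicReference` is immutable, and each entry builds its `Storage` client at most once. A self-contained sketch of the same idea, with a small stand-in for `LazyInitializable` (all names here are illustrative, not the actual Elasticsearch classes):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

/** Builds a value on first access; reset() drops it so the old generation can be released. */
class Lazy<T> {
    private final Supplier<T> builder;
    private volatile T value;

    Lazy(Supplier<T> builder) { this.builder = builder; }

    synchronized T getOrCompute() {
        if (value == null) {
            value = builder.get();
        }
        return value;
    }

    synchronized void reset() { value = null; }
}

class LazyClientCache {
    private final AtomicReference<Map<String, Lazy<String>>> cache =
            new AtomicReference<>(java.util.Collections.emptyMap());

    /** Atomically publish a new generation of lazy clients and release the old one. */
    void refreshAndClearCache(Map<String, Supplier<String>> clientBuilders) {
        final Map<String, Lazy<String>> fresh = new HashMap<>();
        clientBuilders.forEach((name, builder) -> fresh.put(name, new Lazy<>(builder)));
        final Map<String, Lazy<String>> old =
                cache.getAndSet(java.util.Collections.unmodifiableMap(fresh));
        old.values().forEach(Lazy::reset); // clients of the previous generation go away
    }

    String client(String name) {
        final Lazy<String> lazy = cache.get().get(name);
        if (lazy == null) {
            throw new IllegalArgumentException("Unknown client name [" + name + "]");
        }
        return lazy.getOrCompute(); // built on first use, from the latest settings
    }
}
```

Callers never hold on to the returned client; they re-request it per use, which is what makes a reload visible without any per-call locking.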
@@ -26,11 +26,22 @@ import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
import java.util.Locale;
import java.util.concurrent.ConcurrentHashMap;

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class GoogleCloudStorageBlobStoreContainerTests extends ESBlobStoreContainerTestCase {

    @Override
    protected BlobStore newBlobStore() {
        String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
        return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>()));
        final String bucketName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
        final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
        final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class);
        try {
            when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>()));
        } catch (final Exception e) {
            throw new RuntimeException(e);
        }
        return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService);
    }
}

@@ -24,14 +24,12 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase;
import org.junit.AfterClass;

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

@@ -73,19 +71,19 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos
        }

        @Override
        protected GoogleCloudStorageService createStorageService(Environment environment) {
            return new MockGoogleCloudStorageService(environment, getClientsSettings());
        protected GoogleCloudStorageService createStorageService(Settings settings) {
            return new MockGoogleCloudStorageService(settings);
        }
    }

    public static class MockGoogleCloudStorageService extends GoogleCloudStorageService {

        MockGoogleCloudStorageService(Environment environment, Map<String, GoogleCloudStorageClientSettings> clientsSettings) {
            super(environment, clientsSettings);
        MockGoogleCloudStorageService(Settings settings) {
            super(settings);
        }

        @Override
        public Storage createClient(String clientName) {
        public Storage client(String clientName) {
            return new MockStorage(BUCKET, blobs);
        }
    }

@@ -97,7 +95,7 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos
        assertEquals(GoogleCloudStorageRepository.MAX_CHUNK_SIZE, chunkSize);

        // chunk size in settings
        int size = randomIntBetween(1, 100);
        final int size = randomIntBetween(1, 100);
        repositoryMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE,
                Settings.builder().put("chunk_size", size + "mb").build());
        chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetaData);

@@ -105,7 +103,7 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos

        // zero bytes is not allowed
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
            RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE,
            final RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE,
                    Settings.builder().put("chunk_size", "0").build());
            GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData);
        });

@@ -113,7 +111,7 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos

        // negative bytes not allowed
        e = expectThrows(IllegalArgumentException.class, () -> {
            RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE,
            final RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE,
                    Settings.builder().put("chunk_size", "-1").build());
            GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData);
        });

@@ -121,7 +119,7 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos

        // greater than max chunk size not allowed
        e = expectThrows(IllegalArgumentException.class, () -> {
            RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE,
            final RepositoryMetaData repoMetaData = new RepositoryMetaData("repo", GoogleCloudStorageRepository.TYPE,
                    Settings.builder().put("chunk_size", "101mb").build());
            GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData);
        });

@@ -26,11 +26,22 @@ import org.elasticsearch.repositories.ESBlobStoreTestCase;
import java.util.Locale;
import java.util.concurrent.ConcurrentHashMap;

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class GoogleCloudStorageBlobStoreTests extends ESBlobStoreTestCase {

    @Override
    protected BlobStore newBlobStore() {
        String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
        return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>()));
        final String bucketName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
        final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
        final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class);
        try {
            when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>()));
        } catch (final Exception e) {
            throw new RuntimeException(e);
        }
        return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService);
    }
}

@@ -23,28 +23,36 @@ import com.google.auth.Credentials;
import com.google.cloud.http.HttpTransportOptions;
import com.google.cloud.storage.Storage;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
import java.util.Collections;
import java.util.Locale;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.util.Base64;
import java.util.Locale;
import java.util.UUID;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.containsString;

public class GoogleCloudStorageServiceTests extends ESTestCase {

    public void testClientInitializer() throws Exception {
        final String clientName = randomAlphaOfLength(4).toLowerCase(Locale.ROOT);
        final Environment environment = mock(Environment.class);
        final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
        final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000));
        final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000));
        final String applicationName = randomAlphaOfLength(4);
        final String hostName = randomFrom("http://", "https://") + randomAlphaOfLength(4) + ":" + randomIntBetween(1, 65535);
        final String projectIdName = randomAlphaOfLength(4);
        final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
        final String endpoint = randomFrom("http://", "https://")
                + randomFrom("www.elastic.co", "www.googleapis.com", "localhost/api", "google.com/oauth")
                + ":" + randomIntBetween(1, 65535);
        final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
        final Settings settings = Settings.builder()
                .put(GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(),
                        connectTimeValue.getStringRep())

@@ -52,20 +60,18 @@ public class GoogleCloudStorageServiceTests extends ESTestCase {
                        readTimeValue.getStringRep())
                .put(GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName).getKey(),
                        applicationName)
                .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), hostName)
                .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint)
                .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName)
                .build();
        when(environment.settings()).thenReturn(settings);
        final GoogleCloudStorageClientSettings clientSettings = GoogleCloudStorageClientSettings.getClientSettings(settings, clientName);
        final GoogleCloudStorageService service = new GoogleCloudStorageService(environment,
                Collections.singletonMap(clientName, clientSettings));
        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.createClient("another_client"));
        final GoogleCloudStorageService service = new GoogleCloudStorageService(settings);
        service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings));
        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.client("another_client"));
        assertThat(e.getMessage(), Matchers.startsWith("Unknown client name"));
        assertSettingDeprecationsAndWarnings(
                new Setting<?>[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) });
        final Storage storage = service.createClient(clientName);
        final Storage storage = service.client(clientName);
        assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName));
        assertThat(storage.getOptions().getHost(), Matchers.is(hostName));
        assertThat(storage.getOptions().getHost(), Matchers.is(endpoint));
        assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName));
        assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class));
        assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(),

@@ -75,6 +81,58 @@ public class GoogleCloudStorageServiceTests extends ESTestCase {
        assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class));
    }

    public void testReinitClientSettings() throws Exception {
        final MockSecureSettings secureSettings1 = new MockSecureSettings();
        secureSettings1.setFile("gcs.client.gcs1.credentials_file", serviceAccountFileContent("project_gcs11"));
        secureSettings1.setFile("gcs.client.gcs2.credentials_file", serviceAccountFileContent("project_gcs12"));
        final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build();
        final MockSecureSettings secureSettings2 = new MockSecureSettings();
        secureSettings2.setFile("gcs.client.gcs1.credentials_file", serviceAccountFileContent("project_gcs21"));
        secureSettings2.setFile("gcs.client.gcs3.credentials_file", serviceAccountFileContent("project_gcs23"));
        final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build();
        try (GoogleCloudStoragePlugin plugin = new GoogleCloudStoragePlugin(settings1)) {
            final GoogleCloudStorageService storageService = plugin.storageService;
            final Storage client11 = storageService.client("gcs1");
            assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11"));
            final Storage client12 = storageService.client("gcs2");
            assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12"));
            // client 3 is missing
            final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> storageService.client("gcs3"));
            assertThat(e1.getMessage(), containsString("Unknown client name [gcs3]."));
            // update client settings
            plugin.reload(settings2);
            // old client 1 not changed
            assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11"));
            // new client 1 is changed
            final Storage client21 = storageService.client("gcs1");
            assertThat(client21.getOptions().getProjectId(), equalTo("project_gcs21"));
            // old client 2 not changed
            assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12"));
            // new client 2 is gone
            final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, () -> storageService.client("gcs2"));
            assertThat(e2.getMessage(), containsString("Unknown client name [gcs2]."));
            // client 3 emerged
            final Storage client23 = storageService.client("gcs3");
            assertThat(client23.getOptions().getProjectId(), equalTo("project_gcs23"));
        }
    }

    private byte[] serviceAccountFileContent(String projectId) throws Exception {
        final KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA");
        keyPairGenerator.initialize(1024);
        final KeyPair keyPair = keyPairGenerator.generateKeyPair();
        final String encodedKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded());
        final XContentBuilder serviceAccountBuilder = jsonBuilder().startObject()
                .field("type", "service_account")
                .field("project_id", projectId)
                .field("private_key_id", UUID.randomUUID().toString())
                .field("private_key", "-----BEGIN PRIVATE KEY-----\n" + encodedKey + "\n-----END PRIVATE KEY-----\n")
                .field("client_email", "integration_test@appspot.gserviceaccount.com")
                .field("client_id", "client_id")
                .endObject();
        return BytesReference.toBytes(BytesReference.bytes(serviceAccountBuilder));
    }

    public void testToTimeout() {
        assertEquals(-1, GoogleCloudStorageService.toTimeout(null).intValue());
        assertEquals(-1, GoogleCloudStorageService.toTimeout(TimeValue.ZERO).intValue());

@@ -55,7 +55,7 @@ bundlePlugin {
}

additionalTest('testRepositoryCreds'){
    include '**/RepositorySettingsCredentialsTests.class'
    include '**/RepositoryCredentialsTests.class'
    systemProperty 'es.allow_insecure_settings', 'true'
}

@@ -0,0 +1,63 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import org.elasticsearch.common.util.concurrent.AbstractRefCounted;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

import org.elasticsearch.common.lease.Releasable;

/**
 * Handles the shutdown of the wrapped {@link AmazonS3Client} using reference
 * counting.
 */
public class AmazonS3Reference extends AbstractRefCounted implements Releasable {

    private final AmazonS3 client;

    AmazonS3Reference(AmazonS3 client) {
        super("AWS_S3_CLIENT");
        this.client = client;
    }

    /**
     * Call when the client is not needed anymore.
     */
    @Override
    public void close() {
        decRef();
    }

    /**
     * Returns the underlying `AmazonS3` client. All method calls are permitted BUT
     * NOT shutdown. Shutdown is called when the reference count reaches 0.
     */
    public AmazonS3 client() {
        return client;
    }

    @Override
    protected void closeInternal() {
        client.shutdown();
    }

}
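This reference counting is what lets a reload destroy stale clients without pulling them out from under in-flight requests: the cache holds one reference, every caller briefly takes another, and `shutdown()` only runs when the last holder releases. A self-contained illustration of those semantics using a plain `AtomicInteger` (not the actual `AbstractRefCounted` base class):

```java
import java.util.concurrent.atomic.AtomicInteger;

class RefCountedClient implements AutoCloseable {

    private final AtomicInteger refCount = new AtomicInteger(1); // the cache's own reference

    /** Takes a reference; fails if the count already hit zero (client shut down by a reload). */
    boolean tryIncRef() {
        int current;
        do {
            current = refCount.get();
            if (current == 0) {
                return false;
            }
        } while (refCount.compareAndSet(current, current + 1) == false);
        return true;
    }

    /** Releases a reference; the last release destroys the client. */
    void decRef() {
        if (refCount.decrementAndGet() == 0) {
            shutdown();
        }
    }

    @Override
    public void close() {
        decRef(); // enables try-with-resources at call sites
    }

    private void shutdown() {
        System.out.println("client shut down");
    }
}
```

Call sites then take the shape `try (AmazonS3Reference ref = blobStore.clientReference()) { ... }`, which is exactly how the S3 blob container and blob store below are rewritten.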
@@ -19,14 +19,25 @@

package org.elasticsearch.repositories.s3;

import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.settings.Settings;
import java.io.Closeable;
import java.util.Map;

interface AwsS3Service extends LifecycleComponent {
interface AwsS3Service extends Closeable {

    /**
     * Creates an {@code AmazonS3} client from the given repository metadata and node settings.
     * Creates and then caches an {@code AmazonS3} client using the current client
     * settings. Returns an {@code AmazonS3Reference} wrapper which has to be
     * released as soon as it is not needed anymore.
     */
    AmazonS3 client(Settings repositorySettings);
    AmazonS3Reference client(String clientName);

    /**
     * Updates settings for building clients and clears the client cache. Future
     * client requests will use the new settings to lazily build new clients.
     *
     * @param clientsSettings the new refreshed settings
     * @return the old stale settings
     */
    Map<String, S3ClientSettings> refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings);

}

@@ -28,66 +28,91 @@ import com.amazonaws.http.IdleConnectionReaper;
import com.amazonaws.internal.StaticCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;

import java.util.HashMap;
import java.io.IOException;
import java.util.Map;
import java.util.function.Function;
import static java.util.Collections.emptyMap;


class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service {
class InternalAwsS3Service extends AbstractComponent implements AwsS3Service {

    // pkg private for tests
    static final Setting<String> CLIENT_NAME = new Setting<>("client", "default", Function.identity());
    private volatile Map<String, AmazonS3Reference> clientsCache = emptyMap();
    private volatile Map<String, S3ClientSettings> clientsSettings = emptyMap();

    private final Map<String, S3ClientSettings> clientsSettings;

    private final Map<String, AmazonS3Client> clientsCache = new HashMap<>();

    InternalAwsS3Service(Settings settings, Map<String, S3ClientSettings> clientsSettings) {
    InternalAwsS3Service(Settings settings) {
        super(settings);
        this.clientsSettings = clientsSettings;
    }

    /**
     * Refreshes the settings for the AmazonS3 clients and clears the cache of
     * existing clients. New clients will be built using these new settings. Old
     * clients are usable until released. On release they will be destroyed instead
     * of being returned to the cache.
     */
    @Override
    public synchronized AmazonS3 client(Settings repositorySettings) {
        String clientName = CLIENT_NAME.get(repositorySettings);
        AmazonS3Client client = clientsCache.get(clientName);
        if (client != null) {
            return client;
    public synchronized Map<String, S3ClientSettings> refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings) {
        // shutdown all unused clients
        // others will shutdown on their respective release
        releaseCachedClients();
        final Map<String, S3ClientSettings> prevSettings = this.clientsSettings;
        this.clientsSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap();
        assert this.clientsSettings.containsKey("default") : "always at least have 'default'";
        // clients are built lazily by {@link client(String)}
        return prevSettings;
    }

    /**
     * Attempts to retrieve a client by name from the cache. If the client does not
     * exist it will be created.
     */
    @Override
    public AmazonS3Reference client(String clientName) {
        AmazonS3Reference clientReference = clientsCache.get(clientName);
        if ((clientReference != null) && clientReference.tryIncRef()) {
            return clientReference;
        }

        S3ClientSettings clientSettings = clientsSettings.get(clientName);
        if (clientSettings == null) {
            throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. Existing client configs: " +
                Strings.collectionToDelimitedString(clientsSettings.keySet(), ","));
        synchronized (this) {
            clientReference = clientsCache.get(clientName);
            if ((clientReference != null) && clientReference.tryIncRef()) {
                return clientReference;
            }
            final S3ClientSettings clientSettings = clientsSettings.get(clientName);
            if (clientSettings == null) {
                throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. Existing client configs: "
                        + Strings.collectionToDelimitedString(clientsSettings.keySet(), ","));
            }
            logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint);
            clientReference = new AmazonS3Reference(buildClient(clientSettings));
            clientReference.incRef();
            clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientName, clientReference).immutableMap();
            return clientReference;
        }
    }

        logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint);

        AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, clientSettings, repositorySettings);
        ClientConfiguration configuration = buildConfiguration(clientSettings);

        client = new AmazonS3Client(credentials, configuration);

    private AmazonS3 buildClient(S3ClientSettings clientSettings) {
        final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings);
        final ClientConfiguration configuration = buildConfiguration(clientSettings);
        final AmazonS3 client = buildClient(credentials, configuration);
        if (Strings.hasText(clientSettings.endpoint)) {
            client.setEndpoint(clientSettings.endpoint);
        }

        clientsCache.put(clientName, client);
        return client;
    }

    // proxy for testing
    AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
        return new AmazonS3Client(credentials, configuration);
    }

    // pkg private for tests
    static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) {
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        final ClientConfiguration clientConfiguration = new ClientConfiguration();
        // the response metadata cache is only there for diagnostics purposes,
        // but can force objects from every response to the old generation.
        clientConfiguration.setResponseMetadataCacheSize(0);

@@ -109,27 +134,8 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se
    }

    // pkg private for tests
    static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger,
                                                   S3ClientSettings clientSettings, Settings repositorySettings) {


        BasicAWSCredentials credentials = clientSettings.credentials;
        if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) {
            if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) {
                throw new IllegalArgumentException("Repository setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() +
                    " must be accompanied by setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + "]");
            }
            try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings);
                 SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) {
                credentials = new BasicAWSCredentials(key.toString(), secret.toString());
            }
            // backcompat for reading keys out of repository settings
            deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " +
                "store these in named clients and the elasticsearch keystore for secure settings.");
        } else if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings)) {
            throw new IllegalArgumentException("Repository setting [" + S3Repository.SECRET_KEY_SETTING.getKey() +
                " must be accompanied by setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + "]");
        }
    static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) {
        final BasicAWSCredentials credentials = clientSettings.credentials;
        if (credentials == null) {
            logger.debug("Using instance profile credentials");
            return new PrivilegedInstanceProfileCredentialsProvider();

@@ -139,21 +145,15 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se
        }
    }

    @Override
    protected void doStart() throws ElasticsearchException {
    }

    @Override
    protected void doStop() throws ElasticsearchException {
    }

    @Override
    protected void doClose() throws ElasticsearchException {
        for (AmazonS3Client client : clientsCache.values()) {
            client.shutdown();
    protected synchronized void releaseCachedClients() {
        // the clients will shut down once they are no longer used
        for (final AmazonS3Reference clientReference : clientsCache.values()) {
            clientReference.decRef();
        }

        // Ensure that IdleConnectionReaper is shutdown
        // clear previously cached clients, they will be built lazily
        clientsCache = emptyMap();
        // shutdown the IdleConnectionReaper background thread
        // it will be restarted on new client usage
        IdleConnectionReaper.shutdown();
    }

@@ -174,4 +174,10 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se
        SocketAccess.doPrivilegedVoid(credentials::refresh);
    }

    @Override
    public void close() throws IOException {
        releaseCachedClients();
    }

}
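The `client(String)` method above is a double-checked cache: an unsynchronized fast path whose `tryIncRef` can only fail when a concurrent `refreshAndClearCache` has already released the cached reference, and a synchronized slow path that re-checks and rebuilds. A sketch of that shape, reusing the `RefCountedClient` illustration from earlier (illustrative names, not the actual classes):

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

class DoubleCheckedClientCache {

    private volatile Map<String, RefCountedClient> clientsCache = Collections.emptyMap();

    RefCountedClient client(String name) {
        // fast path, no lock: fails only if a concurrent refresh dropped the entry to zero refs
        RefCountedClient cached = clientsCache.get(name);
        if (cached != null && cached.tryIncRef()) {
            return cached;
        }
        synchronized (this) {
            // re-check under the lock, then build and publish a fresh client
            cached = clientsCache.get(name);
            if (cached != null && cached.tryIncRef()) {
                return cached;
            }
            final RefCountedClient fresh = new RefCountedClient();
            fresh.tryIncRef(); // the caller's reference, on top of the cache's own
            final Map<String, RefCountedClient> copy = new HashMap<>(clientsCache);
            copy.put(name, fresh);
            clientsCache = Collections.unmodifiableMap(copy); // immutable map swap
            return fresh;
        }
    }

    synchronized void releaseCachedClients() {
        // drop the cache's reference to every client; in-flight holders keep theirs
        clientsCache.values().forEach(RefCountedClient::decRef);
        clientsCache = Collections.emptyMap();
    }
}
```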
@@ -20,7 +20,6 @@
package org.elasticsearch.repositories.s3;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;

@@ -47,8 +46,6 @@ import org.elasticsearch.common.collect.Tuple;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.NoSuchFileException;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

@@ -70,19 +67,20 @@ class S3BlobContainer extends AbstractBlobContainer {

    @Override
    public boolean blobExists(String blobName) {
        try {
            return SocketAccess.doPrivileged(() -> blobStore.client().doesObjectExist(blobStore.bucket(), buildKey(blobName)));
        } catch (Exception e) {
        try (AmazonS3Reference clientReference = blobStore.clientReference()) {
            return SocketAccess.doPrivileged(() -> clientReference.client().doesObjectExist(blobStore.bucket(), buildKey(blobName)));
        } catch (final Exception e) {
            throw new BlobStoreException("Failed to check if blob [" + blobName + "] exists", e);
        }
    }

    @Override
    public InputStream readBlob(String blobName) throws IOException {
        try {
            S3Object s3Object = SocketAccess.doPrivileged(() -> blobStore.client().getObject(blobStore.bucket(), buildKey(blobName)));
        try (AmazonS3Reference clientReference = blobStore.clientReference()) {
            final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(blobStore.bucket(),
                    buildKey(blobName)));
            return s3Object.getObjectContent();
        } catch (AmazonClientException e) {
        } catch (final AmazonClientException e) {
            if (e instanceof AmazonS3Exception) {
                if (404 == ((AmazonS3Exception) e).getStatusCode()) {
                    throw new NoSuchFileException("Blob object [" + blobName + "] not found: " + e.getMessage());

@@ -110,44 +108,45 @@ class S3BlobContainer extends AbstractBlobContainer {
            throw new NoSuchFileException("Blob [" + blobName + "] does not exist");
        }

        try {
            SocketAccess.doPrivilegedVoid(() -> blobStore.client().deleteObject(blobStore.bucket(), buildKey(blobName)));
        } catch (AmazonClientException e) {
        try (AmazonS3Reference clientReference = blobStore.clientReference()) {
            SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObject(blobStore.bucket(), buildKey(blobName)));
        } catch (final AmazonClientException e) {
            throw new IOException("Exception when deleting blob [" + blobName + "]", e);
        }
    }

    @Override
    public Map<String, BlobMetaData> listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException {
        return AccessController.doPrivileged((PrivilegedAction<Map<String, BlobMetaData>>) () -> {
            MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
            AmazonS3 client = blobStore.client();
            SocketAccess.doPrivilegedVoid(() -> {
                ObjectListing prevListing = null;
                while (true) {
                    ObjectListing list;
                    if (prevListing != null) {
                        list = client.listNextBatchOfObjects(prevListing);
        final MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
        try (AmazonS3Reference clientReference = blobStore.clientReference()) {
            ObjectListing prevListing = null;
            while (true) {
                ObjectListing list;
                if (prevListing != null) {
                    final ObjectListing finalPrevListing = prevListing;
                    list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing));
                } else {
                    if (blobNamePrefix != null) {
                        list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(blobStore.bucket(),
                                buildKey(blobNamePrefix)));
                    } else {
                        if (blobNamePrefix != null) {
                            list = client.listObjects(blobStore.bucket(), buildKey(blobNamePrefix));
                        } else {
                            list = client.listObjects(blobStore.bucket(), keyPath);
                        }
                    }
                    for (S3ObjectSummary summary : list.getObjectSummaries()) {
                        String name = summary.getKey().substring(keyPath.length());
                        blobsBuilder.put(name, new PlainBlobMetaData(name, summary.getSize()));
                    }
                    if (list.isTruncated()) {
                        prevListing = list;
                    } else {
                        break;
                        list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(blobStore.bucket(), keyPath));
                    }
                }
            });
                for (final S3ObjectSummary summary : list.getObjectSummaries()) {
                    final String name = summary.getKey().substring(keyPath.length());
                    blobsBuilder.put(name, new PlainBlobMetaData(name, summary.getSize()));
                }
                if (list.isTruncated()) {
                    prevListing = list;
                } else {
                    break;
                }
            }
            return blobsBuilder.immutableMap();
        });
        } catch (final AmazonClientException e) {
            throw new IOException("Exception when listing blobs by prefix [" + blobNamePrefix + "]", e);
        }
    }

    @Override

@@ -175,19 +174,20 @@ class S3BlobContainer extends AbstractBlobContainer {
            throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
        }

        try {
            final ObjectMetadata md = new ObjectMetadata();
            md.setContentLength(blobSize);
            if (blobStore.serverSideEncryption()) {
                md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
        final ObjectMetadata md = new ObjectMetadata();
        md.setContentLength(blobSize);
        if (blobStore.serverSideEncryption()) {
            md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
        putRequest.setStorageClass(blobStore.getStorageClass());
        putRequest.setCannedAcl(blobStore.getCannedACL());

            final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
            putRequest.setStorageClass(blobStore.getStorageClass());
            putRequest.setCannedAcl(blobStore.getCannedACL());

            blobStore.client().putObject(putRequest);
        } catch (AmazonClientException e) {
        try (AmazonS3Reference clientReference = blobStore.clientReference()) {
            SocketAccess.doPrivilegedVoid(() -> {
                clientReference.client().putObject(putRequest);
            });
        } catch (final AmazonClientException e) {
            throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
        }
    }

@@ -218,23 +218,23 @@ class S3BlobContainer extends AbstractBlobContainer {

        final int nbParts = multiparts.v1().intValue();
        final long lastPartSize = multiparts.v2();
        assert blobSize == (nbParts - 1) * partSize + lastPartSize : "blobSize does not match multipart sizes";
        assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";

        final SetOnce<String> uploadId = new SetOnce<>();
        final String bucketName = blobStore.bucket();
        boolean success = false;

        try {
            final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
            initRequest.setStorageClass(blobStore.getStorageClass());
            initRequest.setCannedACL(blobStore.getCannedACL());
            if (blobStore.serverSideEncryption()) {
                final ObjectMetadata md = new ObjectMetadata();
                md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                initRequest.setObjectMetadata(md);
            }
        final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
        initRequest.setStorageClass(blobStore.getStorageClass());
        initRequest.setCannedACL(blobStore.getCannedACL());
        if (blobStore.serverSideEncryption()) {
            final ObjectMetadata md = new ObjectMetadata();
            md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            initRequest.setObjectMetadata(md);
        }
        try (AmazonS3Reference clientReference = blobStore.clientReference()) {

            uploadId.set(blobStore.client().initiateMultipartUpload(initRequest).getUploadId());
            uploadId.set(SocketAccess.doPrivileged(() -> clientReference.client().initiateMultipartUpload(initRequest).getUploadId()));
            if (Strings.isEmpty(uploadId.get())) {
                throw new IOException("Failed to initialize multipart upload " + blobName);
            }

@@ -259,7 +259,7 @@ class S3BlobContainer extends AbstractBlobContainer {
                }
                bytesCount += uploadRequest.getPartSize();

                final UploadPartResult uploadResponse = blobStore.client().uploadPart(uploadRequest);
                final UploadPartResult uploadResponse = SocketAccess.doPrivileged(() -> clientReference.client().uploadPart(uploadRequest));
                parts.add(uploadResponse.getPartETag());
            }

@@ -268,16 +268,19 @@ class S3BlobContainer extends AbstractBlobContainer {
                        + "bytes sent but got " + bytesCount);
            }

            CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
            blobStore.client().completeMultipartUpload(complRequest);
            final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(),
                    parts);
            SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest));
            success = true;

        } catch (AmazonClientException e) {
        } catch (final AmazonClientException e) {
            throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
        } finally {
            if (success == false && Strings.hasLength(uploadId.get())) {
            if ((success == false) && Strings.hasLength(uploadId.get())) {
                final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
                blobStore.client().abortMultipartUpload(abortRequest);
                try (AmazonS3Reference clientReference = blobStore.clientReference()) {
                    SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest));
                }
            }
        }
    }

@@ -296,7 +299,7 @@ class S3BlobContainer extends AbstractBlobContainer {
            throw new IllegalArgumentException("Part size must be greater than zero");
        }

        if (totalSize == 0L || totalSize <= partSize) {
        if ((totalSize == 0L) || (totalSize <= partSize)) {
            return Tuple.tuple(1L, totalSize);
        }

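The multipart path ends on the invariant asserted above: `blobSize == (nbParts - 1) * partSize + lastPartSize`. A small worked example of the part computation (a hypothetical standalone method mirroring the logic, not the actual `numberOfMultiparts` helper):

```java
final class MultipartMath {

    /** Returns {number of parts, size of the last part} for a blob split into partSize chunks. */
    static long[] numberOfMultiparts(long totalSize, long partSize) {
        if (partSize <= 0) {
            throw new IllegalArgumentException("Part size must be greater than zero");
        }
        if ((totalSize == 0L) || (totalSize <= partSize)) {
            return new long[] { 1L, totalSize };
        }
        final long parts = totalSize / partSize;
        final long remaining = totalSize % partSize;
        // an exact multiple means the last part is a full part
        return remaining == 0 ? new long[] { parts, partSize } : new long[] { parts + 1, remaining };
    }

    public static void main(String[] args) {
        // a 10 MB blob with 4 MB parts: 3 parts, last part 2 MB,
        // and (3 - 1) * 4 MB + 2 MB == 10 MB, matching the assert
        final long mb = 1024L * 1024L;
        final long[] r = numberOfMultiparts(10 * mb, 4 * mb);
        System.out.println(r[0] + " parts, last part " + r[1] + " bytes");
    }
}
```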
@@ -19,13 +19,13 @@

package org.elasticsearch.repositories.s3;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.model.StorageClass;

import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;

@@ -34,14 +34,15 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;

import java.security.AccessController;
import java.security.PrivilegedAction;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Locale;

class S3BlobStore extends AbstractComponent implements BlobStore {

    private final AmazonS3 client;
    private final AwsS3Service service;

    private final String clientName;

    private final String bucket;

@@ -53,10 +54,11 @@ class S3BlobStore extends AbstractComponent implements BlobStore {

    private final StorageClass storageClass;

    S3BlobStore(Settings settings, AmazonS3 client, String bucket, boolean serverSideEncryption,
    S3BlobStore(Settings settings, AwsS3Service service, String clientName, String bucket, boolean serverSideEncryption,
                ByteSizeValue bufferSize, String cannedACL, String storageClass) {
        super(settings);
        this.client = client;
        this.service = service;
        this.clientName = clientName;
        this.bucket = bucket;
        this.serverSideEncryption = serverSideEncryption;
        this.bufferSize = bufferSize;

@@ -68,12 +70,14 @@ class S3BlobStore extends AbstractComponent implements BlobStore {
        // Also, if invalid security credentials are used to execute this method, the
        // client is not able to distinguish between bucket permission errors and
        // invalid credential errors, and this method could return an incorrect result.
        SocketAccess.doPrivilegedVoid(() -> {
            if (client.doesBucketExist(bucket) == false) {
                throw new IllegalArgumentException("The bucket [" + bucket + "] does not exist. Please create it before " +
                    " creating an s3 snapshot repository backed by it.");
            }
        });
        try (AmazonS3Reference clientReference = clientReference()) {
            SocketAccess.doPrivilegedVoid(() -> {
                if (clientReference.client().doesBucketExist(bucket) == false) {
                    throw new IllegalArgumentException("The bucket [" + bucket + "] does not exist. Please create it before "
                            + " creating an s3 snapshot repository backed by it.");
                }
            });
        }
    }

    @Override

@@ -81,8 +85,8 @@ class S3BlobStore extends AbstractComponent implements BlobStore {
        return bucket;
    }

    public AmazonS3 client() {
        return client;
    public AmazonS3Reference clientReference() {
        return service.client(clientName);
    }

    public String bucket() {

@@ -104,27 +108,30 @@ class S3BlobStore extends AbstractComponent implements BlobStore {

    @Override
    public void delete(BlobPath path) {
        AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
        try (AmazonS3Reference clientReference = clientReference()) {
            ObjectListing prevListing = null;
            //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
            //we can do at most 1K objects per delete
            //We don't know the bucket name until first object listing
            // From
            // http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
            // we can do at most 1K objects per delete
            // We don't know the bucket name until the first object listing
            DeleteObjectsRequest multiObjectDeleteRequest = null;
            ArrayList<KeyVersion> keys = new ArrayList<>();
            final ArrayList<KeyVersion> keys = new ArrayList<>();
            while (true) {
                ObjectListing list;
                if (prevListing != null) {
                    list = client.listNextBatchOfObjects(prevListing);
                    final ObjectListing finalPrevListing = prevListing;
                    list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing));
                } else {
                    list = client.listObjects(bucket, path.buildAsString());
                    list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(bucket, path.buildAsString()));
                    multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
                }
                for (S3ObjectSummary summary : list.getObjectSummaries()) {
                for (final S3ObjectSummary summary : list.getObjectSummaries()) {
                    keys.add(new KeyVersion(summary.getKey()));
                    //Every 500 objects batch the delete request
                    // batch the delete request every 500 objects
                    if (keys.size() > 500) {
                        multiObjectDeleteRequest.setKeys(keys);
                        client.deleteObjects(multiObjectDeleteRequest);
                        final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest;
                        SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(finalMultiObjectDeleteRequest));
                        multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
                        keys.clear();
                    }

@@ -137,14 +144,15 @@ class S3BlobStore extends AbstractComponent implements BlobStore {
            }
            if (!keys.isEmpty()) {
                multiObjectDeleteRequest.setKeys(keys);
                client.deleteObjects(multiObjectDeleteRequest);
                final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest;
                SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(finalMultiObjectDeleteRequest));
            }
            return null;
        });
        }
    }

    @Override
    public void close() {
    public void close() throws IOException {
        this.service.close();
    }

    public CannedAccessControlList getCannedACL() {

@@ -154,18 +162,18 @@ class S3BlobStore extends AbstractComponent implements BlobStore {
    public StorageClass getStorageClass() { return storageClass; }

    public static StorageClass initStorageClass(String storageClass) {
        if (storageClass == null || storageClass.equals("")) {
        if ((storageClass == null) || storageClass.equals("")) {
            return StorageClass.Standard;
        }

        try {
            StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH));
            final StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH));
            if (_storageClass.equals(StorageClass.Glacier)) {
                throw new BlobStoreException("Glacier storage class is not supported");
            }

            return _storageClass;
        } catch (IllegalArgumentException illegalArgumentException) {
        } catch (final IllegalArgumentException illegalArgumentException) {
            throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class.");
        }
    }

@@ -174,11 +182,11 @@ class S3BlobStore extends AbstractComponent implements BlobStore {
     * Constructs a canned ACL from a string
     */
    public static CannedAccessControlList initCannedACL(String cannedACL) {
        if (cannedACL == null || cannedACL.equals("")) {
        if ((cannedACL == null) || cannedACL.equals("")) {
            return CannedAccessControlList.Private;
        }

        for (CannedAccessControlList cur : CannedAccessControlList.values()) {
        for (final CannedAccessControlList cur : CannedAccessControlList.values()) {
            if (cur.toString().equalsIgnoreCase(cannedACL)) {
                return cur;
            }

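The delete path batches keys because the S3 multi-object delete API caps a single request at 1,000 keys; the code flushes every 500 to stay comfortably under it. The batching loop, distilled into a self-contained sketch (a plain flush callback stands in for the AWS delete request):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

final class BatchedDelete {

    private static final int BATCH_SIZE = 500; // well under the 1K-keys-per-request limit

    static void deleteAll(Iterable<String> keysToDelete, Consumer<List<String>> deleteBatch) {
        final List<String> batch = new ArrayList<>();
        for (final String key : keysToDelete) {
            batch.add(key);
            if (batch.size() >= BATCH_SIZE) {
                deleteBatch.accept(new ArrayList<>(batch)); // one multi-object delete request
                batch.clear();
            }
        }
        if (batch.isEmpty() == false) {
            deleteBatch.accept(new ArrayList<>(batch)); // flush the remainder
        }
    }
}
```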
@@ -24,10 +24,11 @@ import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.BasicAWSCredentials;

import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;

@@ -38,7 +39,7 @@ import org.elasticsearch.common.unit.TimeValue;
/**
 * A container for settings used to create an S3 client.
 */
class S3ClientSettings {
final class S3ClientSettings {

// prefix for s3 client settings
private static final String PREFIX = "s3.client.";

@@ -119,7 +120,7 @@ class S3ClientSettings {
/** Whether the s3 client should use an exponential backoff retry policy. */
final boolean throttleRetries;

private S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol,
protected S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol,
String proxyHost, int proxyPort, String proxyUsername, String proxyPassword,
int readTimeoutMillis, int maxRetries, boolean throttleRetries) {
this.credentials = credentials;

@@ -140,9 +141,9 @@ class S3ClientSettings {
 * Note this will always at least return a client named "default".
 */
static Map<String, S3ClientSettings> load(Settings settings) {
Set<String> clientNames = settings.getGroups(PREFIX).keySet();
Map<String, S3ClientSettings> clients = new HashMap<>();
for (String clientName : clientNames) {
final Set<String> clientNames = settings.getGroups(PREFIX).keySet();
final Map<String, S3ClientSettings> clients = new HashMap<>();
for (final String clientName : clientNames) {
clients.put(clientName, getClientSettings(settings, clientName));
}
if (clients.containsKey("default") == false) {

@@ -153,23 +154,64 @@ class S3ClientSettings {
return Collections.unmodifiableMap(clients);
}

// pkg private for tests
/** Parse settings for a single client. */
static S3ClientSettings getClientSettings(Settings settings, String clientName) {
static Map<String, S3ClientSettings> overrideCredentials(Map<String, S3ClientSettings> clientsSettings,
BasicAWSCredentials credentials) {
final MapBuilder<String, S3ClientSettings> mapBuilder = new MapBuilder<>();
for (final Map.Entry<String, S3ClientSettings> entry : clientsSettings.entrySet()) {
final S3ClientSettings s3ClientSettings = new S3ClientSettings(credentials, entry.getValue().endpoint,
entry.getValue().protocol, entry.getValue().proxyHost, entry.getValue().proxyPort, entry.getValue().proxyUsername,
entry.getValue().proxyPassword, entry.getValue().readTimeoutMillis, entry.getValue().maxRetries,
entry.getValue().throttleRetries);
mapBuilder.put(entry.getKey(), s3ClientSettings);
}
return mapBuilder.immutableMap();
}

static boolean checkDeprecatedCredentials(Settings repositorySettings) {
if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) {
if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) {
throw new IllegalArgumentException("Repository setting [" + S3Repository.ACCESS_KEY_SETTING.getKey()
+ " must be accompanied by setting [" + S3Repository.SECRET_KEY_SETTING.getKey() + "]");
}
return true;
} else if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings)) {
throw new IllegalArgumentException("Repository setting [" + S3Repository.SECRET_KEY_SETTING.getKey()
+ " must be accompanied by setting [" + S3Repository.ACCESS_KEY_SETTING.getKey() + "]");
}
return false;
}

// backcompat for reading keys out of repository settings (clusterState)
static BasicAWSCredentials loadDeprecatedCredentials(Settings repositorySettings) {
assert checkDeprecatedCredentials(repositorySettings);
try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings);
SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) {
return new BasicAWSCredentials(key.toString(), secret.toString());
}
}

static BasicAWSCredentials loadCredentials(Settings settings, String clientName) {
try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING);
SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING);
SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING);
SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) {
BasicAWSCredentials credentials = null;
SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING);) {
if (accessKey.length() != 0) {
if (secretKey.length() != 0) {
credentials = new BasicAWSCredentials(accessKey.toString(), secretKey.toString());
return new BasicAWSCredentials(accessKey.toString(), secretKey.toString());
} else {
throw new IllegalArgumentException("Missing secret key for s3 client [" + clientName + "]");
}
} else if (secretKey.length() != 0) {
throw new IllegalArgumentException("Missing access key for s3 client [" + clientName + "]");
}
return null;
}
}

// pkg private for tests
/** Parse settings for a single client. */
static S3ClientSettings getClientSettings(Settings settings, String clientName) {
final BasicAWSCredentials credentials = S3ClientSettings.loadCredentials(settings, clientName);
try (SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING);
SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) {
return new S3ClientSettings(
credentials,
getConfigValue(settings, clientName, ENDPOINT_SETTING),

@@ -187,7 +229,7 @@ class S3ClientSettings {

private static <T> T getConfigValue(Settings settings, String clientName,
Setting.AffixSetting<T> clientSetting) {
Setting<T> concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName);
final Setting<T> concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName);
return concreteSetting.get(settings);
}
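Taken together, the new helpers split client-settings parsing into reusable pieces. A minimal sketch of how they compose, assuming `nodeSettings` is a `Settings` object backed by an open keystore; the key values are hypothetical:

import java.util.Map;
import com.amazonaws.auth.BasicAWSCredentials;
import org.elasticsearch.common.settings.Settings;

static void clientSettingsSketch(final Settings nodeSettings) {
    // load every named client; always contains at least "default"
    final Map<String, S3ClientSettings> clients = S3ClientSettings.load(nodeSettings);
    // credentials for one client: null if neither key is set, IllegalArgumentException if only one is
    final BasicAWSCredentials creds = S3ClientSettings.loadCredentials(nodeSettings, "default");
    // deprecated path: swap fixed credentials into every client's settings
    final Map<String, S3ClientSettings> overridden =
            S3ClientSettings.overrideCredentials(clients, new BasicAWSCredentials("key", "secret"));
}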
@@ -19,7 +19,8 @@

package org.elasticsearch.repositories.s3;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.auth.BasicAWSCredentials;

import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobPath;

@@ -35,6 +36,9 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;

import java.io.IOException;
import java.util.Map;
import java.util.function.Function;

/**
 * Shared file system implementation of the BlobStoreRepository

@@ -134,6 +138,8 @@ class S3Repository extends BlobStoreRepository {
 */
static final Setting<String> CANNED_ACL_SETTING = Setting.simpleString("canned_acl");

static final Setting<String> CLIENT_NAME = new Setting<>("client", "default", Function.identity());

/**
 * Specifies the path within bucket to repository data. Defaults to root directory.
 */

@@ -143,23 +149,24 @@ class S3Repository extends BlobStoreRepository {

private final BlobPath basePath;

private ByteSizeValue chunkSize;
private final ByteSizeValue chunkSize;

private boolean compress;
private final boolean compress;

/**
 * Constructs an s3 backed repository
 */
S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) {
S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry,
AwsS3Service awsService) throws IOException {
super(metadata, settings, namedXContentRegistry);

String bucket = BUCKET_SETTING.get(metadata.settings());
final String bucket = BUCKET_SETTING.get(metadata.settings());
if (bucket == null) {
throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository");
}

boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
final boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
final ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
this.compress = COMPRESS_SETTING.get(metadata.settings());

@@ -170,17 +177,22 @@ class S3Repository extends BlobStoreRepository {
}

// Parse and validate the user's S3 Storage Class setting
String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
String cannedACL = CANNED_ACL_SETTING.get(metadata.settings());
final String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
final String cannedACL = CANNED_ACL_SETTING.get(metadata.settings());
final String clientName = CLIENT_NAME.get(metadata.settings());

logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " +
"buffer_size [{}], cannedACL [{}], storageClass [{}]",
bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass);

AmazonS3 client = s3Service.client(metadata.settings());
blobStore = new S3BlobStore(settings, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
// deprecated behavior: override client credentials from the cluster state
// (repository settings)
if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
overrideCredentialsFromClusterState(awsService);
}
blobStore = new S3BlobStore(settings, awsService, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);

String basePath = BASE_PATH_SETTING.get(metadata.settings());
final String basePath = BASE_PATH_SETTING.get(metadata.settings());
if (Strings.hasLength(basePath)) {
this.basePath = new BlobPath().add(basePath);
} else {

@@ -207,4 +219,14 @@ class S3Repository extends BlobStoreRepository {
protected ByteSizeValue chunkSize() {
return chunkSize;
}

void overrideCredentialsFromClusterState(AwsS3Service awsService) {
deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead "
+ "store these in named clients and the elasticsearch keystore for secure settings.");
final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings());
// hack, but that's ok because the whole if branch should be axed
final Map<String, S3ClientSettings> prevSettings = awsService.refreshAndClearCache(S3ClientSettings.load(Settings.EMPTY));
final Map<String, S3ClientSettings> newSettings = S3ClientSettings.overrideCredentials(prevSettings, insecureCredentials);
awsService.refreshAndClearCache(newSettings);
}
}
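In other words, a repository whose cluster-state settings still carry `access_key`/`secret_key` trips the deprecated branch in the constructor. A sketch of settings that would trigger it; the repository name and key values are purely illustrative:

import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.settings.Settings;

// checkDeprecatedCredentials(...) returns true for this metadata, and
// overrideCredentialsFromClusterState(...) then rewrites every cached client's credentials
final RepositoryMetaData metadata = new RepositoryMetaData("my-repo", "s3", Settings.builder()
        .put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_key")
        .put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_secret")
        .build());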
@@ -19,6 +19,7 @@

package org.elasticsearch.repositories.s3;

import java.io.IOException;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;

@@ -28,18 +29,20 @@ import java.util.Map;

import com.amazonaws.util.json.Jackson;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ReloadablePlugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.repositories.Repository;

/**
 * A plugin to add a repository type that writes to and from the AWS S3.
 */
public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin {

static {
SpecialPermission.check();

@@ -50,30 +53,40 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
// ClientConfiguration clinit has some classloader problems
// TODO: fix that
Class.forName("com.amazonaws.ClientConfiguration");
} catch (ClassNotFoundException e) {
} catch (final ClassNotFoundException e) {
throw new RuntimeException(e);
}
return null;
});
}

private final Map<String, S3ClientSettings> clientsSettings;
private final AwsS3Service awsS3Service;

public S3RepositoryPlugin(Settings settings) {
this.awsS3Service = getAwsS3Service(settings);
// eagerly load client settings so that secure settings are read
clientsSettings = S3ClientSettings.load(settings);
assert clientsSettings.isEmpty() == false : "always at least have 'default'";
final Map<String, S3ClientSettings> clientsSettings = S3ClientSettings.load(settings);
this.awsS3Service.refreshAndClearCache(clientsSettings);
}

// overridable for tests
protected AwsS3Service createStorageService(Settings settings) {
return new InternalAwsS3Service(settings, clientsSettings);
protected S3RepositoryPlugin(AwsS3Service awsS3Service) {
this.awsS3Service = awsS3Service;
}

// proxy method for testing
protected S3Repository getS3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry)
throws IOException {
return new S3Repository(metadata, settings, namedXContentRegistry, awsS3Service);
}

// proxy method for testing
protected AwsS3Service getAwsS3Service(Settings settings) {
return new InternalAwsS3Service(settings);
}

@Override
public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
return Collections.singletonMap(S3Repository.TYPE,
(metadata) -> new S3Repository(metadata, env.settings(), namedXContentRegistry, createStorageService(env.settings())));
return Collections.singletonMap(S3Repository.TYPE, (metadata) -> getS3Repository(metadata, env.settings(), namedXContentRegistry));
}

@Override

@@ -94,4 +107,16 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
S3Repository.ACCESS_KEY_SETTING,
S3Repository.SECRET_KEY_SETTING);
}

@Override
public void reload(Settings settings) {
// secure settings should be readable
final Map<String, S3ClientSettings> clientsSettings = S3ClientSettings.load(settings);
awsS3Service.refreshAndClearCache(clientsSettings);
}

@Override
public void close() throws IOException {
awsS3Service.close();
}
}
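A sketch of the `ReloadablePlugin` hand-off this class now participates in: the broadcast password decrypts the node keystore, a `Settings` view over it is handed to `reload`, and clients obtained earlier keep their old credentials until their references are released. The method and parameter names here are illustrative, not part of the commit:

import java.io.IOException;
import org.elasticsearch.common.settings.Settings;

static void reloadSketch(final S3RepositoryPlugin plugin, final Settings settingsWithOpenKeystore) throws IOException {
    // re-reads the s3.client.* secure settings and replaces the cached clients
    plugin.reload(settingsWithOpenKeystore);
    // existing AmazonS3Reference holders are unaffected; new client() calls see the fresh credentials
}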
@@ -37,4 +37,7 @@ grant {

// s3 client opens socket connections to access the repository
permission java.net.SocketPermission "*", "connect";

// only for tests: org.elasticsearch.repositories.s3.S3RepositoryPlugin
permission java.util.PropertyPermission "es.allow_insecure_settings", "read,write";
};
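The extra grant exists so the test fixture further down can flip the insecure-settings escape hatch. A sketch of the guarded write it permits, matching the static initializer in `RepositoryCredentialsTests`:

import java.security.AccessController;
import java.security.PrivilegedAction;

// test-only: PropertyPermission on "es.allow_insecure_settings" makes this privileged write legal
AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
    System.setProperty("es.allow_insecure_settings", "true");
    return null;
});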
@@ -19,7 +19,6 @@

package org.elasticsearch.repositories.s3;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

@@ -180,13 +179,13 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase

Settings settings = internalCluster().getInstance(Settings.class);
Settings bucket = settings.getByPrefix("repositories.s3.");
AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(repositorySettings);

String bucketName = bucket.get("bucket");
logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath);
List<S3ObjectSummary> summaries = s3Client.listObjects(bucketName, basePath).getObjectSummaries();
for (S3ObjectSummary summary : summaries) {
assertThat(s3Client.getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256"));
try (AmazonS3Reference s3Client = internalCluster().getInstance(AwsS3Service.class).client("default")) {
String bucketName = bucket.get("bucket");
logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath);
List<S3ObjectSummary> summaries = s3Client.client().listObjects(bucketName, basePath).getObjectSummaries();
for (S3ObjectSummary summary : summaries) {
assertThat(s3Client.client().getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256"));
}
}

logger.info("--> delete some data");

@@ -443,8 +442,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
// We check that settings have been set in the elasticsearch.yml integration test file
// as described in README
assertThat("Your settings in elasticsearch.yml are incorrect. Check README file.", bucketName, notNullValue());
AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY);
try {
try (AmazonS3Reference s3Client = internalCluster().getInstance(AwsS3Service.class).client("default")) {
ObjectListing prevListing = null;
//From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
//we can do at most 1K objects per delete

@@ -454,9 +452,9 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
while (true) {
ObjectListing list;
if (prevListing != null) {
list = client.listNextBatchOfObjects(prevListing);
list = s3Client.client().listNextBatchOfObjects(prevListing);
} else {
list = client.listObjects(bucketName, basePath);
list = s3Client.client().listObjects(bucketName, basePath);
multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
}
for (S3ObjectSummary summary : list.getObjectSummaries()) {

@@ -464,7 +462,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
//Every 500 objects batch the delete request
if (keys.size() > 500) {
multiObjectDeleteRequest.setKeys(keys);
client.deleteObjects(multiObjectDeleteRequest);
s3Client.client().deleteObjects(multiObjectDeleteRequest);
multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
keys.clear();
}

@@ -477,7 +475,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
}
if (!keys.isEmpty()) {
multiObjectDeleteRequest.setKeys(keys);
client.deleteObjects(multiObjectDeleteRequest);
s3Client.client().deleteObjects(multiObjectDeleteRequest);
}
} catch (Exception ex) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to delete S3 repository [{}]", bucketName), ex);
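The recurring test change is the same idiom throughout: obtain the client by name, use it inside try-with-resources, and let `close()` drop the reference. A condensed sketch, assuming an `AwsS3Service` instance is at hand; the bucket and prefix are hypothetical:

import java.util.List;
import com.amazonaws.services.s3.model.S3ObjectSummary;

static void listSketch(final AwsS3Service service) {
    try (AmazonS3Reference ref = service.client("default")) {
        final List<S3ObjectSummary> summaries = ref.client().listObjects("my-bucket", "my/prefix").getObjectSummaries();
        summaries.forEach(summary -> System.out.println(summary.getKey()));
    } // releases the reference; shutdown happens once every holder has closed
}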
@@ -727,4 +727,9 @@ public class AmazonS3Wrapper extends AbstractAmazonS3 {
public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest) throws AmazonClientException, AmazonServiceException {
return delegate.headBucket(headBucketRequest);
}

@Override
public void shutdown() {
delegate.shutdown();
}
}
@@ -21,75 +21,89 @@ package org.elasticsearch.repositories.s3;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.internal.StaticCredentialsProvider;

import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

import java.util.Locale;
import java.util.Map;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;

public class AwsS3ServiceImplTests extends ESTestCase {

public void testAWSCredentialsWithSystemProviders() {
S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, "default");
AWSCredentialsProvider credentialsProvider =
InternalAwsS3Service.buildCredentials(logger, deprecationLogger, clientSettings, Settings.EMPTY);
public void testAWSCredentialsDefaultToInstanceProviders() {
final String inexistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, inexistentClientName);
final AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, clientSettings);
assertThat(credentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class));
}

public void testAwsCredsDefaultSettings() {
MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("s3.client.default.access_key", "aws_key");
secureSettings.setString("s3.client.default.secret_key", "aws_secret");
Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
assertCredentials(Settings.EMPTY, settings, "aws_key", "aws_secret");
public void testAWSCredentialsFromKeystore() {
final MockSecureSettings secureSettings = new MockSecureSettings();
final String clientNamePrefix = "some_client_name_";
final int clientsCount = randomIntBetween(0, 4);
for (int i = 0; i < clientsCount; i++) {
final String clientName = clientNamePrefix + i;
secureSettings.setString("s3.client." + clientName + ".access_key", clientName + "_aws_access_key");
secureSettings.setString("s3.client." + clientName + ".secret_key", clientName + "_aws_secret_key");
}
final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
final Map<String, S3ClientSettings> allClientsSettings = S3ClientSettings.load(settings);
// no less, no more
assertThat(allClientsSettings.size(), is(clientsCount + 1)); // including default
for (int i = 0; i < clientsCount; i++) {
final String clientName = clientNamePrefix + i;
final S3ClientSettings someClientSettings = allClientsSettings.get(clientName);
final AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, someClientSettings);
assertThat(credentialsProvider, instanceOf(StaticCredentialsProvider.class));
assertThat(credentialsProvider.getCredentials().getAWSAccessKeyId(), is(clientName + "_aws_access_key"));
assertThat(credentialsProvider.getCredentials().getAWSSecretKey(), is(clientName + "_aws_secret_key"));
}
// test default exists and is an Instance provider
final S3ClientSettings defaultClientSettings = allClientsSettings.get("default");
final AWSCredentialsProvider defaultCredentialsProvider = InternalAwsS3Service.buildCredentials(logger, defaultClientSettings);
assertThat(defaultCredentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class));
}

public void testAwsCredsExplicitConfigSettings() {
Settings repositorySettings = Settings.builder().put(InternalAwsS3Service.CLIENT_NAME.getKey(), "myconfig").build();
MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("s3.client.myconfig.access_key", "aws_key");
secureSettings.setString("s3.client.myconfig.secret_key", "aws_secret");
secureSettings.setString("s3.client.default.access_key", "wrong_key");
secureSettings.setString("s3.client.default.secret_key", "wrong_secret");
Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
assertCredentials(repositorySettings, settings, "aws_key", "aws_secret");
public void testSetDefaultCredential() {
final MockSecureSettings secureSettings = new MockSecureSettings();
final String awsAccessKey = randomAlphaOfLength(8);
final String awsSecretKey = randomAlphaOfLength(8);
secureSettings.setString("s3.client.default.access_key", awsAccessKey);
secureSettings.setString("s3.client.default.secret_key", awsSecretKey);
final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
final Map<String, S3ClientSettings> allClientsSettings = S3ClientSettings.load(settings);
assertThat(allClientsSettings.size(), is(1));
// test default exists and uses the static credentials
final S3ClientSettings defaultClientSettings = allClientsSettings.get("default");
final AWSCredentialsProvider defaultCredentialsProvider = InternalAwsS3Service.buildCredentials(logger, defaultClientSettings);
assertThat(defaultCredentialsProvider, instanceOf(StaticCredentialsProvider.class));
assertThat(defaultCredentialsProvider.getCredentials().getAWSAccessKeyId(), is(awsAccessKey));
assertThat(defaultCredentialsProvider.getCredentials().getAWSSecretKey(), is(awsSecretKey));
}

public void testRepositorySettingsCredentialsDisallowed() {
Settings repositorySettings = Settings.builder()
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key")
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build();
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret"));
assertThat(e.getMessage(), containsString("Setting [access_key] is insecure"));
}

public void testRepositorySettingsCredentialsMissingKey() {
Settings repositorySettings = Settings.builder().put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build();
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret"));
assertThat(e.getMessage(), containsString("must be accompanied by setting [access_key]"));
}

public void testRepositorySettingsCredentialsMissingSecret() {
Settings repositorySettings = Settings.builder().put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key").build();
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
assertCredentials(repositorySettings, Settings.EMPTY, "aws_key", "aws_secret"));
assertThat(e.getMessage(), containsString("must be accompanied by setting [secret_key]"));
}

private void assertCredentials(Settings singleRepositorySettings, Settings settings,
String expectedKey, String expectedSecret) {
String configName = InternalAwsS3Service.CLIENT_NAME.get(singleRepositorySettings);
S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName);
AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger,
clientSettings, singleRepositorySettings).getCredentials();
assertThat(credentials.getAWSAccessKeyId(), is(expectedKey));
assertThat(credentials.getAWSSecretKey(), is(expectedSecret));
public void testCredentialsIncomplete() {
final MockSecureSettings secureSettings = new MockSecureSettings();
final String clientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
final boolean missingSecretKey = randomBoolean();
if (missingSecretKey) {
secureSettings.setString("s3.client." + clientName + ".access_key", "aws_access_key");
} else {
secureSettings.setString("s3.client." + clientName + ".secret_key", "aws_secret_key");
}
final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
final Exception e = expectThrows(IllegalArgumentException.class, () -> S3ClientSettings.load(settings));
if (missingSecretKey) {
assertThat(e.getMessage(), containsString("Missing secret key for s3 client [" + clientName + "]"));
} else {
assertThat(e.getMessage(), containsString("Missing access key for s3 client [" + clientName + "]"));
}
}

public void testAWSDefaultConfiguration() {

@@ -98,10 +112,10 @@ public class AwsS3ServiceImplTests extends ESTestCase {
}

public void testAWSConfigurationWithAwsSettings() {
MockSecureSettings secureSettings = new MockSecureSettings();
final MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("s3.client.default.proxy.username", "aws_proxy_username");
secureSettings.setString("s3.client.default.proxy.password", "aws_proxy_password");
Settings settings = Settings.builder()
final Settings settings = Settings.builder()
.setSecureSettings(secureSettings)
.put("s3.client.default.protocol", "http")
.put("s3.client.default.proxy.host", "aws_proxy_host")

@@ -113,7 +127,7 @@ public class AwsS3ServiceImplTests extends ESTestCase {
}

public void testRepositoryMaxRetries() {
Settings settings = Settings.builder()
final Settings settings = Settings.builder()
.put("s3.client.default.max_retries", 5)
.build();
launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null,

@@ -123,7 +137,7 @@ public class AwsS3ServiceImplTests extends ESTestCase {
public void testRepositoryThrottleRetries() {
final boolean throttling = randomBoolean();

Settings settings = Settings.builder().put("s3.client.default.use_throttle_retries", throttling).build();
final Settings settings = Settings.builder().put("s3.client.default.use_throttle_retries", throttling).build();
launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, null, 3, throttling, 50000);
}

@@ -137,8 +151,8 @@ public class AwsS3ServiceImplTests extends ESTestCase {
boolean expectedUseThrottleRetries,
int expectedReadTimeout) {

S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default");
ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings);
final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default");
final ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings);

assertThat(configuration.getResponseMetadataCacheSize(), is(0));
assertThat(configuration.getProtocol(), is(expectedProtocol));

@@ -152,15 +166,15 @@ public class AwsS3ServiceImplTests extends ESTestCase {
}

public void testEndpointSetting() {
Settings settings = Settings.builder()
final Settings settings = Settings.builder()
.put("s3.client.default.endpoint", "s3.endpoint")
.build();
assertEndpoint(Settings.EMPTY, settings, "s3.endpoint");
}

private void assertEndpoint(Settings repositorySettings, Settings settings, String expectedEndpoint) {
String configName = InternalAwsS3Service.CLIENT_NAME.get(repositorySettings);
S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName);
final String configName = S3Repository.CLIENT_NAME.get(repositorySettings);
final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName);
assertThat(clientSettings.endpoint, is(expectedEndpoint));
}
@@ -157,6 +157,11 @@ class MockAmazonS3 extends AbstractAmazonS3 {
throw exception;
}
}

@Override
public void shutdown() {
// TODO check close
}

@Override
public DeleteObjectsResult deleteObjects(DeleteObjectsRequest request) throws SdkClientException {
@@ -0,0 +1,211 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.security.AccessController;
import java.security.PrivilegedAction;

import static org.hamcrest.Matchers.is;

@SuppressForbidden(reason = "test fixture requires System.setProperty")
public class RepositoryCredentialsTests extends ESTestCase {

static {
AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
// required for client settings overwriting
System.setProperty("es.allow_insecure_settings", "true");
return null;
});
}

static final class ProxyS3RepositoryPlugin extends S3RepositoryPlugin {

static final class ClientAndCredentials extends AmazonS3Wrapper {
final AWSCredentialsProvider credentials;

ClientAndCredentials(AmazonS3 delegate, AWSCredentialsProvider credentials) {
super(delegate);
this.credentials = credentials;
}

@Override
public boolean doesBucketExist(String bucketName) {
return true;
}
}

static final class ProxyInternalAwsS3Service extends InternalAwsS3Service {

ProxyInternalAwsS3Service(Settings settings) {
super(settings);
}

@Override
AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
final AmazonS3 client = super.buildClient(credentials, configuration);
return new ClientAndCredentials(client, credentials);
}

}

protected ProxyS3RepositoryPlugin(Settings settings) {
super(settings);
}

@Override
protected AwsS3Service getAwsS3Service(Settings settings) {
return new ProxyInternalAwsS3Service(settings);
}

}

public void testRepositoryCredentialsOverrideSecureCredentials() throws IOException {
final int clientsCount = randomIntBetween(0, 4);
final String[] clientNames = new String[clientsCount + 1];
clientNames[0] = "default";
final MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("s3.client.default.access_key", "secure_aws_key");
secureSettings.setString("s3.client.default.secret_key", "secure_aws_secret");
for (int i = 0; i < clientsCount; i++) {
final String clientName = "client_" + i;
secureSettings.setString("s3.client." + clientName + ".access_key", "secure_aws_key_" + i);
secureSettings.setString("s3.client." + clientName + ".secret_key", "secure_aws_secret_" + i);
clientNames[i + 1] = clientName;
}
final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
// repository settings for credentials override node secure settings
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
.put(S3Repository.CLIENT_NAME.getKey(), randomFrom(clientNames))
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key")
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);
S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY);
AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials();
assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));
}
assertWarnings(
"[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!"
+ " See the breaking changes documentation for the next major version.",
"Using s3 access/secret key from repository settings. Instead store these in named clients and"
+ " the elasticsearch keystore for secure settings.",
"[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!"
+ " See the breaking changes documentation for the next major version.");
}

public void testRepositoryCredentialsOnly() throws IOException {
// repository settings for credentials override node secure settings
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock",
Settings.builder()
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key")
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret")
.build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(Settings.EMPTY);
S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY);
AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials.getCredentials();
assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));
}
assertWarnings(
"[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!"
+ " See the breaking changes documentation for the next major version.",
"Using s3 access/secret key from repository settings. Instead store these in named clients and"
+ " the elasticsearch keystore for secure settings.",
"[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!"
+ " See the breaking changes documentation for the next major version.");
}

public void testReinitSecureCredentials() throws IOException {
final String clientName = randomFrom("default", "some_client");
// initial client node settings
final MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("s3.client." + clientName + ".access_key", "secure_aws_key");
secureSettings.setString("s3.client." + clientName + ".secret_key", "secure_aws_secret");
final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
// repository settings
final Settings.Builder builder = Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName);
final boolean repositorySettings = randomBoolean();
if (repositorySettings) {
builder.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key");
builder.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret");
}
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);
S3Repository s3repo = s3Plugin.getS3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY)) {
try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials credentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials
.getCredentials();
if (repositorySettings) {
assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));
} else {
assertThat(credentials.getAWSAccessKeyId(), is("secure_aws_key"));
assertThat(credentials.getAWSSecretKey(), is("secure_aws_secret"));
}
// new settings
final MockSecureSettings newSecureSettings = new MockSecureSettings();
newSecureSettings.setString("s3.client." + clientName + ".access_key", "new_secret_aws_key");
newSecureSettings.setString("s3.client." + clientName + ".secret_key", "new_secret_aws_secret");
final Settings newSettings = Settings.builder().setSecureSettings(newSecureSettings).build();
// reload S3 plugin settings
s3Plugin.reload(newSettings);
// check the not-yet-closed client reference still has the same credentials
if (repositorySettings) {
assertThat(credentials.getAWSAccessKeyId(), is("insecure_aws_key"));
assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret"));
} else {
assertThat(credentials.getAWSAccessKeyId(), is("secure_aws_key"));
assertThat(credentials.getAWSSecretKey(), is("secure_aws_secret"));
}
}
// check credentials have been updated
try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials
.getCredentials();
assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key"));
assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret"));
}
}
if (repositorySettings) {
assertWarnings(
"[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release!"
+ " See the breaking changes documentation for the next major version.",
"Using s3 access/secret key from repository settings. Instead store these in named clients and"
+ " the elasticsearch keystore for secure settings.",
"[access_key] setting was deprecated in Elasticsearch and will be removed in a future release!"
+ " See the breaking changes documentation for the next major version.");
}
}

}
@@ -1,41 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import com.amazonaws.auth.AWSCredentials;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

public class RepositorySettingsCredentialsTests extends ESTestCase {

public void testRepositorySettingsCredentials() {
Settings repositorySettings = Settings.builder()
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "aws_key")
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "aws_secret").build();
AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger,
S3ClientSettings.getClientSettings(Settings.EMPTY, "default"), repositorySettings).getCredentials();
assertEquals("aws_key", credentials.getAWSAccessKeyId());
assertEquals("aws_secret", credentials.getAWSSecretKey());
assertSettingDeprecationsAndWarnings(new Setting<?>[] { S3Repository.ACCESS_KEY_SETTING, S3Repository.SECRET_KEY_SETTING },
"Using s3 access/secret key from repository settings. " +
"Instead store these in named clients and the elasticsearch keystore for secure settings.");
}
}
@@ -57,6 +57,7 @@ import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doAnswer;

public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {

@@ -74,7 +75,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
final S3BlobStore blobStore = mock(S3BlobStore.class);
final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore);

IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
blobContainer.executeSingleUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize));
assertEquals("Upload request size [" + blobSize + "] can't be larger than 5gb", e.getMessage());
}

@@ -86,7 +87,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore);
final String blobName = randomAlphaOfLengthBetween(1, 10);

IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
blobContainer.executeSingleUpload(blobStore, blobName, new ByteArrayInputStream(new byte[0]), ByteSizeUnit.MB.toBytes(2)));
assertEquals("Upload request size [2097152] can't be larger than buffer size", e.getMessage());
}

@@ -121,7 +122,8 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
}

final AmazonS3 client = mock(AmazonS3.class);
when(blobStore.client()).thenReturn(client);
final AmazonS3Reference clientReference = new AmazonS3Reference(client);
when(blobStore.clientReference()).thenReturn(clientReference);

final ArgumentCaptor<PutObjectRequest> argumentCaptor = ArgumentCaptor.forClass(PutObjectRequest.class);
when(client.putObject(argumentCaptor.capture())).thenReturn(new PutObjectResult());

@@ -146,7 +148,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
final S3BlobStore blobStore = mock(S3BlobStore.class);
final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore);

IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize)
);
assertEquals("Multipart upload request size [" + blobSize + "] can't be larger than 5tb", e.getMessage());

@@ -157,7 +159,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
final S3BlobStore blobStore = mock(S3BlobStore.class);
final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore);

IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
blobContainer.executeMultipartUpload(blobStore, randomAlphaOfLengthBetween(1, 10), null, blobSize)
);
assertEquals("Multipart upload request size [" + blobSize + "] can't be smaller than 5mb", e.getMessage());

@@ -191,7 +193,8 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
}

final AmazonS3 client = mock(AmazonS3.class);
when(blobStore.client()).thenReturn(client);
final AmazonS3Reference clientReference = new AmazonS3Reference(client);
when(blobStore.clientReference()).thenReturn(clientReference);

final ArgumentCaptor<InitiateMultipartUploadRequest> initArgCaptor = ArgumentCaptor.forClass(InitiateMultipartUploadRequest.class);
final InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();

@@ -201,7 +204,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
final ArgumentCaptor<UploadPartRequest> uploadArgCaptor = ArgumentCaptor.forClass(UploadPartRequest.class);

final List<String> expectedEtags = new ArrayList<>();
long partSize = Math.min(bufferSize, blobSize);
final long partSize = Math.min(bufferSize, blobSize);
long totalBytes = 0;
do {
expectedEtags.add(randomAlphaOfLength(50));

@@ -238,7 +241,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
assertEquals(numberOfParts.v1().intValue(), uploadRequests.size());

for (int i = 0; i < uploadRequests.size(); i++) {
UploadPartRequest uploadRequest = uploadRequests.get(i);
final UploadPartRequest uploadRequest = uploadRequests.get(i);

assertEquals(bucketName, uploadRequest.getBucketName());
assertEquals(blobPath.buildAsString() + blobName, uploadRequest.getKey());

@@ -260,7 +263,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
assertEquals(blobPath.buildAsString() + blobName, compRequest.getKey());
assertEquals(initResult.getUploadId(), compRequest.getUploadId());

List<String> actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList());
final List<String> actualETags = compRequest.getPartETags().stream().map(PartETag::getETag).collect(Collectors.toList());
assertEquals(expectedEtags, actualETags);
}

@@ -278,7 +281,11 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
when(blobStore.getStorageClass()).thenReturn(randomFrom(StorageClass.values()));

final AmazonS3 client = mock(AmazonS3.class);
when(blobStore.client()).thenReturn(client);
final AmazonS3Reference clientReference = new AmazonS3Reference(client);
doAnswer(invocation -> {
clientReference.incRef();
return clientReference;
}).when(blobStore).clientReference();

final String uploadId = randomAlphaOfLength(25);

@@ -360,7 +367,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
}

public void testNumberOfMultipartsWithZeroPartSize() {
IllegalArgumentException e =
final IllegalArgumentException e =
expectThrows(IllegalArgumentException.class, () -> S3BlobContainer.numberOfMultiparts(randomNonNegativeLong(), 0L));
assertEquals("Part size must be greater than zero", e.getMessage());
}

@@ -382,7 +389,7 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {

// Fits in N parts plus a bit more
final long remaining = randomIntBetween(1, (size > Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int) size - 1);
assertNumberOfMultiparts(factor + 1, remaining, size * factor + remaining, size);
assertNumberOfMultiparts(factor + 1, remaining, (size * factor) + remaining, size);
}

private static void assertNumberOfMultiparts(final int expectedParts, final long expectedRemaining, long totalSize, long partSize) {
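One detail worth calling out in the mocking above: every `clientReference()` caller closes the reference it receives, so the stub bumps the count on each hand-out; a plain `thenReturn(clientReference)` would let the first `close()` release the only reference and shut the shared mock client down mid-test. The pattern, repeated here for clarity with the same names as the test:

// each call hands out one reference; AmazonS3Reference.close() gives it back
doAnswer(invocation -> {
    clientReference.incRef();
    return clientReference;
}).when(blobStore).clientReference();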
@@ -18,7 +18,6 @@
 */
package org.elasticsearch.repositories.s3;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.StorageClass;

@@ -50,7 +49,6 @@ import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

import static java.util.Collections.emptyMap;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.not;

@@ -87,11 +85,9 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa

@Override
protected void createTestRepository(final String name) {
assertAcked(client().admin().cluster().preparePutRepository(name)
.setType(S3Repository.TYPE)
.setSettings(Settings.builder()
assertAcked(client().admin().cluster().preparePutRepository(name).setType(S3Repository.TYPE).setSettings(Settings.builder()
.put(S3Repository.BUCKET_SETTING.getKey(), bucket)
.put(InternalAwsS3Service.CLIENT_NAME.getKey(), client)
.put(S3Repository.CLIENT_NAME.getKey(), client)
.put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize)
.put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption)
.put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL)

@@ -113,13 +109,17 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa

@Override
public Map<String, Repository.Factory> getRepositories(final Environment env, final NamedXContentRegistry registry) {
return Collections.singletonMap(S3Repository.TYPE, (metadata) ->
new S3Repository(metadata, env.settings(), registry, new InternalAwsS3Service(env.settings(), emptyMap()) {
@Override
public synchronized AmazonS3 client(final Settings repositorySettings) {
return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass);
}
}));
return Collections.singletonMap(S3Repository.TYPE,
(metadata) -> new S3Repository(metadata, env.settings(), registry, new InternalAwsS3Service(env.settings()) {
@Override
public synchronized AmazonS3Reference client(String clientName) {
return new AmazonS3Reference(new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass));
}
}) {
@Override
void overrideCredentialsFromClusterState(AwsS3Service awsService) {
}
});
}
}
@ -115,7 +115,15 @@ public class S3BlobStoreTests extends ESBlobStoreTestCase {
storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString();
}

AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass);
return new S3BlobStore(Settings.EMPTY, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
final String theClientName = randomAlphaOfLength(4);
final AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass);
final AwsS3Service service = new InternalAwsS3Service(Settings.EMPTY) {
@Override
public synchronized AmazonS3Reference client(String clientName) {
assert theClientName.equals(clientName);
return new AmazonS3Reference(client);
}
};
return new S3BlobStore(Settings.EMPTY, service, theClientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
}
}
@ -20,7 +20,6 @@
package org.elasticsearch.repositories.s3;

import com.amazonaws.services.s3.AbstractAmazonS3;
import com.amazonaws.services.s3.AmazonS3;

import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.component.AbstractLifecycleComponent;

@ -31,18 +30,25 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import static org.hamcrest.Matchers.containsString;

public class S3RepositoryTests extends ESTestCase {

private static class DummyS3Client extends AbstractAmazonS3 {

@Override
public boolean doesBucketExist(String bucketName) {
return true;
}

@Override
public void shutdown() {
// TODO check is closed
}
}

private static class DummyS3Service extends AbstractLifecycleComponent implements AwsS3Service {

@ -56,53 +62,70 @@ public class S3RepositoryTests extends ESTestCase {
@Override
protected void doClose() {}
@Override
public AmazonS3 client(Settings settings) {
return new DummyS3Client();
public AmazonS3Reference client(String clientName) {
return new AmazonS3Reference(new DummyS3Client());
}

@Override
public Map<String, S3ClientSettings> refreshAndClearCache(Map<String, S3ClientSettings> clientsSettings) {
return Collections.emptyMap();
}

@Override
public void close() {
}
}

public void testInvalidChunkBufferSizeSettings() throws IOException {
// chunk < buffer should fail
assertInvalidBuffer(10, 5, RepositoryException.class, "chunk_size (5mb) can't be lower than buffer_size (10mb).");
final Settings s1 = bufferAndChunkSettings(10, 5);
final Exception e1 = expectThrows(RepositoryException.class,
() -> new S3Repository(getRepositoryMetaData(s1), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()));
assertThat(e1.getMessage(), containsString("chunk_size (5mb) can't be lower than buffer_size (10mb)"));
// chunk > buffer should pass
assertValidBuffer(5, 10);
final Settings s2 = bufferAndChunkSettings(5, 10);
new S3Repository(getRepositoryMetaData(s2), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close();
// chunk = buffer should pass
assertValidBuffer(5, 5);
final Settings s3 = bufferAndChunkSettings(5, 5);
new S3Repository(getRepositoryMetaData(s3), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service()).close();
// buffer < 5mb should fail
assertInvalidBuffer(4, 10, IllegalArgumentException.class,
"failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]");
// chunk > 5tb should fail
assertInvalidBuffer(5, 6000000, IllegalArgumentException.class,
"failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]");
final Settings s4 = bufferAndChunkSettings(4, 10);
final IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class,
() -> new S3Repository(getRepositoryMetaData(s4), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())
.close());
assertThat(e2.getMessage(), containsString("failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]"));
final Settings s5 = bufferAndChunkSettings(5, 6000000);
final IllegalArgumentException e3 = expectThrows(IllegalArgumentException.class,
() -> new S3Repository(getRepositoryMetaData(s5), Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())
.close());
assertThat(e3.getMessage(), containsString("failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]"));
}

private void assertValidBuffer(long bufferMB, long chunkMB) throws IOException {
RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
.put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB).getStringRep())
.put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB).getStringRep()).build());
new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service());
private Settings bufferAndChunkSettings(long buffer, long chunk) {
return Settings.builder()
.put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(buffer, ByteSizeUnit.MB).getStringRep())
.put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunk, ByteSizeUnit.MB).getStringRep())
.build();
}

private void assertInvalidBuffer(int bufferMB, int chunkMB, Class<? extends Exception> clazz, String msg) throws IOException {
RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
.put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB).getStringRep())
.put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB).getStringRep()).build());

Exception e = expectThrows(clazz, () -> new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY,
new DummyS3Service()));
assertThat(e.getMessage(), containsString(msg));
private RepositoryMetaData getRepositoryMetaData(Settings settings) {
return new RepositoryMetaData("dummy-repo", "mock", Settings.builder().put(settings).build());
}

public void testBasePathSetting() throws IOException {
RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
.put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build());
S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service());
assertEquals("foo/bar/", s3repo.basePath().buildAsString());
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
.put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build());
try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) {
assertEquals("foo/bar/", s3repo.basePath().buildAsString());
}
}

public void testDefaultBufferSize() {
ByteSizeValue defaultBufferSize = S3Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY);
assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(new ByteSizeValue(100, ByteSizeUnit.MB)));
assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(new ByteSizeValue(5, ByteSizeUnit.MB)));
public void testDefaultBufferSize() throws IOException {
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY);
try (S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service())) {
final long defaultBufferSize = ((S3BlobStore) s3repo.blobStore()).bufferSizeInBytes();
assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(100L * 1024 * 1024));
assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(5L * 1024 * 1024));
}
}
}
@ -51,7 +51,7 @@ public class TestAmazonS3 extends AmazonS3Wrapper {
private double writeFailureRate = 0.0;
private double readFailureRate = 0.0;

private String randomPrefix;
private final String randomPrefix;

ConcurrentMap<String, AtomicLong> accessCounts = new ConcurrentHashMap<>();

@ -76,18 +76,18 @@ public class TestAmazonS3 extends AmazonS3Wrapper {
@Override
public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) throws AmazonClientException, AmazonServiceException {
if (shouldFail(bucketName, key, writeFailureRate)) {
long length = metadata.getContentLength();
long partToRead = (long) (length * randomDouble());
byte[] buffer = new byte[1024];
final long length = metadata.getContentLength();
final long partToRead = (long) (length * randomDouble());
final byte[] buffer = new byte[1024];
for (long cur = 0; cur < partToRead; cur += buffer.length) {
try {
input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur));
} catch (IOException ex) {
input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? buffer.length : partToRead - cur));
} catch (final IOException ex) {
throw new ElasticsearchException("cannot read input stream", ex);
}
}
logger.info("--> random write failure on putObject method: throwing an exception for [bucket={}, key={}]", bucketName, key);
AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception");
final AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception");
ex.setStatusCode(400);
ex.setErrorCode("RequestTimeout");
throw ex;

@ -99,18 +99,18 @@ public class TestAmazonS3 extends AmazonS3Wrapper {
@Override
public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
if (shouldFail(request.getBucketName(), request.getKey(), writeFailureRate)) {
long length = request.getPartSize();
long partToRead = (long) (length * randomDouble());
byte[] buffer = new byte[1024];
final long length = request.getPartSize();
final long partToRead = (long) (length * randomDouble());
final byte[] buffer = new byte[1024];
for (long cur = 0; cur < partToRead; cur += buffer.length) {
try (InputStream input = request.getInputStream()){
input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur));
} catch (IOException ex) {
input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? buffer.length : partToRead - cur));
} catch (final IOException ex) {
throw new ElasticsearchException("cannot read input stream", ex);
}
}
logger.info("--> random write failure on uploadPart method: throwing an exception for [bucket={}, key={}]", request.getBucketName(), request.getKey());
AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception");
final AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception");
ex.setStatusCode(400);
ex.setErrorCode("RequestTimeout");
throw ex;

@ -123,7 +123,7 @@ public class TestAmazonS3 extends AmazonS3Wrapper {
public S3Object getObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
if (shouldFail(bucketName, key, readFailureRate)) {
logger.info("--> random read failure on getObject method: throwing an exception for [bucket={}, key={}]", bucketName, key);
AmazonS3Exception ex = new AmazonS3Exception("Random S3 read exception");
final AmazonS3Exception ex = new AmazonS3Exception("Random S3 read exception");
ex.setStatusCode(404);
throw ex;
} else {

@ -135,7 +135,7 @@ public class TestAmazonS3 extends AmazonS3Wrapper {
if (probability > 0.0) {
String path = randomPrefix + "-" + bucketName + "+" + key;
path += "/" + incrementAndGet(path);
return Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability;
return Math.abs(hashCode(path)) < (Integer.MAX_VALUE * probability);
} else {
return false;
}

@ -143,14 +143,14 @@ public class TestAmazonS3 extends AmazonS3Wrapper {

private int hashCode(String path) {
try {
MessageDigest digest = MessageDigest.getInstance("MD5");
byte[] bytes = digest.digest(path.getBytes("UTF-8"));
final MessageDigest digest = MessageDigest.getInstance("MD5");
final byte[] bytes = digest.digest(path.getBytes("UTF-8"));
int i = 0;
return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16)
| ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF);
} catch (UnsupportedEncodingException ex) {
} catch (final UnsupportedEncodingException ex) {
throw new ElasticsearchException("cannot calculate hashcode", ex);
} catch (NoSuchAlgorithmException ex) {
} catch (final NoSuchAlgorithmException ex) {
throw new ElasticsearchException("cannot calculate hashcode", ex);
}
}
@ -22,45 +22,39 @@ package org.elasticsearch.repositories.s3;
import java.util.IdentityHashMap;

import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.settings.Settings;

public class TestAwsS3Service extends InternalAwsS3Service {
public static class TestPlugin extends S3RepositoryPlugin {
public TestPlugin(Settings settings) {
super(settings);
}
@Override
protected AwsS3Service createStorageService(Settings settings) {
return new TestAwsS3Service(settings);
super(new TestAwsS3Service(settings));
}
}

IdentityHashMap<AmazonS3, TestAmazonS3> clients = new IdentityHashMap<>();
IdentityHashMap<AmazonS3Reference, TestAmazonS3> clients = new IdentityHashMap<>();

public TestAwsS3Service(Settings settings) {
super(settings, S3ClientSettings.load(settings));
super(settings);
}

@Override
public synchronized AmazonS3 client(Settings repositorySettings) {
return cachedWrapper(super.client(repositorySettings));
public synchronized AmazonS3Reference client(String clientName) {
return new AmazonS3Reference(cachedWrapper(super.client(clientName)));
}

private AmazonS3 cachedWrapper(AmazonS3 client) {
TestAmazonS3 wrapper = clients.get(client);
private AmazonS3 cachedWrapper(AmazonS3Reference clientReference) {
TestAmazonS3 wrapper = clients.get(clientReference);
if (wrapper == null) {
wrapper = new TestAmazonS3(client, settings);
clients.put(client, wrapper);
wrapper = new TestAmazonS3(clientReference.client(), settings);
clients.put(clientReference, wrapper);
}
return wrapper;
}

@Override
protected synchronized void doClose() throws ElasticsearchException {
super.doClose();
protected synchronized void releaseCachedClients() {
super.releaseCachedClients();
clients.clear();
}

}
@ -29,6 +29,8 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotT
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction;
import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction;
import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;

@ -241,6 +243,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestNodesUsageAction;
import org.elasticsearch.rest.action.admin.cluster.RestPendingClusterTasksAction;
import org.elasticsearch.rest.action.admin.cluster.RestPutRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.RestPutStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.RestReloadSecureSettingsAction;
import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction;
import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction;

@ -491,6 +494,7 @@ public class ActionModule extends AbstractModule {
actions.register(ExplainAction.INSTANCE, TransportExplainAction.class);
actions.register(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class);
actions.register(NodesReloadSecureSettingsAction.INSTANCE, TransportNodesReloadSecureSettingsAction.class);

//Indexed scripts
actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class);

@ -610,6 +614,8 @@ public class ActionModule extends AbstractModule {

registerHandler.accept(new RestRecoveryAction(settings, restController));

registerHandler.accept(new RestReloadSecureSettingsAction(settings, restController));

// Scripts API
registerHandler.accept(new RestGetStoredScriptAction(settings, restController));
registerHandler.accept(new RestPutStoredScriptAction(settings, restController));
@ -17,14 +17,23 @@
* under the License.
*/

package org.elasticsearch.repositories.azure;
package org.elasticsearch.action.admin.cluster.node.reload;

public class AzureServiceRemoteException extends IllegalStateException {
public AzureServiceRemoteException(String msg) {
super(msg);
import org.elasticsearch.action.Action;

public class NodesReloadSecureSettingsAction
extends Action<NodesReloadSecureSettingsRequest, NodesReloadSecureSettingsResponse> {

public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction();
public static final String NAME = "cluster:admin/nodes/reload_secure_settings";

private NodesReloadSecureSettingsAction() {
super(NAME);
}

public AzureServiceRemoteException(String msg, Throwable cause) {
super(msg, cause);
@Override
public NodesReloadSecureSettingsResponse newResponse() {
return new NodesReloadSecureSettingsResponse();
}

}
@ -0,0 +1,160 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.cluster.node.reload;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.SecureString;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

import static org.elasticsearch.action.ValidateActions.addValidationError;

/**
* Request for a reload secure settings action
*/
public class NodesReloadSecureSettingsRequest extends BaseNodesRequest<NodesReloadSecureSettingsRequest> {

/**
* The password which is broadcast to all nodes, but is never stored on
* persistent storage. The password is used to reread and decrypt the contents
* of the node's keystore (backing the implementation of
* {@code SecureSettings}).
*/
private SecureString secureSettingsPassword;

public NodesReloadSecureSettingsRequest() {
}

/**
* Reload secure settings only on certain nodes, based on the node ids
* specified. If none are passed, secure settings will be reloaded on all the
* nodes.
*/
public NodesReloadSecureSettingsRequest(String... nodesIds) {
super(nodesIds);
}

@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (secureSettingsPassword == null) {
validationException = addValidationError("secure settings password cannot be null (use empty string instead)",
validationException);
}
return validationException;
}

public SecureString secureSettingsPassword() {
return secureSettingsPassword;
}

public NodesReloadSecureSettingsRequest secureStorePassword(SecureString secureStorePassword) {
this.secureSettingsPassword = secureStorePassword;
return this;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
final byte[] passwordBytes = in.readByteArray();
try {
this.secureSettingsPassword = new SecureString(utf8BytesToChars(passwordBytes));
} finally {
Arrays.fill(passwordBytes, (byte) 0);
}
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
final byte[] passwordBytes = charsToUtf8Bytes(this.secureSettingsPassword.getChars());
try {
out.writeByteArray(passwordBytes);
} finally {
Arrays.fill(passwordBytes, (byte) 0);
}
}

/**
* Encodes the provided char[] to a UTF-8 byte[]. This is done while avoiding
* conversions to String. The provided char[] is not modified by this method, so
* the caller needs to take care of clearing the value if it is sensitive.
*/
private static byte[] charsToUtf8Bytes(char[] chars) {
final CharBuffer charBuffer = CharBuffer.wrap(chars);
final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer);
final byte[] bytes;
if (byteBuffer.hasArray()) {
// there is no guarantee that the byte buffer's backing array is the right size
// so we need to make a copy
bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit());
Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data
} else {
final int length = byteBuffer.limit() - byteBuffer.position();
bytes = new byte[length];
byteBuffer.get(bytes);
// if the buffer is not read only we can reset and fill with 0's
if (byteBuffer.isReadOnly() == false) {
byteBuffer.clear(); // reset
for (int i = 0; i < byteBuffer.limit(); i++) {
byteBuffer.put((byte) 0);
}
}
}
return bytes;
}

/**
* Decodes the provided UTF-8 byte[] to a char[]. This is done while avoiding
* conversions to String. The provided byte[] is not modified by this method, so
* the caller needs to take care of clearing the value if it is sensitive.
*/
public static char[] utf8BytesToChars(byte[] utf8Bytes) {
final ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes);
final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer);
final char[] chars;
if (charBuffer.hasArray()) {
// there is no guarantee that the char buffer's backing array is the right size
// so we need to make a copy
chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit());
Arrays.fill(charBuffer.array(), (char) 0); // clear sensitive data
} else {
final int length = charBuffer.limit() - charBuffer.position();
chars = new char[length];
charBuffer.get(chars);
// if the buffer is not read only we can reset and fill with 0's
if (charBuffer.isReadOnly() == false) {
charBuffer.clear(); // reset
for (int i = 0; i < charBuffer.limit(); i++) {
charBuffer.put((char) 0);
}
}
}
return chars;
}
}
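The two private helpers above round-trip the password between char[] and UTF-8 byte[] without ever materializing a String, so the secret never lingers in the string pool and every intermediate buffer can be zeroed. A minimal standalone sketch of the same technique (class and variable names here are illustrative, not part of the commit):

import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class PasswordCodecSketch {
    public static void main(String[] args) {
        final char[] password = {'s', '3', 'c', 'r', '3', 't'};
        // encode without going through String
        final ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password));
        final byte[] wire = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit());
        Arrays.fill(byteBuffer.array(), (byte) 0); // scrub the intermediate buffer
        // ... 'wire' would be written to the stream and zeroed in a finally block ...
        // decode on the receiving side, again without String
        final CharBuffer charBuffer = StandardCharsets.UTF_8.decode(ByteBuffer.wrap(wire));
        final char[] decoded = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit());
        Arrays.fill(charBuffer.array(), (char) 0);
        Arrays.fill(wire, (byte) 0);
        System.out.println(Arrays.equals(password, decoded)); // prints true
    }
}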
@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.cluster.node.reload;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.io.InputStream;
import java.util.Objects;

/**
* Builder for the reload secure settings nodes request
*/
public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder<NodesReloadSecureSettingsRequest,
NodesReloadSecureSettingsResponse, NodesReloadSecureSettingsRequestBuilder> {

public static final String SECURE_SETTINGS_PASSWORD_FIELD_NAME = "secure_settings_password";

public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client, NodesReloadSecureSettingsAction action) {
super(client, action, new NodesReloadSecureSettingsRequest());
}

public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureString secureStorePassword) {
request.secureStorePassword(secureStorePassword);
return this;
}

public NodesReloadSecureSettingsRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException {
Objects.requireNonNull(xContentType);
// EMPTY is ok here because we never call namedObject
try (InputStream stream = source.streamInput();
XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE, stream)) {
XContentParser.Token token;
token = parser.nextToken();
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("expected an object, but found token [{}]", token);
}
token = parser.nextToken();
if (token != XContentParser.Token.FIELD_NAME || false == SECURE_SETTINGS_PASSWORD_FIELD_NAME.equals(parser.currentName())) {
throw new ElasticsearchParseException("expected a field named [{}], but found [{}]", SECURE_SETTINGS_PASSWORD_FIELD_NAME,
token);
}
token = parser.nextToken();
if (token != XContentParser.Token.VALUE_STRING) {
throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead",
SECURE_SETTINGS_PASSWORD_FIELD_NAME, token);
}
final String password = parser.text();
setSecureStorePassword(new SecureString(password.toCharArray()));
token = parser.nextToken();
if (token != XContentParser.Token.END_OBJECT) {
throw new ElasticsearchParseException("expected end of object, but found token [{}]", token);
}
}
return this;
}

}
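The source(...) parser above accepts exactly one body shape: a single-field object whose only key is secure_settings_password. A hedged usage sketch, assuming any connected Elasticsearch Client (BytesArray and XContentType are existing Elasticsearch types; the password value is illustrative):

import org.elasticsearch.client.Client;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;

class ReloadRequestSketch {
    // 'client' is assumed to be any connected Elasticsearch Client
    static void reloadAllNodes(Client client) throws IOException {
        // the only body shape source(...) accepts: a one-field object
        client.admin().cluster()
                .prepareReloadSecureSettings()
                .source(new BytesArray("{\"secure_settings_password\": \"s3cr3t\"}"), XContentType.JSON)
                .get();
    }
}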
@ -0,0 +1,149 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.cluster.node.reload;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import java.io.IOException;
import java.util.List;

/**
* The response for the reload secure settings action
*/
public class NodesReloadSecureSettingsResponse extends BaseNodesResponse<NodesReloadSecureSettingsResponse.NodeResponse>
implements ToXContentFragment {

public NodesReloadSecureSettingsResponse() {
}

public NodesReloadSecureSettingsResponse(ClusterName clusterName, List<NodeResponse> nodes, List<FailedNodeException> failures) {
super(clusterName, nodes, failures);
}

@Override
protected List<NodesReloadSecureSettingsResponse.NodeResponse> readNodesFrom(StreamInput in) throws IOException {
return in.readList(NodeResponse::readNodeResponse);
}

@Override
protected void writeNodesTo(StreamOutput out, List<NodesReloadSecureSettingsResponse.NodeResponse> nodes) throws IOException {
out.writeStreamableList(nodes);
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("nodes");
for (final NodesReloadSecureSettingsResponse.NodeResponse node : getNodes()) {
builder.startObject(node.getNode().getId());
builder.field("name", node.getNode().getName());
final Exception e = node.reloadException();
if (e != null) {
builder.startObject("reload_exception");
ElasticsearchException.generateThrowableXContent(builder, params, e);
builder.endObject();
}
builder.endObject();
}
builder.endObject();
return builder;
}

@Override
public String toString() {
try {
final XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return Strings.toString(builder);
} catch (final IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}

public static class NodeResponse extends BaseNodeResponse {

private Exception reloadException = null;

public NodeResponse() {
}

public NodeResponse(DiscoveryNode node, Exception reloadException) {
super(node);
this.reloadException = reloadException;
}

public Exception reloadException() {
return this.reloadException;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.readBoolean()) {
reloadException = in.readException();
}
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (reloadException != null) {
out.writeBoolean(true);
out.writeException(reloadException);
} else {
out.writeBoolean(false);
}
}

@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final NodesReloadSecureSettingsResponse.NodeResponse that = (NodesReloadSecureSettingsResponse.NodeResponse) o;
return reloadException != null ? reloadException.equals(that.reloadException) : that.reloadException == null;
}

@Override
public int hashCode() {
return reloadException != null ? reloadException.hashCode() : 0;
}

public static NodeResponse readNodeResponse(StreamInput in) throws IOException {
final NodeResponse node = new NodeResponse();
node.readFrom(in);
return node;
}
}
}
@ -0,0 +1,144 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.cluster.node.reload;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.KeyStoreWrapper;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.ReloadablePlugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class TransportNodesReloadSecureSettingsAction extends TransportNodesAction<NodesReloadSecureSettingsRequest,
NodesReloadSecureSettingsResponse,
TransportNodesReloadSecureSettingsAction.NodeRequest,
NodesReloadSecureSettingsResponse.NodeResponse> {

private final Environment environment;
private final PluginsService pluginsService;

@Inject
public TransportNodesReloadSecureSettingsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Environment environment,
PluginsService pluginService) {
super(settings, NodesReloadSecureSettingsAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, NodesReloadSecureSettingsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC,
NodesReloadSecureSettingsResponse.NodeResponse.class);
this.environment = environment;
this.pluginsService = pluginService;
}

@Override
protected NodesReloadSecureSettingsResponse newResponse(NodesReloadSecureSettingsRequest request,
List<NodesReloadSecureSettingsResponse.NodeResponse> responses,
List<FailedNodeException> failures) {
return new NodesReloadSecureSettingsResponse(clusterService.getClusterName(), responses, failures);
}

@Override
protected NodeRequest newNodeRequest(String nodeId, NodesReloadSecureSettingsRequest request) {
return new NodeRequest(nodeId, request);
}

@Override
protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() {
return new NodesReloadSecureSettingsResponse.NodeResponse();
}

@Override
protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) {
final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request;
final SecureString secureSettingsPassword = request.secureSettingsPassword();
try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) {
// reread keystore from config file
if (keystore == null) {
return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(),
new IllegalStateException("Keystore is missing"));
}
// decrypt the keystore using the password from the request
keystore.decrypt(secureSettingsPassword.getChars());
// add the keystore to the original node settings object
final Settings settingsWithKeystore = Settings.builder()
.put(environment.settings(), false)
.setSecureSettings(keystore)
.build();
final List<Exception> exceptions = new ArrayList<>();
// broadcast the new settings object (with the open embedded keystore) to all reloadable plugins
pluginsService.filterPlugins(ReloadablePlugin.class).stream().forEach(p -> {
try {
p.reload(settingsWithKeystore);
} catch (final Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Reload failed for plugin [{}]", p.getClass().getSimpleName()),
e);
exceptions.add(e);
}
});
ExceptionsHelper.rethrowAndSuppress(exceptions);
return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), null);
} catch (final Exception e) {
return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), e);
}
}

public static class NodeRequest extends BaseNodeRequest {

NodesReloadSecureSettingsRequest request;

public NodeRequest() {
}

NodeRequest(String nodeId, NodesReloadSecureSettingsRequest request) {
super(nodeId);
this.request = request;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
request = new NodesReloadSecureSettingsRequest();
request.readFrom(in);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
request.writeTo(out);
}
}
}
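One detail worth calling out in nodeOperation above: per-plugin reload failures are collected and rethrown together via ExceptionsHelper.rethrowAndSuppress, so one failing plugin does not hide the failures of the others, and every plugin still gets its reload attempt. A small sketch of the semantics this relies on (assumed here: the first exception is rethrown and the rest are attached as suppressed exceptions; the messages are illustrative):

import org.elasticsearch.ExceptionsHelper;

import java.util.ArrayList;
import java.util.List;

class RethrowSketch {
    static void demo() {
        final List<Exception> failures = new ArrayList<>();
        failures.add(new IllegalStateException("plugin A failed to reload"));
        failures.add(new IllegalStateException("plugin B failed to reload"));
        try {
            ExceptionsHelper.rethrowAndSuppress(failures); // a no-op when the list is empty
        } catch (Exception e) {
            // e is "plugin A failed to reload"; e.getSuppressed() carries plugin B's failure
        }
    }
}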
@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRes
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;

@ -185,6 +186,11 @@ public interface ClusterAdminClient extends ElasticsearchClient {
*/
ClusterUpdateSettingsRequestBuilder prepareUpdateSettings();

/**
* Re-initialize each cluster node and pass it the secret store password.
*/
NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings();

/**
* Reroutes allocation of shards. Advance API.
*/
@ -41,6 +41,8 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction;
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;

@ -771,6 +773,11 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new ClusterUpdateSettingsRequestBuilder(this, ClusterUpdateSettingsAction.INSTANCE);
}

@Override
public NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings() {
return new NodesReloadSecureSettingsRequestBuilder(this, NodesReloadSecureSettingsAction.INSTANCE);
}

@Override
public ActionFuture<NodesInfoResponse> nodesInfo(final NodesInfoRequest request) {
return execute(NodesInfoAction.INSTANCE, request);
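With these two hunks in place, the reload is a first-class client operation. A hedged usage sketch built only from API shown in this commit plus SecureString; the node ids are illustrative, and an empty setNodesIds(...) would mean "all nodes":

import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.SecureString;

class ClientUsageSketch {
    static void reloadTwoNodes(Client client) {
        try (SecureString password = new SecureString("s3cr3t".toCharArray())) {
            NodesReloadSecureSettingsResponse response = client.admin().cluster()
                    .prepareReloadSecureSettings()
                    .setSecureStorePassword(password)   // broadcast, never persisted
                    .setNodesIds("node-1", "node-2")    // hypothetical node ids
                    .get();
            // response carries one entry per node, each with an optional reload_exception
        }
    }
}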
@ -308,7 +308,9 @@ public class KeyStoreWrapper implements SecureSettings {
}
if (formatVersion <= 2) {
decryptLegacyEntries();
assert password.length == 0;
if (password.length != 0) {
throw new IllegalArgumentException("Keystore format does not accept non-empty passwords");
}
return;
}
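The hunk above turns a debug-only assert into a hard failure: legacy keystore formats (version 2 and below) can only be opened with an empty password, so a non-empty one is now rejected loudly instead of being silently ignored. For orientation, the calling pattern used elsewhere in this commit (see the transport action above) is roughly the following sketch, where configDir stands in for environment.configFile():

import org.elasticsearch.common.settings.KeyStoreWrapper;

import java.nio.file.Path;

class KeystoreReloadSketch {
    static void rereadKeystore(Path configDir, char[] password) throws Exception {
        try (KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir)) {
            if (keystore == null) {
                throw new IllegalStateException("Keystore is missing");
            }
            // for legacy formats (version <= 2) this only succeeds with an empty password
            keystore.decrypt(password);
            // ... hand the open keystore to Settings.builder().setSecureSettings(keystore) ...
        } // the keystore closes here; its contents are no longer retrievable
    }
}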
@ -0,0 +1,108 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.util;

import org.elasticsearch.common.CheckedSupplier;

import java.util.Objects;
import java.util.function.Consumer;

/**
* Encapsulates a {@link CheckedSupplier} which is lazily invoked once on the
* first call to {@code #getOrCompute()}. The value which the
* <code>supplier</code> returns is memoized and will be served until
* {@code #reset()} is called. Each value returned by {@code #getOrCompute()},
* newly minted or cached, will be passed to the <code>onGet</code>
* {@link Consumer}. On {@code #reset()} the value will be passed to the
* <code>onReset</code> {@code Consumer} and the next {@code #getOrCompute()}
* will regenerate the value.
*/
public final class LazyInitializable<T, E extends Exception> {

private final CheckedSupplier<T, E> supplier;
private final Consumer<T> onGet;
private final Consumer<T> onReset;
private volatile T value;

/**
* Creates the simple LazyInitializable instance.
*
* @param supplier
* The {@code CheckedSupplier} to generate values which will be
* served on {@code #getOrCompute()} invocations.
*/
public LazyInitializable(CheckedSupplier<T, E> supplier) {
this(supplier, v -> {}, v -> {});
}

/**
* Creates the complete LazyInitializable instance.
*
* @param supplier
* The {@code CheckedSupplier} to generate values which will be
* served on {@code #getOrCompute()} invocations.
* @param onGet
* A {@code Consumer} which is called on each value, newly forged or
* stale, that is returned by {@code #getOrCompute()}
* @param onReset
* A {@code Consumer} which is invoked on the value that will be
* erased when calling {@code #reset()}
*/
public LazyInitializable(CheckedSupplier<T, E> supplier, Consumer<T> onGet, Consumer<T> onReset) {
this.supplier = supplier;
this.onGet = onGet;
this.onReset = onReset;
}

/**
* Returns a value that was created by <code>supplier</code>. The value might
* have been previously created; if not, it will be created now, in a
* thread-safe manner.
*/
public T getOrCompute() throws E {
final T readOnce = value; // Read volatile just once...
final T result = readOnce == null ? maybeCompute(supplier) : readOnce;
onGet.accept(result);
return result;
}

/**
* Clears the value, if it has been previously created by calling
* {@code #getOrCompute()}. The <code>onReset</code> will be called on this
* value. The next call to {@code #getOrCompute()} will recreate the value.
*/
public synchronized void reset() {
if (value != null) {
onReset.accept(value);
value = null;
}
}

/**
* Creates a new value in a thread-safe manner.
*/
private synchronized T maybeCompute(CheckedSupplier<T, E> supplier) throws E {
if (value == null) {
value = Objects.requireNonNull(supplier.get());
}
return value;
}

}
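A short usage sketch of the class above, in the shape this commit combines it with reference-counted clients: the supplier builds the client lazily, onGet takes a reference for each caller, and onReset releases the cached one so a reload can swap in fresh credentials. All names below are illustrative stand-ins, not part of the commit:

import org.elasticsearch.common.util.LazyInitializable;

import java.io.IOException;

class LazyClientSketch {
    // a stand-in for an expensive, releasable client
    static final class FakeClient {
        void acquire() { /* e.g. incRef() */ }
        void release() { /* e.g. decRef(), shutting the client down at zero */ }
    }

    private final LazyInitializable<FakeClient, IOException> lazyClient =
            new LazyInitializable<>(
                    FakeClient::new,        // built once, on the first getOrCompute()
                    FakeClient::acquire,    // onGet: runs for every value handed out
                    FakeClient::release);   // onReset: runs once when reset() clears the cache

    FakeClient client() throws IOException {
        return lazyClient.getOrCompute();
    }

    void reload() {
        lazyClient.reset(); // the next client() call rebuilds the client with fresh settings
    }
}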
@ -74,6 +74,7 @@ import java.util.function.UnaryOperator;
* <li>{@link RepositoryPlugin}
* <li>{@link ScriptPlugin}
* <li>{@link SearchPlugin}
* <li>{@link ReloadablePlugin}
* </ul>
* <p>In addition to extension points this class also declares some {@code @Deprecated} {@code public final void onModule} methods. These
* methods should cause any extensions of {@linkplain Plugin} that used the pre-5.x style extension syntax to fail to build and point the
@ -0,0 +1,54 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.plugins;

import org.elasticsearch.common.settings.Settings;

/**
* An extension point for {@link Plugin}s that can be reloaded. There is no
* clear definition about what reloading a plugin actually means. When a plugin
* is reloaded it might rebuild any internal members. Plugins usually implement
* this interface in order to reread the values of {@code SecureSetting}s and
* then rebuild any dependent internal members.
*/
public interface ReloadablePlugin {
/**
* Called to trigger the rebuild of the plugin's internal members. The reload
* operation <b>is required to have been completed</b> when the method returns.
* Strictly speaking, the <code>settings</code> argument should not be accessed
* outside of this method's call stack, as any values stored in the node's
* keystore (see {@code SecureSetting}) will not otherwise be retrievable. The
* setting values do not follow dynamic updates, i.e. the values are identical
* to the ones during the initial plugin loading, unless the keystore file on
* disk has changed. Any failure during the operation should be signaled by
* raising an exception, but the plugin should otherwise continue to function
* unperturbed.
*
* @param settings
* Settings used while reloading the plugin. All values are
* retrievable, including the values stored in the node's keystore.
* The setting values are the initial ones, from when the node was
* started, i.e. they don't follow dynamic updates.
* @throws Exception
* if the operation failed. The plugin should continue to operate as
* if the offending call didn't happen.
*/
void reload(Settings settings) throws Exception;
}
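A minimal sketch of a plugin honoring the contract above: reread a secure setting from the passed Settings (whose backing keystore is only open for the duration of the call) and rebuild the dependent state before returning. The plugin class and setting name are hypothetical; SecureSetting.secureString mirrors how the repository plugins declare their secure settings:

import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ReloadablePlugin;

public class ExampleReloadablePlugin extends Plugin implements ReloadablePlugin {

    // hypothetical secure setting, declared like the repository plugins declare theirs
    static final Setting<SecureString> API_KEY = SecureSetting.secureString("example.api_key", null);

    private volatile String apiKey; // stands in for a client rebuilt from the new key

    @Override
    public void reload(Settings settings) throws Exception {
        // the keystore backing 'settings' is only open while reload() runs,
        // so the value must be copied out before returning
        try (SecureString key = API_KEY.get(settings)) {
            this.apiKey = key.toString();
        }
        // ... rebuild any clients that depend on the key, then return ...
    }
}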
@ -0,0 +1,87 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.rest.action.admin.cluster;
|
||||
|
||||
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse;
|
||||
import org.elasticsearch.client.node.NodeClient;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.BytesRestResponse;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.RestResponse;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.rest.action.RestActions;
|
||||
import org.elasticsearch.rest.action.RestBuilderListener;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.rest.RestRequest.Method.POST;
|
||||
|
||||
public final class RestReloadSecureSettingsAction extends BaseRestHandler {
|
||||
|
||||
public RestReloadSecureSettingsAction(Settings settings, RestController controller) {
|
||||
super(settings);
|
||||
controller.registerHandler(POST, "/_nodes/reload_secure_settings", this);
|
||||
controller.registerHandler(POST, "/_nodes/{nodeId}/reload_secure_settings", this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return "nodes_reload_action";
|
||||
}
|
||||
|
||||
@Override
|
||||
public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
|
||||
final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
|
||||
final NodesReloadSecureSettingsRequestBuilder nodesRequestBuilder = client.admin()
|
||||
.cluster()
|
||||
.prepareReloadSecureSettings()
|
||||
.setTimeout(request.param("timeout"))
|
||||
.source(request.requiredContent(), request.getXContentType())
|
||||
.setNodesIds(nodesIds);
|
||||
final NodesReloadSecureSettingsRequest nodesRequest = nodesRequestBuilder.request();
|
||||
return channel -> nodesRequestBuilder
|
||||
.execute(new RestBuilderListener<NodesReloadSecureSettingsResponse>(channel) {
|
||||
@Override
|
||||
public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder)
|
||||
throws Exception {
|
||||
builder.startObject();
|
||||
RestActions.buildNodesHeader(builder, channel.request(), response);
|
||||
builder.field("cluster_name", response.getClusterName().value());
|
||||
response.toXContent(builder, channel.request());
|
||||
builder.endObject();
|
||||
// clear password for the original request
|
||||
nodesRequest.secureSettingsPassword().close();
|
||||
return new BytesRestResponse(RestStatus.OK, builder);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canTripCircuitBreaker() {
|
||||
return false;
|
||||
}
|
||||
|
||||
}
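
Callers can reach the same action through the cluster-admin client instead of
raw REST. A condensed sketch of such a call, using only the builder methods
that appear above and in the integration tests below (the empty password is
merely an example value):

NodesReloadSecureSettingsResponse response = client.admin()
        .cluster()
        .prepareReloadSecureSettings()
        .setSecureStorePassword(new SecureString(new char[0]))
        .get();
for (NodesReloadSecureSettingsResponse.NodeResponse node : response.getNodes()) {
    if (node.reloadException() != null) {
        // this node failed to decrypt its keystore, or a plugin's reload threw
    }
}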

@ -0,0 +1,422 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse;
import org.elasticsearch.common.settings.KeyStoreWrapper;
import org.elasticsearch.common.settings.SecureSettings;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.ReloadablePlugin;
import org.elasticsearch.test.ESIntegTestCase;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import java.security.AccessControlException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

public class ReloadSecureSettingsIT extends ESIntegTestCase {

    public void testMissingKeystoreFile() throws Exception {
        final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
        final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class)
                .stream().findFirst().get();
        final Environment environment = internalCluster().getInstance(Environment.class);
        final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>();
        // keystore file should be missing for this test case
        Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile()));
        final int initialReloadCount = mockReloadablePlugin.getReloadCount();
        final CountDownLatch latch = new CountDownLatch(1);
        client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute(
                new ActionListener<NodesReloadSecureSettingsResponse>() {
                    @Override
                    public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
                        try {
                            assertThat(nodesReloadResponse, notNullValue());
                            final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap();
                            assertThat(nodesMap.size(), equalTo(cluster().size()));
                            for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) {
                                assertThat(nodeResponse.reloadException(), notNullValue());
                                assertThat(nodeResponse.reloadException(), instanceOf(IllegalStateException.class));
                                assertThat(nodeResponse.reloadException().getMessage(), containsString("Keystore is missing"));
                            }
                        } catch (final AssertionError e) {
                            reloadSettingsError.set(e);
                        } finally {
                            latch.countDown();
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        reloadSettingsError.set(new AssertionError("Nodes request failed", e));
                        latch.countDown();
                    }
                });
        latch.await();
        if (reloadSettingsError.get() != null) {
            throw reloadSettingsError.get();
        }
        // in the missing keystore case no reload should be triggered
        assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount));
    }

    public void testNullKeystorePassword() throws Exception {
        final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
        final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class)
                .stream().findFirst().get();
        final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>();
        final int initialReloadCount = mockReloadablePlugin.getReloadCount();
        final CountDownLatch latch = new CountDownLatch(1);
        client().admin().cluster().prepareReloadSecureSettings().execute(
                new ActionListener<NodesReloadSecureSettingsResponse>() {
                    @Override
                    public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
                        try {
                            reloadSettingsError.set(new AssertionError("Null keystore password should fail"));
                        } finally {
                            latch.countDown();
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        try {
                            assertThat(e, instanceOf(ActionRequestValidationException.class));
                            assertThat(e.getMessage(), containsString("secure settings password cannot be null"));
                        } catch (final AssertionError ae) {
                            reloadSettingsError.set(ae);
                        } finally {
                            latch.countDown();
                        }
                    }
                });
        latch.await();
        if (reloadSettingsError.get() != null) {
            throw reloadSettingsError.get();
        }
        // in the null password case no reload should be triggered
        assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount));
    }

    public void testInvalidKeystoreFile() throws Exception {
        final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
        final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class)
                .stream().findFirst().get();
        final Environment environment = internalCluster().getInstance(Environment.class);
        final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>();
        final int initialReloadCount = mockReloadablePlugin.getReloadCount();
        // invalid "keystore" file should be present in the config dir
        try (InputStream keystore = ReloadSecureSettingsIT.class.getResourceAsStream("invalid.txt.keystore")) {
            if (Files.exists(environment.configFile()) == false) {
                Files.createDirectory(environment.configFile());
            }
            Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING);
        }
        final CountDownLatch latch = new CountDownLatch(1);
        client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute(
                new ActionListener<NodesReloadSecureSettingsResponse>() {
                    @Override
                    public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
                        try {
                            assertThat(nodesReloadResponse, notNullValue());
                            final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap();
                            assertThat(nodesMap.size(), equalTo(cluster().size()));
                            for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) {
                                assertThat(nodeResponse.reloadException(), notNullValue());
                            }
                        } catch (final AssertionError e) {
                            reloadSettingsError.set(e);
                        } finally {
                            latch.countDown();
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        reloadSettingsError.set(new AssertionError("Nodes request failed", e));
                        latch.countDown();
                    }
                });
        latch.await();
        if (reloadSettingsError.get() != null) {
            throw reloadSettingsError.get();
        }
        // in the invalid keystore format case no reload should be triggered
        assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount));
    }

    public void testWrongKeystorePassword() throws Exception {
        final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
        final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class)
                .stream().findFirst().get();
        final Environment environment = internalCluster().getInstance(Environment.class);
        final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>();
        final int initialReloadCount = mockReloadablePlugin.getReloadCount();
        // "some" keystore should be present in this case
        writeEmptyKeystore(environment, new char[0]);
        final CountDownLatch latch = new CountDownLatch(1);
        client().admin()
                .cluster()
                .prepareReloadSecureSettings()
                .setSecureStorePassword(new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' }))
                .execute(new ActionListener<NodesReloadSecureSettingsResponse>() {
                    @Override
                    public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
                        try {
                            assertThat(nodesReloadResponse, notNullValue());
                            final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap();
                            assertThat(nodesMap.size(), equalTo(cluster().size()));
                            for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) {
                                assertThat(nodeResponse.reloadException(), notNullValue());
                                assertThat(nodeResponse.reloadException(), instanceOf(IOException.class));
                            }
                        } catch (final AssertionError e) {
                            reloadSettingsError.set(e);
                        } finally {
                            latch.countDown();
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        reloadSettingsError.set(new AssertionError("Nodes request failed", e));
                        latch.countDown();
                    }
                });
        latch.await();
        if (reloadSettingsError.get() != null) {
            throw reloadSettingsError.get();
        }
        // in the wrong password case no reload should be triggered
        assertThat(mockReloadablePlugin.getReloadCount(), equalTo(initialReloadCount));
    }

    public void testMisbehavingPlugin() throws Exception {
        final Environment environment = internalCluster().getInstance(Environment.class);
        final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
        final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class)
                .stream().findFirst().get();
        // make plugins throw on reload
        for (final String nodeName : internalCluster().getNodeNames()) {
            internalCluster().getInstance(PluginsService.class, nodeName)
                    .filterPlugins(MisbehavingReloadablePlugin.class)
                    .stream().findFirst().get().setShouldThrow(true);
        }
        final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>();
        final int initialReloadCount = mockReloadablePlugin.getReloadCount();
        // "some" keystore should be present
        final SecureSettings secureSettings = writeEmptyKeystore(environment, new char[0]);
        // read seed setting value from the test case (not from the node)
        final String seedValue = KeyStoreWrapper.SEED_SETTING
                .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build())
                .toString();
        final CountDownLatch latch = new CountDownLatch(1);
        client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute(
                new ActionListener<NodesReloadSecureSettingsResponse>() {
                    @Override
                    public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
                        try {
                            assertThat(nodesReloadResponse, notNullValue());
                            final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap();
                            assertThat(nodesMap.size(), equalTo(cluster().size()));
                            for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) {
                                assertThat(nodeResponse.reloadException(), notNullValue());
                                assertThat(nodeResponse.reloadException().getMessage(), containsString("If shouldThrow I throw"));
                            }
                        } catch (final AssertionError e) {
                            reloadSettingsError.set(e);
                        } finally {
                            latch.countDown();
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        reloadSettingsError.set(new AssertionError("Nodes request failed", e));
                        latch.countDown();
                    }
                });
        latch.await();
        if (reloadSettingsError.get() != null) {
            throw reloadSettingsError.get();
        }
        // even if one plugin fails to reload (throws Exception), others should be unperturbed
        assertThat(mockReloadablePlugin.getReloadCount() - initialReloadCount, equalTo(1));
        // mock plugin should have been reloaded successfully
        assertThat(mockReloadablePlugin.getSeedValue(), equalTo(seedValue));
    }

    public void testReloadWhileKeystoreChanged() throws Exception {
        final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
        final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class)
                .stream().findFirst().get();
        final Environment environment = internalCluster().getInstance(Environment.class);
        final int initialReloadCount = mockReloadablePlugin.getReloadCount();
        // pick the iteration count once, rather than re-rolling the bound on every pass
        final int iterations = randomIntBetween(4, 8);
        for (int i = 0; i < iterations; i++) {
            // write keystore
            final SecureSettings secureSettings = writeEmptyKeystore(environment, new char[0]);
            // read seed setting value from the test case (not from the node)
            final String seedValue = KeyStoreWrapper.SEED_SETTING
                    .get(Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build())
                    .toString();
            // reload call
            successfulReloadCall();
            assertThat(mockReloadablePlugin.getSeedValue(), equalTo(seedValue));
            assertThat(mockReloadablePlugin.getReloadCount() - initialReloadCount, equalTo(i + 1));
        }
    }

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        final List<Class<? extends Plugin>> plugins = Arrays.asList(MockReloadablePlugin.class, MisbehavingReloadablePlugin.class);
        // shuffle as reload is called in order
        Collections.shuffle(plugins, random());
        return plugins;
    }

    private void successfulReloadCall() throws InterruptedException {
        final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>();
        final CountDownLatch latch = new CountDownLatch(1);
        client().admin().cluster().prepareReloadSecureSettings().setSecureStorePassword(new SecureString(new char[0])).execute(
                new ActionListener<NodesReloadSecureSettingsResponse>() {
                    @Override
                    public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) {
                        try {
                            assertThat(nodesReloadResponse, notNullValue());
                            final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap();
                            assertThat(nodesMap.size(), equalTo(cluster().size()));
                            for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) {
                                assertThat(nodeResponse.reloadException(), nullValue());
                            }
                        } catch (final AssertionError e) {
                            reloadSettingsError.set(e);
                        } finally {
                            latch.countDown();
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        reloadSettingsError.set(new AssertionError("Nodes request failed", e));
                        latch.countDown();
                    }
                });
        latch.await();
        if (reloadSettingsError.get() != null) {
            throw reloadSettingsError.get();
        }
    }

    private SecureSettings writeEmptyKeystore(Environment environment, char[] password) throws Exception {
        final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create();
        try {
            keyStoreWrapper.save(environment.configFile(), password);
        } catch (final AccessControlException e) {
            if (e.getPermission() instanceof RuntimePermission && e.getPermission().getName().equals("accessUserInformation")) {
                // this is expected: the save method is extra diligent and wants to make sure
                // the keystore is readable, not relying on umask and whatnot. It's ok, we don't
                // care about this in tests.
            } else {
                throw e;
            }
        }
        return keyStoreWrapper;
    }

    public static class CountingReloadablePlugin extends Plugin implements ReloadablePlugin {

        private volatile int reloadCount;

        public CountingReloadablePlugin() {
        }

        @Override
        public void reload(Settings settings) throws Exception {
            reloadCount++;
        }

        public int getReloadCount() {
            return reloadCount;
        }

    }

    public static class MockReloadablePlugin extends CountingReloadablePlugin {

        private volatile String seedValue;

        public MockReloadablePlugin() {
        }

        @Override
        public void reload(Settings settings) throws Exception {
            super.reload(settings);
            this.seedValue = KeyStoreWrapper.SEED_SETTING.get(settings).toString();
        }

        public String getSeedValue() {
            return seedValue;
        }

    }

    public static class MisbehavingReloadablePlugin extends CountingReloadablePlugin {

        private boolean shouldThrow = false;

        public MisbehavingReloadablePlugin() {
        }

        @Override
        public synchronized void reload(Settings settings) throws Exception {
            super.reload(settings);
            if (shouldThrow) {
                shouldThrow = false;
                throw new Exception("If shouldThrow I throw");
            }
        }

        public synchronized void setShouldThrow(boolean shouldThrow) {
            this.shouldThrow = shouldThrow;
        }
    }

}
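
As a side note, the empty keystore written by `writeEmptyKeystore` above can be
read back the same way a node does during a reload. A minimal sketch, assuming
the `KeyStoreWrapper` API as exercised by this commit (`load` returning null
when the file is missing, `decrypt` taking the password):

KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile());
if (keystore == null) {
    // matches the "Keystore is missing" failure asserted in testMissingKeystoreFile
    throw new IllegalStateException("Keystore is missing");
}
keystore.decrypt(new char[0]); // empty password, as in the tests above
Settings settings = Settings.builder().setSecureSettings(keystore).build();
String seedValue = KeyStoreWrapper.SEED_SETTING.get(settings).toString();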

@ -0,0 +1,3 @@
admin admin
dragon 12345