diff --git a/plugins/cloud-azure/NOTICE.txt b/plugins/cloud-azure/NOTICE.txt new file mode 100644 index 00000000000..86016475acd --- /dev/null +++ b/plugins/cloud-azure/NOTICE.txt @@ -0,0 +1,5 @@ +ElasticSearch +Copyright 2009-2013 ElasticSearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). diff --git a/plugins/cloud-azure/README.md b/plugins/cloud-azure/README.md new file mode 100644 index 00000000000..0da09f09c06 --- /dev/null +++ b/plugins/cloud-azure/README.md @@ -0,0 +1,568 @@ +Azure Cloud Plugin for Elasticsearch +==================================== + +The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. + +In order to install the plugin, run: + +```sh +bin/plugin install elasticsearch/elasticsearch-cloud-azure/2.6.1 +``` + +You need to install a version matching your Elasticsearch version: + +| Elasticsearch | Azure Cloud Plugin| Docs | +|------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------| +| master | Build from source | See below | +| es-1.x | Build from source | [2.7.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#version-270-snapshot-for-elasticsearch-1x)| +| es-1.5 | 2.6.1 | [2.6.1](https://github.com/elastic/elasticsearch-cloud-azure/tree/v2.6.1/#version-261-for-elasticsearch-15) | +| es-1.4 | 2.5.2 | [2.5.2](https://github.com/elastic/elasticsearch-cloud-azure/tree/v2.5.2/#version-252-for-elasticsearch-14) | +| es-1.3 | 2.4.0 | [2.4.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.4.0/#version-240-for-elasticsearch-13) | +| es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.3.0/#azure-cloud-plugin-for-elasticsearch) | +| es-1.1 | 2.2.0 | 
[2.2.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.2.0/#azure-cloud-plugin-for-elasticsearch) | +| es-1.0 | 2.1.0 | [2.1.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.1.0/#azure-cloud-plugin-for-elasticsearch) | +| es-0.90 | 1.0.0.alpha1 | [1.0.0.alpha1](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v1.0.0.alpha1/#azure-cloud-plugin-for-elasticsearch)| + +To build a `SNAPSHOT` version, you need to build it with Maven: + +```bash +mvn clean install +plugin --install cloud-azure \ + --url file:target/releases/elasticsearch-cloud-azure-X.X.X-SNAPSHOT.zip +``` + +Azure Virtual Machine Discovery +=============================== + +Azure VM discovery allows to use the azure APIs to perform automatic discovery (similar to multicast in non hostile +multicast environments). Here is a simple sample configuration: + +``` +cloud: + azure: + management: + subscription.id: XXX-XXX-XXX-XXX + cloud.service.name: es-demo-app + keystore: + path: /path/to/azurekeystore.pkcs12 + password: WHATEVER + type: pkcs12 + +discovery: + type: azure +``` + +How to start (short story) +-------------------------- + +* Create Azure instances +* Install Elasticsearch +* Install Azure plugin +* Modify `elasticsearch.yml` file +* Start Elasticsearch + +Azure credential API settings +----------------------------- + +The following are a list of settings that can further control the credential API: + +* `cloud.azure.management.keystore.path`: /path/to/keystore +* `cloud.azure.management.keystore.type`: `pkcs12`, `jceks` or `jks`. Defaults to `pkcs12`. 
+* `cloud.azure.management.keystore.password`: your_password for the keystore +* `cloud.azure.management.subscription.id`: your_azure_subscription_id +* `cloud.azure.management.cloud.service.name`: your_azure_cloud_service_name + +Note that in previous versions, it was: + +``` +cloud: + azure: + keystore: /path/to/keystore + password: your_password_for_keystore + subscription_id: your_azure_subscription_id + service_name: your_azure_cloud_service_name +``` + +Advanced settings +----------------- + +The following are a list of settings that can further control the discovery: + +* `discovery.azure.host.type`: either `public_ip` or `private_ip` (default). Azure discovery will use the one you set to ping +other nodes. This feature was not documented before but was existing under `cloud.azure.host_type`. +* `discovery.azure.endpoint.name`: when using `public_ip` this setting is used to identify the endpoint name used to forward requests +to elasticsearch (aka transport port name). Defaults to `elasticsearch`. In Azure management console, you could define +an endpoint `elasticsearch` forwarding for example requests on public IP on port 8100 to the virtual machine on port 9300. +This feature was not documented before but was existing under `cloud.azure.port_name`. +* `discovery.azure.deployment.name`: deployment name if any. Defaults to the value set with `cloud.azure.management.cloud.service.name`. +* `discovery.azure.deployment.slot`: either `staging` or `production` (default). + +For example: + +``` +discovery: + type: azure + azure: + host: + type: private_ip + endpoint: + name: elasticsearch + deployment: + name: your_azure_cloud_service_name + slot: production +``` + +How to start (long story) +-------------------------- + +We will expose here one strategy which is to hide our Elasticsearch cluster from outside. + +With this strategy, only VM behind this same virtual port can talk to each other. 
+That means that with this mode, you can use elasticsearch unicast discovery to build a cluster. + +Best, you can use the `elasticsearch-cloud-azure` plugin to let it fetch information about your nodes using +azure API. + +### Prerequisites + +Before starting, you need to have: + +* A [Windows Azure account](http://www.windowsazure.com/) +* SSH keys and certificate +* OpenSSL that isn't from MacPorts, specifically `OpenSSL 1.0.1f 6 Jan + 2014` doesn't seem to create a valid keypair for ssh. FWIW, + `OpenSSL 1.0.1c 10 May 2012` on Ubuntu 12.04 LTS is known to work. + +You should follow [this guide](http://azure.microsoft.com/en-us/documentation/articles/linux-use-ssh-key/) to learn +how to create or use existing SSH keys. If you have already did it, you can skip the following. + +Here is a description on how to generate SSH keys using `openssl`: + +```sh +# You may want to use another dir than /tmp +cd /tmp +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure-private.key -out azure-certificate.pem +chmod 600 azure-private.key azure-certificate.pem +openssl x509 -outform der -in azure-certificate.pem -out azure-certificate.cer +``` + +Generate a keystore which will be used by the plugin to authenticate with a certificate +all Azure API calls. + +```sh +# Generate a keystore (azurekeystore.pkcs12) +# Transform private key to PEM format +openssl pkcs8 -topk8 -nocrypt -in azure-private.key -inform PEM -out azure-pk.pem -outform PEM +# Transform certificate to PEM format +openssl x509 -inform der -in azure-certificate.cer -out azure-cert.pem +cat azure-cert.pem azure-pk.pem > azure.pem.txt +# You MUST enter a password! +openssl pkcs12 -export -in azure.pem.txt -out azurekeystore.pkcs12 -name azure -noiter -nomaciter +``` + +Upload the `azure-certificate.cer` file both in the elasticsearch Cloud Service (under `Manage Certificates`), +and under `Settings -> Manage Certificates`. 
+ +**Important**: when prompted for a password, you need to enter a non empty one. + +See this [guide](http://www.windowsazure.com/en-us/manage/linux/how-to-guides/ssh-into-linux/) to have +more details on how to create keys for Azure. + +Once done, you need to upload your certificate in Azure: + +* Go to the [management console](https://account.windowsazure.com/). +* Sign in using your account. +* Click on `Portal`. +* Go to Settings (bottom of the left list) +* On the bottom bar, click on `Upload` and upload your `azure-certificate.cer` file. + +You may want to use [Windows Azure Command-Line Tool](http://www.windowsazure.com/en-us/develop/nodejs/how-to-guides/command-line-tools/): + +* Install [NodeJS](https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager), for example using +homebrew on MacOS X: + +```sh +brew install node +``` + +* Install Azure tools: + +```sh +sudo npm install azure-cli -g +``` + +* Download and import your azure settings: + +```sh +# This will open a browser and will download a .publishsettings file +azure account download + +# Import this file (we have downloaded it to /tmp) +# Note, it will create needed files in ~/.azure. You can remove azure.publishsettings when done. +azure account import /tmp/azure.publishsettings +``` + +### Creating your first instance + +You need to have a storage account available. Check [Azure Blob Storage documentation](http://www.windowsazure.com/en-us/develop/net/how-to-guides/blob-storage/#create-account) +for more information. + +You will need to choose the operating system you want to run on. 
To get a list of official available images, run: + +```sh +azure vm image list +``` + +Let's say we are going to deploy an Ubuntu image on an extra small instance in West Europe: + +* Azure cluster name: `azure-elasticsearch-cluster` +* Image: `b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20130808-alpha3-en-us-30GB` +* VM Name: `myesnode1` +* VM Size: `extrasmall` +* Location: `West Europe` +* Login: `elasticsearch` +* Password: `password1234!!` + +Using command line: + +```sh +azure vm create azure-elasticsearch-cluster \ + b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20130808-alpha3-en-us-30GB \ + --vm-name myesnode1 \ + --location "West Europe" \ + --vm-size extrasmall \ + --ssh 22 \ + --ssh-cert /tmp/azure-certificate.pem \ + elasticsearch password1234\!\! +``` + +You should see something like: + +``` +info: Executing command vm create ++ Looking up image ++ Looking up cloud service ++ Creating cloud service ++ Retrieving storage accounts ++ Configuring certificate ++ Creating VM +info: vm create command OK +``` + +Now, your first instance is started. You need to install Elasticsearch on it. 
+ +> **Note on SSH** +> +> You need to give the private key and username each time you log on your instance: +> +>```sh +>ssh -i ~/.ssh/azure-private.key elasticsearch@myescluster.cloudapp.net +>``` +> +> But you can also define it once in `~/.ssh/config` file: +> +>``` +>Host *.cloudapp.net +> User elasticsearch +> StrictHostKeyChecking no +> UserKnownHostsFile=/dev/null +> IdentityFile ~/.ssh/azure-private.key +>``` + + +```sh +# First, copy your keystore on this machine +scp /tmp/azurekeystore.pkcs12 azure-elasticsearch-cluster.cloudapp.net:/home/elasticsearch + +# Then, connect to your instance using SSH +ssh azure-elasticsearch-cluster.cloudapp.net +``` + +Once connected, install Elasticsearch: + +```sh +# Install Latest Java version +# Read http://www.webupd8.org/2012/01/install-oracle-java-jdk-7-in-ubuntu-via.html for details +sudo add-apt-repository ppa:webupd8team/java +sudo apt-get update +sudo apt-get install oracle-java7-installer + +# If you want to install OpenJDK instead +# sudo apt-get update +# sudo apt-get install openjdk-7-jre-headless + +# Download Elasticsearch +curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.0.0.deb -o elasticsearch-1.0.0.deb + +# Prepare Elasticsearch installation +sudo dpkg -i elasticsearch-1.0.0.deb +``` + +Check that elasticsearch is running: + +```sh +curl http://localhost:9200/ +``` + +This command should give you a JSON result: + +```javascript +{ + "status" : 200, + "name" : "Living Colossus", + "version" : { + "number" : "1.0.0", + "build_hash" : "a46900e9c72c0a623d71b54016357d5f94c8ea32", + "build_timestamp" : "2014-02-12T16:18:34Z", + "build_snapshot" : false, + "lucene_version" : "4.6" + }, + "tagline" : "You Know, for Search" +} +``` + +### Install elasticsearch cloud azure plugin + +```sh +# Stop elasticsearch +sudo service elasticsearch stop + +# Install the plugin +sudo /usr/share/elasticsearch/bin/plugin install elasticsearch/elasticsearch-cloud-azure/2.6.1 + +# Configure 
it +sudo vi /etc/elasticsearch/elasticsearch.yml +``` + +And add the following lines: + +```yaml +# If you don't remember your account id, you may get it with `azure account list` +cloud: + azure: + management: + subscription.id: your_azure_subscription_id + cloud.service.name: your_azure_cloud_service_name + keystore: + path: /home/elasticsearch/azurekeystore.pkcs12 + password: your_password_for_keystore + +discovery: + type: azure + +# Recommended (warning: non durable disk) +# path.data: /mnt/resource/elasticsearch/data +``` + +Restart elasticsearch: + +```sh +sudo service elasticsearch start +``` + +If anything goes wrong, check your logs in `/var/log/elasticsearch`. + + +Scaling Out! +------------ + +You need first to create an image of your previous machine. +Disconnect from your machine and run locally the following commands: + +```sh +# Shutdown the instance +azure vm shutdown myesnode1 + +# Create an image from this instance (it could take some minutes) +azure vm capture myesnode1 esnode-image --delete + +# Note that the previous instance has been deleted (mandatory) +# So you need to create it again and BTW create other instances. + +azure vm create azure-elasticsearch-cluster \ + esnode-image \ + --vm-name myesnode1 \ + --location "West Europe" \ + --vm-size extrasmall \ + --ssh 22 \ + --ssh-cert /tmp/azure-certificate.pem \ + elasticsearch password1234\!\! +``` + +> **Note:** It could happen that azure changes the endpoint public IP address. +> DNS propagation could take some minutes before you can connect again using +> name. You can get from azure the IP address if needed, using: +> +> ```sh +> # Look at Network `Endpoints 0 Vip` +> azure vm show myesnode1 +> ``` + +Let's start more instances! + +```sh +for x in $(seq 2 10) + do + echo "Launching azure instance #$x..." 
+ azure vm create azure-elasticsearch-cluster \ + esnode-image \ + --vm-name myesnode$x \ + --vm-size extrasmall \ + --ssh $((21 + $x)) \ + --ssh-cert /tmp/azure-certificate.pem \ + --connect \ + elasticsearch password1234\!\! + done +``` + +If you want to remove your running instances: + +``` +azure vm delete myesnode1 +``` + +Azure Repository +================ + +To enable Azure repositories, you have first to set your azure storage settings in `elasticsearch.yml` file: + +``` +cloud: + azure: + storage: + account: your_azure_storage_account + key: your_azure_storage_key +``` + +For information, in previous version of the azure plugin, settings were: + +``` +cloud: + azure: + storage_account: your_azure_storage_account + storage_key: your_azure_storage_key +``` + +The Azure repository supports following settings: + +* `container`: Container name. Defaults to `elasticsearch-snapshots` +* `base_path`: Specifies the path within container to repository data. Defaults to empty (root directory). +* `chunk_size`: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified +in bytes or by using size value notation, i.e. `1g`, `10m`, `5k`. Defaults to `64m` (64m max) +* `compress`: When set to `true` metadata files are stored in compressed format. This setting doesn't affect index +files that are already compressed by default. Defaults to `false`. 
+ +Some examples, using scripts: + +```sh +# The simpliest one +$ curl -XPUT 'http://localhost:9200/_snapshot/my_backup1' -d '{ + "type": "azure" +}' + +# With some settings +$ curl -XPUT 'http://localhost:9200/_snapshot/my_backup2' -d '{ + "type": "azure", + "settings": { + "container": "backup_container", + "base_path": "backups", + "chunk_size": "32m", + "compress": true + } +}' +``` + +Example using Java: + +```java +client.admin().cluster().preparePutRepository("my_backup3") + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put(Storage.CONTAINER, "backup_container") + .put(Storage.CHUNK_SIZE, new ByteSizeValue(32, ByteSizeUnit.MB)) + ).get(); +``` + +Repository validation rules +--------------------------- + +According to the [containers naming guide](http://msdn.microsoft.com/en-us/library/dd135715.aspx), a container name must +be a valid DNS name, conforming to the following naming rules: + +* Container names must start with a letter or number, and can contain only letters, numbers, and the dash (-) character. +* Every dash (-) character must be immediately preceded and followed by a letter or number; consecutive dashes are not +permitted in container names. +* All letters in a container name must be lowercase. +* Container names must be from 3 through 63 characters long. + + +Testing +======= + +Integrations tests in this plugin require working Azure configuration and therefore disabled by default. +To enable tests prepare a config file `elasticsearch.yml` with the following content: + +``` +cloud: + azure: + storage: + account: "YOUR-AZURE-STORAGE-NAME" + key: "YOUR-AZURE-STORAGE-KEY" +``` + +Replaces `account`, `key` with your settings. Please, note that the test will delete all snapshot/restore related files in the specified bucket. 
+ +To run test: + +```sh +mvn -Dtests.azure=true -Dtests.config=/path/to/config/file/elasticsearch.yml clean test +``` + +Working around a bug in Windows SMB and Java on windows +======================================================= +When using a shared file system based on the SMB protocol (like Azure File Service) to store indices, the way Lucene open index segment files is with a write only flag. This is the *correct* way to open the files, as they will only be used for writes and allows different FS implementations to optimize for it. Sadly, in windows with SMB, this disables the cache manager, causing writes to be slow. This has been described in [LUCENE-6176](https://issues.apache.org/jira/browse/LUCENE-6176), but it affects each and every Java program out there!. This need and must be fixed outside of ES and/or Lucene, either in windows or OpenJDK. For now, we are providing an experimental support to open the files with read flag, but this should be considered experimental and the correct way to fix it is in OpenJDK or Windows. + +The Azure Cloud plugin provides two storage types optimized for SMB: + +- `smb_mmap_fs`: a SMB specific implementation of the default [mmap fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#mmapfs) +- `smb_simple_fs`: a SMB specific implementation of the default [simple fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#simplefs) + +To use one of these specific storage types, you need to install the Azure Cloud plugin and restart the node. +Then configure Elasticsearch to set the storage type you want. + +This can be configured for all indices by adding this to the `elasticsearch.yml` file: + +```yaml +index.store.type: smb_simple_fs +``` + +Note that setting will be applied for newly created indices. 
+ +It can also be set on a per-index basis at index creation time: + +```sh +curl -XPUT localhost:9200/my_index -d '{ + "settings": { + "index.store.type": "smb_mmap_fs" + } +}' +``` + + +License +------- + +This software is licensed under the Apache 2 license, quoted below. + + Copyright 2009-2014 Elasticsearch + + Licensed under the Apache License, Version 2.0 (the "License"); you may not + use this file except in compliance with the License. You may obtain a copy of + the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations under + the License. diff --git a/plugins/cloud-azure/pom.xml b/plugins/cloud-azure/pom.xml new file mode 100644 index 00000000000..f6c8b42601e --- /dev/null +++ b/plugins/cloud-azure/pom.xml @@ -0,0 +1,64 @@ + + + + + 4.0.0 + + org.elasticsearch.plugin + elasticsearch-cloud-azure + + jar + Elasticsearch Azure cloud plugin + The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism and add Azure storage repositories. 
+ + + org.elasticsearch + elasticsearch-plugin + 2.0.0-SNAPSHOT + + + + + 1 + + + + + + com.microsoft.azure + azure-storage + 2.0.0 + + + com.microsoft.azure + azure-management-compute + 0.7.0 + + + com.microsoft.azure + azure-management + 0.7.0 + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + + + diff --git a/plugins/cloud-azure/src/main/assemblies/plugin.xml b/plugins/cloud-azure/src/main/assemblies/plugin.xml new file mode 100644 index 00000000000..bf4bf511b12 --- /dev/null +++ b/plugins/cloud-azure/src/main/assemblies/plugin.xml @@ -0,0 +1,39 @@ + + + + + plugin + + zip + + false + + + / + true + true + + org.elasticsearch:elasticsearch + + + + / + true + true + + com.microsoft.azure:azure-storage + com.microsoft.azure:azure-management + com.microsoft.azure:azure-management-compute + + + + diff --git a/plugins/cloud-azure/src/main/java/org/apache/lucene/store/SmbDirectoryWrapper.java b/plugins/cloud-azure/src/main/java/org/apache/lucene/store/SmbDirectoryWrapper.java new file mode 100644 index 00000000000..1e783fdd6c5 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/apache/lucene/store/SmbDirectoryWrapper.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.lucene.store; + +import java.io.FilterOutputStream; +import java.io.IOException; +import java.nio.channels.Channels; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; + +/** + * This class is used to wrap an existing {@link org.apache.lucene.store.FSDirectory} so that + * the new shard segment files will be opened for Read and Write access. + *

+ * When storing index files on an SMB share like Azure File Service, opening the file for Read + * access can save a lot of roundtrips to the storage server and thus offering better performance. + */ +public final class SmbDirectoryWrapper extends FilterDirectory { + + private final FSDirectory fsDirectory; + + public SmbDirectoryWrapper(FSDirectory in) { + super(in); + fsDirectory = in; + } + + @Override + public IndexOutput createOutput(String name, IOContext context) throws IOException { + fsDirectory.ensureOpen(); + fsDirectory.ensureCanWrite(name); + return new SmbFSIndexOutput(name); + } + + /** + * Copied from final inner class {@link org.apache.lucene.store.FSDirectory.FSIndexOutput} + */ + final class SmbFSIndexOutput extends OutputStreamIndexOutput { + /** + * The maximum chunk size is 8192 bytes, because {@link java.io.FileOutputStream} mallocs + * a native buffer outside of stack if the write buffer size is larger. + */ + static final int CHUNK_SIZE = 8192; + + private final String name; + + public SmbFSIndexOutput(String name) throws IOException { + super("SmbFSIndexOutput(path=\"" + fsDirectory.getDirectory().resolve(name) + "\")", new FilterOutputStream(Channels.newOutputStream(Files.newByteChannel(fsDirectory.getDirectory().resolve(name), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE))) { + // This implementation ensures, that we never write more than CHUNK_SIZE bytes: + @Override + public void write(byte[] b, int offset, int length) throws IOException { + while (length > 0) { + final int chunk = Math.min(length, CHUNK_SIZE); + out.write(b, offset, chunk); + length -= chunk; + offset += chunk; + } + } + }, CHUNK_SIZE); + this.name = name; + } + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java new file mode 100644 index 00000000000..d7d1c81ff18 --- 
/dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.azure.management.AzureComputeService; +import org.elasticsearch.cloud.azure.management.AzureComputeService.Management; +import org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl; +import org.elasticsearch.cloud.azure.storage.AzureStorageService; +import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; +import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.azure.AzureDiscovery; + +/** + * Azure Module + * + *

+ * + * @see org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl + * @see org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl + */ +public class AzureModule extends AbstractModule { + protected final ESLogger logger; + private Settings settings; + + @Inject + public AzureModule(Settings settings) { + this.settings = settings; + this.logger = Loggers.getLogger(getClass(), settings); + } + + @Override + protected void configure() { + logger.debug("starting azure services"); + + // If we have set discovery to azure, let's start the azure compute service + if (isDiscoveryReady(settings, logger)) { + logger.debug("starting azure discovery service"); + bind(AzureComputeService.class) + .to(settings.getAsClass(Management.API_IMPLEMENTATION, AzureComputeServiceImpl.class)) + .asEagerSingleton(); + } + + // If we have settings for azure repository, let's start the azure storage service + if (isSnapshotReady(settings, logger)) { + logger.debug("starting azure repository service"); + bind(AzureStorageService.class) + .to(settings.getAsClass(Storage.API_IMPLEMENTATION, AzureStorageServiceImpl.class)) + .asEagerSingleton(); + } + } + + /** + * Check if discovery is meant to start + * @return true if we can start discovery features + */ + public static boolean isCloudReady(Settings settings) { + return (settings.getAsBoolean("cloud.enabled", true)); + } + + /** + * Check if discovery is meant to start + * @return true if we can start discovery features + */ + public static boolean isDiscoveryReady(Settings settings, ESLogger logger) { + // Cloud services are disabled + if (!isCloudReady(settings)) { + logger.trace("cloud settings are disabled"); + return false; + } + + // User set discovery.type: azure + if (!AzureDiscovery.AZURE.equalsIgnoreCase(settings.get("discovery.type"))) { + logger.trace("discovery.type not set to {}", AzureDiscovery.AZURE); + return false; + } + + if (isPropertyMissing(settings, Management.SUBSCRIPTION_ID) || + 
isPropertyMissing(settings, Management.SERVICE_NAME) || + isPropertyMissing(settings, Management.KEYSTORE_PATH) || + isPropertyMissing(settings, Management.KEYSTORE_PASSWORD) + ) { + logger.debug("one or more azure discovery settings are missing. " + + "Check elasticsearch.yml file. Should have [{}], [{}], [{}] and [{}].", + Management.SUBSCRIPTION_ID, + Management.SERVICE_NAME, + Management.KEYSTORE_PATH, + Management.KEYSTORE_PASSWORD); + return false; + } + + logger.trace("all required properties for azure discovery are set!"); + + return true; + } + + /** + * Check if we have repository azure settings available + * @return true if we can use snapshot and restore + */ + public static boolean isSnapshotReady(Settings settings, ESLogger logger) { + // Cloud services are disabled + if (!isCloudReady(settings)) { + logger.trace("cloud settings are disabled"); + return false; + } + + if (isPropertyMissing(settings, Storage.ACCOUNT) || + isPropertyMissing(settings, Storage.KEY)) { + logger.debug("azure repository is not set using [{}] and [{}] properties", + Storage.ACCOUNT, + Storage.KEY); + return false; + } + + logger.trace("all required properties for azure repository are set!"); + + return true; + } + + public static boolean isPropertyMissing(Settings settings, String name) throws ElasticsearchException { + if (!Strings.hasText(settings.get(name))) { + return true; + } + return false; + } + +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java new file mode 100644 index 00000000000..487997d71b6 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure; + +public class AzureServiceDisableException extends IllegalStateException { + public AzureServiceDisableException(String msg) { + super(msg); + } + + public AzureServiceDisableException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java new file mode 100644 index 00000000000..4bd4f1d67f1 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
/**
 * Thrown when a remote call to the Azure management API fails; wraps the
 * underlying SDK exception as its cause.
 */
public class AzureServiceRemoteException extends IllegalStateException {

    // IllegalStateException is Serializable; pin the serial form explicitly
    // instead of relying on the compiler-generated default.
    private static final long serialVersionUID = 1L;

    public AzureServiceRemoteException(String msg) {
        super(msg);
    }

    public AzureServiceRemoteException(String msg, Throwable cause) {
        super(msg, cause);
    }
}
+ */ + +package org.elasticsearch.cloud.azure.blobstore; + +import com.microsoft.azure.storage.StorageException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.repositories.RepositoryException; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.net.URISyntaxException; +import java.util.Map; + +/** + * + */ +public class AzureBlobContainer extends AbstractBlobContainer { + + protected final ESLogger logger = Loggers.getLogger(AzureBlobContainer.class); + protected final AzureBlobStore blobStore; + + protected final String keyPath; + protected final String repositoryName; + + public AzureBlobContainer(String repositoryName, BlobPath path, AzureBlobStore blobStore) { + super(path); + this.blobStore = blobStore; + String keyPath = path.buildAsString("/"); + if (!keyPath.isEmpty()) { + keyPath = keyPath + "/"; + } + this.keyPath = keyPath; + this.repositoryName = repositoryName; + } + + @Override + public boolean blobExists(String blobName) { + try { + return blobStore.client().blobExists(blobStore.container(), buildKey(blobName)); + } catch (URISyntaxException | StorageException e) { + logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); + } + return false; + } + + @Override + public InputStream openInput(String blobName) throws IOException { + try { + return blobStore.client().getInputStream(blobStore.container(), buildKey(blobName)); + } catch (StorageException e) { + if (e.getHttpStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { + throw new FileNotFoundException(e.getMessage()); + } + 
throw new IOException(e); + } catch (URISyntaxException e) { + throw new IOException(e); + } + } + + @Override + public OutputStream createOutput(String blobName) throws IOException { + try { + return new AzureOutputStream(blobStore.client().getOutputStream(blobStore.container(), buildKey(blobName))); + } catch (StorageException e) { + if (e.getHttpStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { + throw new FileNotFoundException(e.getMessage()); + } + throw new IOException(e); + } catch (URISyntaxException e) { + throw new IOException(e); + } catch (IllegalArgumentException e) { + throw new RepositoryException(repositoryName, e.getMessage()); + } + } + + @Override + public void deleteBlob(String blobName) throws IOException { + try { + blobStore.client().deleteBlob(blobStore.container(), buildKey(blobName)); + } catch (URISyntaxException | StorageException e) { + logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); + throw new IOException(e); + } + } + + @Override + public Map listBlobsByPrefix(@Nullable String prefix) throws IOException { + + try { + return blobStore.client().listBlobsByPrefix(blobStore.container(), keyPath, prefix); + } catch (URISyntaxException | StorageException e) { + logger.warn("can not access [{}] in container {{}}: {}", prefix, blobStore.container(), e.getMessage()); + throw new IOException(e); + } + } + + @Override + public void move(String sourceBlobName, String targetBlobName) throws IOException { + try { + String source = keyPath + sourceBlobName; + String target = keyPath + targetBlobName; + + logger.debug("moving blob [{}] to [{}] in container {{}}", source, target, blobStore.container()); + + blobStore.client().moveBlob(blobStore.container(), source, target); + } catch (URISyntaxException e) { + logger.warn("can not move blob [{}] to [{}] in container {{}}: {}", sourceBlobName, targetBlobName, blobStore.container(), e.getMessage()); + throw new IOException(e); + } catch 
(StorageException e) { + logger.warn("can not move blob [{}] to [{}] in container {{}}: {}", sourceBlobName, targetBlobName, blobStore.container(), e.getMessage()); + throw new IOException(e); + } + } + + @Override + public Map listBlobs() throws IOException { + return listBlobsByPrefix(null); + } + + protected String buildKey(String blobName) { + return keyPath + blobName; + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java new file mode 100644 index 00000000000..6edcae73e07 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.blobstore; + +import com.microsoft.azure.storage.StorageException; +import org.elasticsearch.cloud.azure.storage.AzureStorageService; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositoryName; +import org.elasticsearch.repositories.RepositorySettings; + +import java.net.URISyntaxException; + +import static org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage.CONTAINER; +import static org.elasticsearch.repositories.azure.AzureRepository.CONTAINER_DEFAULT; + +/** + * + */ +public class AzureBlobStore extends AbstractComponent implements BlobStore { + + private final AzureStorageService client; + + private final String container; + private final String repositoryName; + + @Inject + public AzureBlobStore(RepositoryName name, Settings settings, RepositorySettings repositorySettings, + AzureStorageService client) throws URISyntaxException, StorageException { + super(settings); + this.client = client; + this.container = repositorySettings.settings().get("container", settings.get(CONTAINER, CONTAINER_DEFAULT)); + this.repositoryName = name.getName(); + } + + @Override + public String toString() { + return container; + } + + public AzureStorageService client() { + return client; + } + + public String container() { + return container; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + return new AzureBlobContainer(repositoryName, path, this); + } + + @Override + public void delete(BlobPath path) { + String keyPath = path.buildAsString("/"); + if (!keyPath.isEmpty()) { + keyPath = keyPath + "/"; + } + + try { + client.deleteFiles(container, keyPath); + } catch 
(URISyntaxException | StorageException e) { + logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); + } + } + + @Override + public void close() { + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java new file mode 100644 index 00000000000..6a95eeba778 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.blobstore; + +import java.io.IOException; +import java.io.OutputStream; + +public class AzureOutputStream extends OutputStream { + + private final OutputStream blobOutputStream; + + public AzureOutputStream(OutputStream blobOutputStream) { + this.blobOutputStream = blobOutputStream; + } + + @Override + public void write(int b) throws IOException { + blobOutputStream.write(b); + } + + @Override + public void close() throws IOException { + try { + blobOutputStream.close(); + } catch (IOException e) { + // Azure is sending a "java.io.IOException: Stream is already closed." + } + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java new file mode 100644 index 00000000000..c79a7450929 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.management; + +import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; + +/** + * + */ +public interface AzureComputeService { + + static public final class Management { + public static final String API_IMPLEMENTATION = "cloud.azure.management.api.impl"; + + public static final String SUBSCRIPTION_ID = "cloud.azure.management.subscription.id"; + public static final String SERVICE_NAME = "cloud.azure.management.cloud.service.name"; + + // Keystore settings + public static final String KEYSTORE_PATH = "cloud.azure.management.keystore.path"; + public static final String KEYSTORE_PASSWORD = "cloud.azure.management.keystore.password"; + public static final String KEYSTORE_TYPE = "cloud.azure.management.keystore.type"; + } + + static public final class Discovery { + public static final String REFRESH = "discovery.azure.refresh_interval"; + + public static final String HOST_TYPE = "discovery.azure.host.type"; + public static final String ENDPOINT_NAME = "discovery.azure.endpoint.name"; + public static final String DEPLOYMENT_NAME = "discovery.azure.deployment.name"; + public static final String DEPLOYMENT_SLOT = "discovery.azure.deployment.slot"; + } + public HostedServiceGetDetailedResponse getServiceDetails(); +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java new file mode 100644 index 00000000000..26406e3811c --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure.management; + +import com.microsoft.windowsazure.Configuration; +import com.microsoft.windowsazure.core.utils.KeyStoreType; +import com.microsoft.windowsazure.management.compute.ComputeManagementClient; +import com.microsoft.windowsazure.management.compute.ComputeManagementService; +import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; +import com.microsoft.windowsazure.management.configuration.ManagementConfiguration; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.azure.AzureServiceDisableException; +import org.elasticsearch.cloud.azure.AzureServiceRemoteException; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.*; + +/** + * + */ +public class AzureComputeServiceImpl extends AbstractLifecycleComponent + implements AzureComputeService { + + static final class Azure { + private static final String ENDPOINT = "https://management.core.windows.net/"; + } + + private final ComputeManagementClient computeManagementClient; + private final String serviceName; + + @Inject 
+ public AzureComputeServiceImpl(Settings settings) { + super(settings); + String subscriptionId = settings.get(SUBSCRIPTION_ID); + + serviceName = settings.get(Management.SERVICE_NAME); + String keystorePath = settings.get(KEYSTORE_PATH); + String keystorePassword = settings.get(KEYSTORE_PASSWORD); + String strKeyStoreType = settings.get(KEYSTORE_TYPE, KeyStoreType.pkcs12.name()); + KeyStoreType tmpKeyStoreType = KeyStoreType.pkcs12; + try { + tmpKeyStoreType = KeyStoreType.fromString(strKeyStoreType); + } catch (Exception e) { + logger.warn("wrong value for [{}]: [{}]. falling back to [{}]...", KEYSTORE_TYPE, + strKeyStoreType, KeyStoreType.pkcs12.name()); + } + KeyStoreType keystoreType = tmpKeyStoreType; + + // Check that we have all needed properties + Configuration configuration; + try { + configuration = ManagementConfiguration.configure(new URI(Azure.ENDPOINT), + subscriptionId, keystorePath, keystorePassword, keystoreType); + } catch (IOException|URISyntaxException e) { + logger.error("can not start azure client: {}", e.getMessage()); + computeManagementClient = null; + return; + } + logger.trace("creating new Azure client for [{}], [{}]", subscriptionId, serviceName); + computeManagementClient = ComputeManagementService.create(configuration); + } + + @Override + public HostedServiceGetDetailedResponse getServiceDetails() { + if (computeManagementClient == null) { + // Azure plugin is disabled + throw new AzureServiceDisableException("azure plugin is disabled."); + } + + try { + return computeManagementClient.getHostedServicesOperations().getDetailed(serviceName); + } catch (Exception e) { + throw new AzureServiceRemoteException("can not get list of azure nodes", e); + } + } + + @Override + protected void doStart() throws ElasticsearchException { + } + + @Override + protected void doStop() throws ElasticsearchException { + } + + @Override + protected void doClose() throws ElasticsearchException { + if (computeManagementClient != null) { + try { + 
computeManagementClient.close(); + } catch (IOException e) { + logger.error("error while closing Azure client", e); + } + } + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeSettingsFilter.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeSettingsFilter.java new file mode 100644 index 00000000000..e584cefffa2 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeSettingsFilter.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.management; + +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; + +import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.*; + +public class AzureComputeSettingsFilter extends AbstractComponent { + + @Inject + protected AzureComputeSettingsFilter(Settings settings, SettingsFilter settingsFilter) { + super(settings); + // Cloud management API settings we need to hide + settingsFilter.addFilter(KEYSTORE_PATH); + settingsFilter.addFilter(KEYSTORE_PASSWORD); + settingsFilter.addFilter(KEYSTORE_TYPE); + settingsFilter.addFilter(SUBSCRIPTION_ID); + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java new file mode 100644 index 00000000000..c9b48aea052 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.storage; + +import com.microsoft.azure.storage.StorageException; +import org.elasticsearch.common.blobstore.BlobMetaData; + +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URISyntaxException; +import java.util.Map; + +/** + * Azure Storage Service interface + * @see AzureStorageServiceImpl for Azure REST API implementation + */ +public interface AzureStorageService { + static public final class Storage { + public static final String API_IMPLEMENTATION = "cloud.azure.storage.api.impl"; + public static final String ACCOUNT = "cloud.azure.storage.account"; + public static final String KEY = "cloud.azure.storage.key"; + public static final String CONTAINER = "repositories.azure.container"; + public static final String BASE_PATH = "repositories.azure.base_path"; + public static final String CHUNK_SIZE = "repositories.azure.chunk_size"; + public static final String COMPRESS = "repositories.azure.compress"; + } + + boolean doesContainerExist(String container); + + void removeContainer(String container) throws URISyntaxException, StorageException; + + void createContainer(String container) throws URISyntaxException, StorageException; + + void deleteFiles(String container, String path) throws URISyntaxException, StorageException; + + boolean blobExists(String container, String blob) throws URISyntaxException, StorageException; + + void deleteBlob(String container, String blob) throws URISyntaxException, StorageException; + + InputStream getInputStream(String container, String blob) throws URISyntaxException, StorageException; + + OutputStream getOutputStream(String container, String blob) throws URISyntaxException, StorageException; + + Map listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException; + + void moveBlob(String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException; +} diff --git 
a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java new file mode 100644 index 00000000000..58c73bc1e57 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -0,0 +1,218 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.storage; + +import com.microsoft.azure.storage.CloudStorageAccount; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.*; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositoryException; + +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Map; + +import static org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage.*; + +/** + * + */ +public class AzureStorageServiceImpl extends AbstractLifecycleComponent + implements AzureStorageService { + + private final String account; + private final String key; + private final String blob; + + private CloudBlobClient client; + + @Inject + public AzureStorageServiceImpl(Settings settings) { + super(settings); + // We try to load storage API settings from `cloud.azure.` + account = settings.get(ACCOUNT); + key = settings.get(KEY); + blob = "http://" + account + ".blob.core.windows.net/"; + + try { + if (account != null) { + logger.trace("creating new Azure storage client using account [{}], key [{}], blob [{}]", account, key, blob); + + String storageConnectionString = + "DefaultEndpointsProtocol=http;" + + "AccountName="+ account +";" + + "AccountKey=" + key; + + // Retrieve storage account from connection-string. + CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); + + // Create the blob client. 
+ client = storageAccount.createCloudBlobClient(); + } + } catch (Exception e) { + // Can not start Azure Storage Client + logger.error("can not start azure storage client: {}", e.getMessage()); + } + } + + @Override + public boolean doesContainerExist(String container) { + try { + CloudBlobContainer blob_container = client.getContainerReference(container); + return blob_container.exists(); + } catch (Exception e) { + logger.error("can not access container [{}]", container); + } + return false; + } + + @Override + public void removeContainer(String container) throws URISyntaxException, StorageException { + CloudBlobContainer blob_container = client.getContainerReference(container); + // TODO Should we set some timeout and retry options? + /* + BlobRequestOptions options = new BlobRequestOptions(); + options.setTimeoutIntervalInMs(1000); + options.setRetryPolicyFactory(new RetryNoRetry()); + blob_container.deleteIfExists(options, null); + */ + logger.trace("removing container [{}]", container); + blob_container.deleteIfExists(); + } + + @Override + public void createContainer(String container) throws URISyntaxException, StorageException { + try { + CloudBlobContainer blob_container = client.getContainerReference(container); + logger.trace("creating container [{}]", container); + blob_container.createIfNotExists(); + } catch (IllegalArgumentException e) { + logger.trace("fails creating container [{}]", container, e.getMessage()); + throw new RepositoryException(container, e.getMessage()); + } + } + + @Override + public void deleteFiles(String container, String path) throws URISyntaxException, StorageException { + logger.trace("delete files container [{}], path [{}]", container, path); + + // Container name must be lower case. 
+ CloudBlobContainer blob_container = client.getContainerReference(container); + if (blob_container.exists()) { + for (ListBlobItem blobItem : blob_container.listBlobs(path)) { + logger.trace("removing blob [{}]", blobItem.getUri()); + deleteBlob(container, blobItem.getUri().toString()); + } + } + } + + @Override + public boolean blobExists(String container, String blob) throws URISyntaxException, StorageException { + // Container name must be lower case. + CloudBlobContainer blob_container = client.getContainerReference(container); + if (blob_container.exists()) { + CloudBlockBlob azureBlob = blob_container.getBlockBlobReference(blob); + return azureBlob.exists(); + } + + return false; + } + + @Override + public void deleteBlob(String container, String blob) throws URISyntaxException, StorageException { + logger.trace("delete blob for container [{}], blob [{}]", container, blob); + + // Container name must be lower case. + CloudBlobContainer blob_container = client.getContainerReference(container); + if (blob_container.exists()) { + logger.trace("container [{}]: blob [{}] found. 
removing.", container, blob); + CloudBlockBlob azureBlob = blob_container.getBlockBlobReference(blob); + azureBlob.delete(); + } + } + + @Override + public InputStream getInputStream(String container, String blob) throws URISyntaxException, StorageException { + logger.trace("reading container [{}], blob [{}]", container, blob); + return client.getContainerReference(container).getBlockBlobReference(blob).openInputStream(); + } + + @Override + public OutputStream getOutputStream(String container, String blob) throws URISyntaxException, StorageException { + logger.trace("writing container [{}], blob [{}]", container, blob); + return client.getContainerReference(container).getBlockBlobReference(blob).openOutputStream(); + } + + @Override + public Map listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException { + logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); + MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); + + CloudBlobContainer blob_container = client.getContainerReference(container); + if (blob_container.exists()) { + for (ListBlobItem blobItem : blob_container.listBlobs(keyPath + prefix)) { + URI uri = blobItem.getUri(); + logger.trace("blob url [{}]", uri); + String blobpath = uri.getPath().substring(container.length() + 1); + BlobProperties properties = blob_container.getBlockBlobReference(blobpath).getProperties(); + String name = blobpath.substring(keyPath.length() + 1); + logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength()); + blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); + } + } + + return blobsBuilder.immutableMap(); + } + + @Override + public void moveBlob(String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException { + logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}]", container, sourceBlob, targetBlob); + CloudBlobContainer 
blob_container = client.getContainerReference(container); + CloudBlockBlob blobSource = blob_container.getBlockBlobReference(sourceBlob); + if (blobSource.exists()) { + CloudBlockBlob blobTarget = blob_container.getBlockBlobReference(targetBlob); + blobTarget.startCopyFromBlob(blobSource); + blobSource.delete(); + logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}] -> done", container, sourceBlob, targetBlob); + } + } + + @Override + protected void doStart() throws ElasticsearchException { + logger.debug("starting azure storage client instance"); + } + + @Override + protected void doStop() throws ElasticsearchException { + logger.debug("stopping azure storage client instance"); + } + + @Override + protected void doClose() throws ElasticsearchException { + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilter.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilter.java new file mode 100644 index 00000000000..ac23f0c2463 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilter.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cloud.azure.storage;

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;

import static org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage.ACCOUNT;
import static org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage.KEY;

/**
 * Registers settings filters so the Azure storage credentials
 * (account name and access key) are never echoed back by the settings APIs.
 */
public class AzureStorageSettingsFilter extends AbstractComponent {

    @Inject
    protected AzureStorageSettingsFilter(Settings settings, SettingsFilter settingsFilter) {
        super(settings);
        // Cloud storage API credentials must be hidden from settings output.
        settingsFilter.addFilter(ACCOUNT);
        settingsFilter.addFilter(KEY);
    }
}
+ */ + +package org.elasticsearch.discovery.azure; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.settings.ClusterDynamicSettings; +import org.elasticsearch.cluster.settings.DynamicSettings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.elect.ElectMasterService; +import org.elasticsearch.discovery.zen.ping.ZenPingService; +import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * Zen discovery flavored for Azure: behavior is entirely inherited from + * {@link ZenDiscovery}; the Azure-specific host resolution is contributed + * separately through {@code AzureUnicastHostsProvider}. This subclass only + * provides the distinct type bound under the "azure" discovery name. + */ +public class AzureDiscovery extends ZenDiscovery { + + // Value used for the "discovery.type" setting to select this implementation. + public static final String AZURE = "azure"; + + @Inject + public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, + ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, + DiscoverySettings discoverySettings, ElectMasterService electMasterService, @ClusterDynamicSettings DynamicSettings dynamicSettings) { + // Pure delegation: all discovery mechanics live in ZenDiscovery. + super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, + pingService, electMasterService, discoverySettings, dynamicSettings); + } +}
See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.discovery.azure;

import org.elasticsearch.cloud.azure.AzureModule;
import org.elasticsearch.cloud.azure.management.AzureComputeSettingsFilter;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.ZenDiscoveryModule;

/**
 * Zen discovery module that plugs in the Azure unicast hosts provider and
 * binds {@link Discovery} to {@link AzureDiscovery}, but only when the
 * required Azure discovery settings are present (checked via
 * {@code AzureModule.isDiscoveryReady}).
 */
public class AzureDiscoveryModule extends ZenDiscoveryModule {

    protected final ESLogger logger;
    private final Settings settings;

    public AzureDiscoveryModule(Settings settings) {
        this.logger = Loggers.getLogger(getClass(), settings);
        this.settings = settings;
        if (AzureModule.isDiscoveryReady(settings, logger)) {
            addUnicastHostProvider(AzureUnicastHostsProvider.class);
        }
    }

    @Override
    protected void bindDiscovery() {
        // The settings filter is always bound so credentials stay hidden.
        bind(AzureComputeSettingsFilter.class).asEagerSingleton();
        if (!AzureModule.isDiscoveryReady(settings, logger)) {
            logger.debug("disabling azure discovery features");
            return;
        }
        bind(Discovery.class).to(AzureDiscovery.class).asEagerSingleton();
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.discovery.azure;

import com.microsoft.windowsazure.management.compute.models.*;
import org.elasticsearch.Version;
import org.elasticsearch.cloud.azure.AzureServiceDisableException;
import org.elasticsearch.cloud.azure.AzureServiceRemoteException;
import org.elasticsearch.cloud.azure.management.AzureComputeService;
import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;

/**
 * Builds the unicast hosts list for zen discovery from the Azure management
 * API: each role instance of the configured cloud service / deployment slot
 * becomes a candidate node, addressed either by its private or public IP.
 */
public class AzureUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {

    /** Which IP of an Azure role instance is used to reach other nodes. */
    public enum HostType {
        PRIVATE_IP("private_ip"),
        PUBLIC_IP("public_ip");

        private final String type;

        HostType(String type) {
            this.type = type;
        }

        /**
         * Case-insensitive lookup by configured value.
         * @return the matching constant, or {@code null} when none matches
         */
        public static HostType fromString(String type) {
            for (HostType hostType : values()) {
                if (hostType.type.equalsIgnoreCase(type)) {
                    return hostType;
                }
            }
            return null;
        }
    }

    /** Azure deployment slot to inspect (production or staging). */
    public enum Deployment {
        PRODUCTION("production", DeploymentSlot.Production),
        STAGING("staging", DeploymentSlot.Staging);

        private final String deployment;
        private final DeploymentSlot slot;

        Deployment(String deployment, DeploymentSlot slot) {
            this.deployment = deployment;
            this.slot = slot;
        }

        /**
         * Case-insensitive lookup by configured value.
         * @return the matching constant, or {@code null} when none matches
         */
        public static Deployment fromString(String string) {
            for (Deployment deployment : values()) {
                if (deployment.deployment.equalsIgnoreCase(string)) {
                    return deployment;
                }
            }
            return null;
        }
    }

    private final AzureComputeService azureComputeService;
    private TransportService transportService;
    private NetworkService networkService;
    private final Version version;

    // Caching of the computed node list; see buildDynamicNodes() javadoc.
    private final TimeValue refreshInterval;
    private long lastRefresh;
    private List<DiscoveryNode> cachedDiscoNodes;
    private final HostType hostType;
    private final String publicEndpointName;
    private final String deploymentName;
    private final DeploymentSlot deploymentSlot;

    @Inject
    public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureComputeService,
                                     TransportService transportService,
                                     NetworkService networkService,
                                     Version version) {
        super(settings);
        this.azureComputeService = azureComputeService;
        this.transportService = transportService;
        this.networkService = networkService;
        this.version = version;

        this.refreshInterval = settings.getAsTime(Discovery.REFRESH, TimeValue.timeValueSeconds(0));

        // host_type: private_ip (default) or public_ip; bad values fall back with a warning.
        String strHostType = settings.get(Discovery.HOST_TYPE, HostType.PRIVATE_IP.name()).toUpperCase(Locale.ROOT);
        HostType tmpHostType = HostType.fromString(strHostType);
        if (tmpHostType == null) {
            logger.warn("wrong value for [{}]: [{}]. falling back to [{}]...", Discovery.HOST_TYPE,
                    strHostType, HostType.PRIVATE_IP.name().toLowerCase(Locale.ROOT));
            tmpHostType = HostType.PRIVATE_IP;
        }
        this.hostType = tmpHostType;
        this.publicEndpointName = settings.get(Discovery.ENDPOINT_NAME, "elasticsearch");

        // Deployment name could be set with discovery.azure.deployment.name
        // Default to cloud.azure.management.cloud.service.name
        this.deploymentName = settings.get(Discovery.DEPLOYMENT_NAME);

        // deployment_slot: production (default) or staging; bad values fall back with a warning.
        String strDeployment = settings.get(Discovery.DEPLOYMENT_SLOT, Deployment.PRODUCTION.deployment);
        Deployment tmpDeployment = Deployment.fromString(strDeployment);
        if (tmpDeployment == null) {
            logger.warn("wrong value for [{}]: [{}]. falling back to [{}]...", Discovery.DEPLOYMENT_SLOT, strDeployment,
                    Deployment.PRODUCTION.deployment);
            tmpDeployment = Deployment.PRODUCTION;
        }
        this.deploymentSlot = tmpDeployment.slot;
    }

    /**
     * We build the list of Nodes from Azure Management API
     * Information can be cached using `cloud.azure.refresh_interval` property if needed.
     * Setting `cloud.azure.refresh_interval` to `-1` will cause infinite caching.
     * Setting `cloud.azure.refresh_interval` to `0` will disable caching (default).
     */
    @Override
    public List<DiscoveryNode> buildDynamicNodes() {
        // Serve from cache when a refresh interval is configured and still valid
        // (negative interval means "cache forever").
        if (refreshInterval.millis() != 0) {
            if (cachedDiscoNodes != null &&
                    (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
                logger.trace("using cache to retrieve node list");
                return cachedDiscoNodes;
            }
            lastRefresh = System.currentTimeMillis();
        }
        logger.debug("start building nodes list using Azure API");

        cachedDiscoNodes = new ArrayList<>();

        HostedServiceGetDetailedResponse detailed;
        try {
            detailed = azureComputeService.getServiceDetails();
        } catch (AzureServiceDisableException e) {
            logger.debug("Azure discovery service has been disabled. Returning empty list of nodes.");
            return cachedDiscoNodes;
        } catch (AzureServiceRemoteException e) {
            // We got a remote exception
            logger.warn("can not get list of azure nodes: [{}]. Returning empty list of nodes.", e.getMessage());
            logger.trace("AzureServiceRemoteException caught", e);
            return cachedDiscoNodes;
        }

        // Used only to log when we are about to add ourselves to the list.
        InetAddress ipAddress = null;
        try {
            ipAddress = networkService.resolvePublishHostAddress(null);
            logger.trace("ip of current node: [{}]", ipAddress);
        } catch (IOException e) {
            // We can't find the publish host address... Hmmm. Too bad :-(
            logger.trace("exception while finding ip", e);
        }

        for (HostedServiceGetDetailedResponse.Deployment deployment : detailed.getDeployments()) {
            // We check the deployment slot
            if (deployment.getDeploymentSlot() != deploymentSlot) {
                logger.debug("current deployment slot [{}] for [{}] is different from [{}]. skipping...",
                        deployment.getDeploymentSlot(), deployment.getName(), deploymentSlot);
                continue;
            }

            // If provided, we check the deployment name
            if (deploymentName != null && !deploymentName.equals(deployment.getName())) {
                logger.debug("current deployment name [{}] different from [{}]. skipping...",
                        deployment.getName(), deploymentName);
                continue;
            }

            // We check current deployment status
            if (deployment.getStatus() != DeploymentStatus.Starting &&
                    deployment.getStatus() != DeploymentStatus.Deploying &&
                    deployment.getStatus() != DeploymentStatus.Running) {
                logger.debug("[{}] status is [{}]. skipping...",
                        deployment.getName(), deployment.getStatus());
                continue;
            }

            // In other case, it should be the right deployment so we can add it to the list of instances

            for (RoleInstance instance : deployment.getRoleInstances()) {
                String networkAddress = null;
                // Let's detect if we want to use public or private IP
                switch (hostType) {
                    case PRIVATE_IP:
                        InetAddress privateIp = instance.getIPAddress();

                        if (privateIp != null) {
                            if (privateIp.equals(ipAddress)) {
                                logger.trace("adding ourselves {}", ipAddress);
                            }
                            networkAddress = privateIp.getHostAddress();
                        } else {
                            logger.trace("no private ip provided. ignoring [{}]...", instance.getInstanceName());
                        }
                        break;
                    case PUBLIC_IP:
                        // Pick the public endpoint whose name matches discovery.azure.endpoint.name.
                        for (InstanceEndpoint endpoint : instance.getInstanceEndpoints()) {
                            if (!publicEndpointName.equals(endpoint.getName())) {
                                logger.trace("ignoring endpoint [{}] as different than [{}]",
                                        endpoint.getName(), publicEndpointName);
                                continue;
                            }

                            networkAddress = endpoint.getVirtualIPAddress().getHostAddress() + ":" + endpoint.getPort();
                        }

                        if (networkAddress == null) {
                            logger.trace("no public ip provided. ignoring [{}]...", instance.getInstanceName());
                        }
                        break;
                    default:
                        // This could never happen!
                        logger.warn("undefined host_type [{}]. Please check your settings.", hostType);
                        return cachedDiscoNodes;
                }

                if (networkAddress == null) {
                    // We have a bad parameter here or not enough information from azure
                    logger.warn("no network address found. ignoring [{}]...", instance.getInstanceName());
                    continue;
                }

                try {
                    TransportAddress[] addresses = transportService.addressesFromString(networkAddress);
                    // we only limit to 1 addresses, makes no sense to ping 100 ports
                    logger.trace("adding {}, transport_address {}", networkAddress, addresses[0]);
                    cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getInstanceName(), addresses[0],
                            version.minimumCompatibilityVersion()));
                } catch (Exception e) {
                    logger.warn("can not convert [{}] to transport address. skipping. [{}]", networkAddress, e.getMessage());
                }
            }
        }

        logger.debug("{} node(s) added", cachedDiscoNodes.size());

        return cachedDiscoNodes;
    }
}
+ */ + +package org.elasticsearch.index.store.smbmmapfs; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.LockFactory; +import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.store.SmbDirectoryWrapper; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.IndexStore; + +import java.io.IOException; +import java.nio.file.Path; + +public class SmbMmapFsDirectoryService extends FsDirectoryService { + + @Inject + public SmbMmapFsDirectoryService(@IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath path) { + super(indexSettings, indexStore, path); + } + + @Override + protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { + logger.debug("wrapping MMapDirectory for SMB"); + return new SmbDirectoryWrapper(new MMapDirectory(location, buildLockFactory(indexSettings))); + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java new file mode 100644 index 00000000000..04229756e1b --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.smbmmapfs; + +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.settings.IndexSettingsService; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.indices.store.IndicesStore; + +public class SmbMmapFsIndexStore extends IndexStore { + + @Inject + public SmbMmapFsIndexStore(Index index, @IndexSettings Settings indexSettings, + IndexSettingsService indexSettingsService, IndicesStore indicesStore) { + super(index, indexSettings, indexSettingsService, indicesStore); + } + + @Override + public Class shardDirectory() { + return SmbMmapFsDirectoryService.class; + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStoreModule.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStoreModule.java new file mode 100644 index 00000000000..335a58b32c5 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStoreModule.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.smbmmapfs; + +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.index.store.IndexStore; + +/** + * Guice module selecting {@link SmbMmapFsIndexStore} as the {@link IndexStore} + * implementation (the "smb_mmap_fs" store type). + */ +public class SmbMmapFsIndexStoreModule extends AbstractModule { + + @Override + protected void configure() { + // Single binding: route IndexStore to the SMB-aware mmap store. + bind(IndexStore.class).to(SmbMmapFsIndexStore.class).asEagerSingleton(); + } +}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.smbsimplefs; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.LockFactory; +import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.SmbDirectoryWrapper; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.store.FsDirectoryService; +import org.elasticsearch.index.store.IndexStore; + +import java.io.IOException; +import java.nio.file.Path; + +public class SmbSimpleFsDirectoryService extends FsDirectoryService { + + @Inject + public SmbSimpleFsDirectoryService(@IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath path) { + super(indexSettings, indexStore, path); + } + + @Override + protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { + logger.debug("wrapping SimpleFSDirectory for SMB"); + return new SmbDirectoryWrapper(new SimpleFSDirectory(location, lockFactory)); + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java new file mode 100644 index 00000000000..48133440f6d --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java @@ -0,0 +1,44 @@ +/* + * Licensed to 
Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.store.smbsimplefs;

import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.indices.store.IndicesStore;

/**
 * Index store whose shards use {@link SmbSimpleFsDirectoryService}
 * (SimpleFSDirectory-backed directories wrapped for SMB shares).
 */
public class SmbSimpleFsIndexStore extends IndexStore {

    @Inject
    public SmbSimpleFsIndexStore(Index index, @IndexSettings Settings indexSettings,
                                 IndexSettingsService indexSettingsService, IndicesStore indicesStore) {
        super(index, indexSettings, indexSettingsService, indicesStore);
    }

    /**
     * @return the directory service implementation used for this store's shards
     */
    @Override
    public Class<? extends DirectoryService> shardDirectory() {
        return SmbSimpleFsDirectoryService.class;
    }
}
b/plugins/cloud-azure/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStoreModule.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.smbsimplefs; + +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.index.store.IndexStore; + +/** + * Guice module selecting {@link SmbSimpleFsIndexStore} as the {@link IndexStore} + * implementation (the "smb_simple_fs" store type). + */ +public class SmbSimpleFsIndexStoreModule extends AbstractModule { + + @Override + protected void configure() { + // Single binding: route IndexStore to the SMB-aware simple-fs store. + bind(IndexStore.class).to(SmbSimpleFsIndexStore.class).asEagerSingleton(); + } +}
Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.plugin.cloud.azure;

import org.elasticsearch.cloud.azure.AzureModule;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.AbstractPlugin;
import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.repositories.azure.AzureRepository;
import org.elasticsearch.repositories.azure.AzureRepositoryModule;

import java.util.ArrayList;
import java.util.Collection;

import static org.elasticsearch.cloud.azure.AzureModule.isSnapshotReady;

/**
 * Entry point of the cloud-azure plugin. Registers the {@link AzureModule}
 * when the cloud settings are present, and hooks the Azure snapshot
 * repository into the repositories module when snapshot settings are ready.
 */
public class CloudAzurePlugin extends AbstractPlugin {

    private final Settings settings;
    protected final ESLogger logger = Loggers.getLogger(CloudAzurePlugin.class);

    public CloudAzurePlugin(Settings settings) {
        this.settings = settings;
        logger.trace("starting azure plugin...");
    }

    @Override
    public String name() {
        return "cloud-azure";
    }

    @Override
    public String description() {
        return "Cloud Azure Plugin";
    }

    /**
     * @return the {@link AzureModule} when the required cloud settings are
     *         present (per {@code AzureModule.isCloudReady}), otherwise nothing
     */
    @Override
    public Collection<Class<? extends Module>> modules() {
        Collection<Class<? extends Module>> modules = new ArrayList<>();
        if (AzureModule.isCloudReady(settings)) {
            modules.add(AzureModule.class);
        }
        return modules;
    }

    /**
     * Registers the "azure" snapshot repository type on the repositories
     * module, but only when the snapshot settings are complete.
     */
    @Override
    public void processModule(Module module) {
        if (isSnapshotReady(settings, logger)
                && module instanceof RepositoriesModule) {
            ((RepositoriesModule) module).registerRepository(AzureRepository.TYPE, AzureRepositoryModule.class);
        }
    }
}
+ */ + +package org.elasticsearch.repositories.azure; + +import com.microsoft.azure.storage.StorageException; +import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; +import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.snapshots.IndexShardRepository; +import org.elasticsearch.repositories.RepositoryName; +import org.elasticsearch.repositories.RepositorySettings; +import org.elasticsearch.repositories.RepositoryVerificationException; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotCreationException; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.List; + +/** + * Azure file system implementation of the BlobStoreRepository + *

+ * Azure file system repository supports the following settings:
+ * <dl>
+ * <dt>{@code container}</dt><dd>Azure container name. Defaults to elasticsearch-snapshots</dd>
+ * <dt>{@code base_path}</dt><dd>Specifies the path within bucket to repository data. Defaults to root directory.</dd>
+ * <dt>{@code chunk_size}</dt><dd>Large file can be divided into chunks. This parameter specifies the chunk size. Defaults to 64mb.</dd>
+ * <dt>{@code compress}</dt><dd>If set to true metadata files will be stored compressed. Defaults to false.</dd>
+ * </dl>
+ */ +public class AzureRepository extends BlobStoreRepository { + + public final static String TYPE = "azure"; + public final static String CONTAINER_DEFAULT = "elasticsearch-snapshots"; + + static public final class Repository { + public static final String CONTAINER = "container"; + public static final String CHUNK_SIZE = "chunk_size"; + public static final String COMPRESS = "compress"; + public static final String BASE_PATH = "base_path"; + } + + private final AzureBlobStore blobStore; + + private final BlobPath basePath; + + private ByteSizeValue chunkSize; + + private boolean compress; + + @Inject + public AzureRepository(RepositoryName name, RepositorySettings repositorySettings, + IndexShardRepository indexShardRepository, + AzureBlobStore azureBlobStore) throws IOException, URISyntaxException, StorageException { + super(name.getName(), repositorySettings, indexShardRepository); + + String container = repositorySettings.settings().get(Repository.CONTAINER, + settings.get(Storage.CONTAINER, CONTAINER_DEFAULT)); + + this.blobStore = azureBlobStore; + this.chunkSize = repositorySettings.settings().getAsBytesSize(Repository.CHUNK_SIZE, + settings.getAsBytesSize(Storage.CHUNK_SIZE, new ByteSizeValue(64, ByteSizeUnit.MB))); + + if (this.chunkSize.getMb() > 64) { + logger.warn("azure repository does not support yet size > 64mb. 
Fall back to 64mb."); + this.chunkSize = new ByteSizeValue(64, ByteSizeUnit.MB); + } + + this.compress = repositorySettings.settings().getAsBoolean(Repository.COMPRESS, + settings.getAsBoolean(Storage.COMPRESS, false)); + String basePath = repositorySettings.settings().get(Repository.BASE_PATH, null); + + if (Strings.hasLength(basePath)) { + // Remove starting / if any + basePath = Strings.trimLeadingCharacter(basePath, '/'); + BlobPath path = new BlobPath(); + for(String elem : Strings.splitStringToArray(basePath, '/')) { + path = path.add(elem); + } + this.basePath = path; + } else { + this.basePath = BlobPath.cleanPath(); + } + logger.debug("using container [{}], chunk_size [{}], compress [{}], base_path [{}]", + container, chunkSize, compress, basePath); + } + + /** + * {@inheritDoc} + */ + @Override + protected BlobStore blobStore() { + return blobStore; + } + + @Override + protected BlobPath basePath() { + return basePath; + } + + /** + * {@inheritDoc} + */ + @Override + protected boolean isCompress() { + return compress; + } + + /** + * {@inheritDoc} + */ + @Override + protected ByteSizeValue chunkSize() { + return chunkSize; + } + + @Override + public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData metaData) { + try { + if (!blobStore.client().doesContainerExist(blobStore.container())) { + logger.debug("container [{}] does not exist. 
Creating...", blobStore.container()); + blobStore.client().createContainer(blobStore.container()); + } + super.initializeSnapshot(snapshotId, indices, metaData); + } catch (StorageException e) { + logger.warn("can not initialize container [{}]: [{}]", blobStore.container(), e.getMessage()); + throw new SnapshotCreationException(snapshotId, e); + } catch (URISyntaxException e) { + logger.warn("can not initialize container [{}]: [{}]", blobStore.container(), e.getMessage()); + throw new SnapshotCreationException(snapshotId, e); + } + } + + @Override + public String startVerification() { + try { + if (!blobStore.client().doesContainerExist(blobStore.container())) { + logger.debug("container [{}] does not exist. Creating...", blobStore.container()); + blobStore.client().createContainer(blobStore.container()); + } + return super.startVerification(); + } catch (StorageException e) { + logger.warn("can not initialize container [{}]: [{}]", blobStore.container(), e.getMessage()); + throw new RepositoryVerificationException(repositoryName, "can not initialize container " + blobStore.container(), e); + } catch (URISyntaxException e) { + logger.warn("can not initialize container [{}]: [{}]", blobStore.container(), e.getMessage()); + throw new RepositoryVerificationException(repositoryName, "can not initialize container " + blobStore.container(), e); + } + } +} diff --git a/plugins/cloud-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryModule.java b/plugins/cloud-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryModule.java new file mode 100644 index 00000000000..11d420b1b07 --- /dev/null +++ b/plugins/cloud-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryModule.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + +import org.elasticsearch.cloud.azure.AzureModule; +import org.elasticsearch.cloud.azure.storage.AzureStorageSettingsFilter; +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.snapshots.IndexShardRepository; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository; +import org.elasticsearch.repositories.Repository; + +/** + * Azure repository module + */ +public class AzureRepositoryModule extends AbstractModule { + + protected final ESLogger logger; + private Settings settings; + + public AzureRepositoryModule(Settings settings) { + super(); + this.logger = Loggers.getLogger(getClass(), settings); + this.settings = settings; + } + + /** + * {@inheritDoc} + */ + @Override + protected void configure() { + bind(AzureStorageSettingsFilter.class).asEagerSingleton(); + if (AzureModule.isSnapshotReady(settings, logger)) { + bind(Repository.class).to(AzureRepository.class).asEagerSingleton(); + bind(IndexShardRepository.class).to(BlobStoreIndexShardRepository.class).asEagerSingleton(); + } else { + logger.debug("disabling azure snapshot and restore features"); + } + } + +} + diff --git 
a/plugins/cloud-azure/src/main/resources/es-plugin.properties b/plugins/cloud-azure/src/main/resources/es-plugin.properties new file mode 100644 index 00000000000..3bcf1e294a7 --- /dev/null +++ b/plugins/cloud-azure/src/main/resources/es-plugin.properties @@ -0,0 +1,12 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with this work for additional +# information regarding copyright ownership. ElasticSearch licenses this file to you +# under the Apache License, Version 2.0 (the "License"); you may not use this +# file except in compliance with the License. You may obtain a copy of the +# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by +# applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. +plugin=org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin +version=${project.version} diff --git a/plugins/cloud-azure/src/test/java/org/apache/lucene/store/ElasticSearchBaseDirectoryTestCase.java b/plugins/cloud-azure/src/test/java/org/apache/lucene/store/ElasticSearchBaseDirectoryTestCase.java new file mode 100644 index 00000000000..32805f4f3aa --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/apache/lucene/store/ElasticSearchBaseDirectoryTestCase.java @@ -0,0 +1,43 @@ +package org.apache.lucene.store; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TimeUnits; +import org.elasticsearch.bootstrap.BootstrapForTesting; +import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; + +import com.carrotsearch.randomizedtesting.annotations.Listeners; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; + +/** + * Extends Lucene's BaseDirectoryTestCase with ES test behavior. + */ +@Listeners({ + ReproduceInfoPrinter.class +}) +@TimeoutSuite(millis = TimeUnits.HOUR) +@LuceneTestCase.SuppressReproduceLine +@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") +public abstract class ElasticSearchBaseDirectoryTestCase extends BaseDirectoryTestCase { + static { + BootstrapForTesting.ensureInitialized(); + } +} diff --git a/plugins/cloud-azure/src/test/java/org/apache/lucene/store/SmbMMapDirectoryTest.java b/plugins/cloud-azure/src/test/java/org/apache/lucene/store/SmbMMapDirectoryTest.java new file mode 100644 index 00000000000..2b0124c2fbc --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/apache/lucene/store/SmbMMapDirectoryTest.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.lucene.store; + +import java.io.IOException; +import java.nio.file.Path; + +public class SmbMMapDirectoryTest extends ElasticSearchBaseDirectoryTestCase { + + @Override + protected Directory getDirectory(Path file) throws IOException { + return new SmbDirectoryWrapper(new MMapDirectory(file)); + } +} \ No newline at end of file diff --git a/plugins/cloud-azure/src/test/java/org/apache/lucene/store/SmbSimpleFSDirectoryTest.java b/plugins/cloud-azure/src/test/java/org/apache/lucene/store/SmbSimpleFSDirectoryTest.java new file mode 100644 index 00000000000..ae65cd7447b --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/apache/lucene/store/SmbSimpleFSDirectoryTest.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.lucene.store; + +import java.io.IOException; +import java.nio.file.Path; + +public class SmbSimpleFSDirectoryTest extends ElasticSearchBaseDirectoryTestCase { + + @Override + protected Directory getDirectory(Path file) throws IOException { + return new SmbDirectoryWrapper(new SimpleFSDirectory(file)); + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java new file mode 100644 index 00000000000..2ab09adac95 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.azure.itest; + +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cloud.azure.AbstractAzureTest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.hamcrest.Matchers; +import org.junit.Test; + +/** + * This test needs Azure to run and -Dtests.thirdparty=true to be set + * and -Des.config=/path/to/elasticsearch.yml + * @see org.elasticsearch.cloud.azure.AbstractAzureTest + */ +@ElasticsearchIntegrationTest.ClusterScope( + scope = ElasticsearchIntegrationTest.Scope.TEST, + numDataNodes = 1, + numClientNodes = 0, + transportClientRatio = 0.0) +public class AzureSimpleITest extends AbstractAzureTest { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + // For now we let the user who runs tests to define if he wants or not to run discovery tests + // by setting in elasticsearch.yml: discovery.type: azure + // .put("discovery.type", "azure") + .build(); + } + + @Test + public void one_node_should_run() { + // Do nothing... 
Just start :-) + // but let's check that we have at least 1 node (local node) + ClusterStateResponse clusterState = client().admin().cluster().prepareState().execute().actionGet(); + + assertThat(clusterState.getState().getNodes().getSize(), Matchers.greaterThanOrEqualTo(1)); + } + + @Override + public Settings indexSettings() { + // During restore we frequently restore index to exactly the same state it was before, that might cause the same + // checksum file to be written twice during restore operation + return Settings.builder().put(super.indexSettings()) + .build(); + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java new file mode 100644 index 00000000000..7a2cc37d136 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.FailedToResolveConfigException; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ThirdParty; + +/** + * Base class for Azure tests that require credentials. + *

+ * You must specify {@code -Dtests.thirdparty=true -Dtests.config=/path/to/config} + * in order to run these tests. + */ +@ThirdParty +public abstract class AbstractAzureTest extends ElasticsearchIntegrationTest { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true) + .put(readSettingsFromFile()) + .build(); + } + + protected Settings readSettingsFromFile() { + Settings.Builder settings = Settings.builder(); + settings.put("path.home", createTempDir()); + Environment environment = new Environment(settings.build()); + + // if explicit, just load it and don't load from env + try { + if (Strings.hasText(System.getProperty("tests.config"))) { + settings.loadFromUrl(environment.resolveConfig(System.getProperty("tests.config"))); + } else { + throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); + } + } catch (FailedToResolveConfigException exception) { + throw new IllegalStateException("your test configuration file is incorrect: " + System.getProperty("tests.config"), exception); + } + return settings.build(); + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceAbstractMock.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceAbstractMock.java new file mode 100644 index 00000000000..c11060a84a9 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceAbstractMock.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure.management; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.settings.Settings; + +/** + * + */ +public abstract class AzureComputeServiceAbstractMock extends AbstractLifecycleComponent + implements AzureComputeService { + + protected AzureComputeServiceAbstractMock(Settings settings) { + super(settings); + logger.debug("starting Azure Mock [{}]", this.getClass().getSimpleName()); + } + + @Override + protected void doStart() throws ElasticsearchException { + logger.debug("starting Azure Api Mock"); + } + + @Override + protected void doStop() throws ElasticsearchException { + logger.debug("stopping Azure Api Mock"); + } + + @Override + protected void doClose() throws ElasticsearchException { + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java new file mode 100644 index 00000000000..b34b5aeb8b1 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure.management; + +import com.microsoft.windowsazure.management.compute.models.*; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; + +import java.net.InetAddress; + +/** + * Mock Azure API with a single started node + */ +public class AzureComputeServiceSimpleMock extends AzureComputeServiceAbstractMock { + + @Inject + protected AzureComputeServiceSimpleMock(Settings settings) { + super(settings); + } + + @Override + public HostedServiceGetDetailedResponse getServiceDetails() { + HostedServiceGetDetailedResponse response = new HostedServiceGetDetailedResponse(); + HostedServiceGetDetailedResponse.Deployment deployment = new HostedServiceGetDetailedResponse.Deployment(); + + // Fake the deployment + deployment.setName("dummy"); + deployment.setDeploymentSlot(DeploymentSlot.Production); + deployment.setStatus(DeploymentStatus.Running); + + // Fake an instance + RoleInstance instance = new RoleInstance(); + instance.setInstanceName("dummy1"); + + // Fake the private IP + instance.setIPAddress(InetAddress.getLoopbackAddress()); + + // Fake the public IP + InstanceEndpoint endpoint = new InstanceEndpoint(); + 
endpoint.setName("elasticsearch"); + endpoint.setVirtualIPAddress(InetAddress.getLoopbackAddress()); + endpoint.setPort(9400); + instance.setInstanceEndpoints(CollectionUtils.newArrayList(endpoint)); + + deployment.setRoleInstances(CollectionUtils.newArrayList(instance)); + response.setDeployments(CollectionUtils.newArrayList(deployment)); + + return response; + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java new file mode 100644 index 00000000000..f6a4c567259 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.cloud.azure.management;
+
+import com.microsoft.windowsazure.management.compute.models.*;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.CollectionUtils;
+
+import java.net.InetAddress;
+
+
+/**
+ * Mock Azure API with two started nodes
+ */
+public class AzureComputeServiceTwoNodesMock extends AzureComputeServiceAbstractMock {
+
+    NetworkService networkService;
+
+    @Inject
+    protected AzureComputeServiceTwoNodesMock(Settings settings, NetworkService networkService) {
+        super(settings);
+        this.networkService = networkService;
+    }
+
+    @Override
+    public HostedServiceGetDetailedResponse getServiceDetails() {
+        HostedServiceGetDetailedResponse response = new HostedServiceGetDetailedResponse();
+        HostedServiceGetDetailedResponse.Deployment deployment = new HostedServiceGetDetailedResponse.Deployment();
+
+        // Fake the deployment
+        deployment.setName("dummy");
+        deployment.setDeploymentSlot(DeploymentSlot.Production);
+        deployment.setStatus(DeploymentStatus.Running);
+
+        // Fake a first instance
+        RoleInstance instance1 = new RoleInstance();
+        instance1.setInstanceName("dummy1");
+
+        // Fake the private IP
+        instance1.setIPAddress(InetAddress.getLoopbackAddress());
+
+        // Fake the public IP
+        InstanceEndpoint endpoint1 = new InstanceEndpoint();
+        endpoint1.setName("elasticsearch");
+        endpoint1.setVirtualIPAddress(InetAddress.getLoopbackAddress());
+        endpoint1.setPort(9400);
+        instance1.setInstanceEndpoints(CollectionUtils.newArrayList(endpoint1));
+
+        // Fake a second instance
+        RoleInstance instance2 = new RoleInstance();
+        instance2.setInstanceName("dummy2");
+
+        // Fake the private IP
+        instance2.setIPAddress(InetAddress.getLoopbackAddress());
+
+        // Fake the public IP
+        InstanceEndpoint endpoint2 = new InstanceEndpoint();
+        endpoint2.setName("elasticsearch");
+
endpoint2.setVirtualIPAddress(InetAddress.getLoopbackAddress()); + endpoint2.setPort(9401); + instance2.setInstanceEndpoints(CollectionUtils.newArrayList(endpoint2)); + + deployment.setRoleInstances(CollectionUtils.newArrayList(instance1, instance2)); + + response.setDeployments(CollectionUtils.newArrayList(deployment)); + + return response; + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java new file mode 100644 index 00000000000..96ee895267c --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.storage; + +import com.microsoft.azure.storage.StorageException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URISyntaxException; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * In memory storage for unit tests + */ +public class AzureStorageServiceMock extends AbstractLifecycleComponent + implements AzureStorageService { + + protected Map blobs = new ConcurrentHashMap<>(); + + @Inject + protected AzureStorageServiceMock(Settings settings) { + super(settings); + } + + @Override + public boolean doesContainerExist(String container) { + return true; + } + + @Override + public void removeContainer(String container) { + } + + @Override + public void createContainer(String container) { + } + + @Override + public void deleteFiles(String container, String path) { + } + + @Override + public boolean blobExists(String container, String blob) { + return blobs.containsKey(blob); + } + + @Override + public void deleteBlob(String container, String blob) { + blobs.remove(blob); + } + + @Override + public InputStream getInputStream(String container, String blob) { + return new ByteArrayInputStream(blobs.get(blob).toByteArray()); + } + + @Override + public OutputStream getOutputStream(String container, String blob) throws URISyntaxException, StorageException { + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + blobs.put(blob, outputStream); + return 
outputStream; + } + + @Override + public Map listBlobsByPrefix(String container, String keyPath, String prefix) { + MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); + for (String blobName : blobs.keySet()) { + if (startsWithIgnoreCase(blobName, prefix)) { + blobsBuilder.put(blobName, new PlainBlobMetaData(blobName, blobs.get(blobName).size())); + } + } + return blobsBuilder.immutableMap(); + } + + @Override + public void moveBlob(String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException { + for (String blobName : blobs.keySet()) { + if (endsWithIgnoreCase(blobName, sourceBlob)) { + ByteArrayOutputStream outputStream = blobs.get(blobName); + blobs.put(blobName.replace(sourceBlob, targetBlob), outputStream); + blobs.remove(blobName); + } + } + } + + @Override + protected void doStart() throws ElasticsearchException { + } + + @Override + protected void doStop() throws ElasticsearchException { + } + + @Override + protected void doClose() throws ElasticsearchException { + } + + /** + * Test if the given String starts with the specified prefix, + * ignoring upper/lower case. + * + * @param str the String to check + * @param prefix the prefix to look for + * @see java.lang.String#startsWith + */ + public static boolean startsWithIgnoreCase(String str, String prefix) { + if (str == null || prefix == null) { + return false; + } + if (str.startsWith(prefix)) { + return true; + } + if (str.length() < prefix.length()) { + return false; + } + String lcStr = str.substring(0, prefix.length()).toLowerCase(Locale.ROOT); + String lcPrefix = prefix.toLowerCase(Locale.ROOT); + return lcStr.equals(lcPrefix); + } + + /** + * Test if the given String ends with the specified suffix, + * ignoring upper/lower case. 
+ * + * @param str the String to check + * @param suffix the suffix to look for + * @see java.lang.String#startsWith + */ + public static boolean endsWithIgnoreCase(String str, String suffix) { + if (str == null || suffix == null) { + return false; + } + if (str.endsWith(suffix)) { + return true; + } + if (str.length() < suffix.length()) { + return false; + } + String lcStr = str.substring(0, suffix.length()).toLowerCase(Locale.ROOT); + String lcPrefix = suffix.toLowerCase(Locale.ROOT); + return lcStr.equals(lcPrefix); + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java new file mode 100644 index 00000000000..cf3b7848d0e --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.azure; + +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.cloud.azure.management.AzureComputeService; +import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery; +import org.elasticsearch.cloud.azure.management.AzureComputeService.Management; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.test.ElasticsearchIntegrationTest; + +public abstract class AbstractAzureComputeServiceTest extends ElasticsearchIntegrationTest { + + private Class mock; + + public AbstractAzureComputeServiceTest(Class mock) { + // We want to inject the Azure API Mock + this.mock = mock; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settings = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true); + return settings.build(); + } + + protected void checkNumberOfNodes(int expected) { + NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().execute().actionGet(); + assertNotNull(nodeInfos); + assertNotNull(nodeInfos.getNodes()); + assertEquals(expected, nodeInfos.getNodes().length); + } + + protected Settings settingsBuilder() { + Settings.Builder builder = Settings.settingsBuilder() + .put("discovery.type", "azure") + .put(Management.API_IMPLEMENTATION, mock) + // We need the network to make the mock working + .put("node.mode", "network"); + + // We add a fake subscription_id to start mock compute service + builder.put(Management.SUBSCRIPTION_ID, "fake") + .put(Discovery.REFRESH, "5s") + .put(Management.KEYSTORE_PATH, "dummy") + .put(Management.KEYSTORE_PASSWORD, "dummy") + .put(Management.SERVICE_NAME, "dummy"); + + return builder.build(); + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java 
b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java new file mode 100644 index 00000000000..6cdbae94fe8 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.discovery.azure;
+
+import org.elasticsearch.cloud.azure.management.AzureComputeServiceTwoNodesMock;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Reported issue in #15
+ * (https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/15)
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE,
+        numDataNodes = 0,
+        transportClientRatio = 0.0,
+        numClientNodes = 0)
+@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-azure/issues/89")
+public class AzureMinimumMasterNodesTest extends AbstractAzureComputeServiceTest {
+
+    public AzureMinimumMasterNodesTest() {
+        // Azure API mock that reports two nodes to the discovery
+        super(AzureComputeServiceTwoNodesMock.class);
+    }
+
+    @Override
+    protected final Settings settingsBuilder() {
+        Settings.Builder builder = Settings.settingsBuilder()
+                .put(super.settingsBuilder())
+                // With a quorum of 2, a single node must NOT elect itself master
+                .put("discovery.zen.minimum_master_nodes", 2)
+                // Make the test run faster
+                .put("discovery.zen.join.timeout", "50ms")
+                .put("discovery.zen.ping.timeout", "10ms")
+                .put("discovery.initial_state_timeout", "100ms");
+        return builder.build();
+    }
+
+    @Test
+    public void simpleOnlyMasterNodeElection() throws IOException {
+        logger.info("--> start data node / non master node");
+        internalCluster().startNode(settingsBuilder());
+        try {
+            // The state request is expected to throw MasterNotDiscoveredException:
+            // with minimum_master_nodes=2 a lone node cannot have a master, so the
+            // fail() below only triggers if no exception was raised
+            assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+            fail("should not be able to find master");
+        } catch (MasterNotDiscoveredException e) {
+            // all is well, no master elected
+        }
+        logger.info("--> start another node");
+        internalCluster().startNode(settingsBuilder());
+        // Quorum of 2 reached: a master must now be elected
+        assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue());
+
+        logger.info("--> stop master node");
+        internalCluster().stopCurrentMasterNode();
+
+        try {
+            // Back to a single node: the quorum is lost again, same expectation as above
+            assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+            fail("should not be able to find master");
+        } catch (MasterNotDiscoveredException e) {
+            // all is well, no master elected
+        }
+
+        logger.info("--> start another node");
+        internalCluster().startNode(settingsBuilder());
+        // Quorum restored: a master must be re-elected
+        assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue());
+    }
+}
diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java
new file mode 100644
index 00000000000..d68db9a1c83
--- /dev/null
+++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.azure;
+
+import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery;
+import org.elasticsearch.cloud.azure.management.AzureComputeService.Management;
+import org.elasticsearch.cloud.azure.management.AzureComputeServiceSimpleMock;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Checks that a single node using the mocked Azure discovery starts and forms a
+ * one-node cluster for each supported host type (and a bogus one).
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST,
+        numDataNodes = 0,
+        transportClientRatio = 0.0,
+        numClientNodes = 0)
+@Slow
+public class AzureSimpleTest extends AbstractAzureComputeServiceTest {
+
+    public AzureSimpleTest() {
+        // Azure API mock that reports a single node to the discovery
+        super(AzureComputeServiceSimpleMock.class);
+    }
+
+    @Test
+    public void one_node_should_run_using_private_ip() {
+        // NOTE: later puts override earlier values, so the put order is significant
+        Settings.Builder settings = Settings.settingsBuilder()
+                .put(Management.SERVICE_NAME, "dummy")
+                .put(Discovery.HOST_TYPE, "private_ip")
+                .put(super.settingsBuilder());
+
+        logger.info("--> start one node");
+        internalCluster().startNode(settings);
+        // A master must have been elected within 1s
+        assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue());
+
+        // We expect having 1 node as part of the cluster, let's test that
+        checkNumberOfNodes(1);
+    }
+
+    @Test
+    public void one_node_should_run_using_public_ip() {
+        Settings.Builder settings = Settings.settingsBuilder()
+                .put(Management.SERVICE_NAME, "dummy")
+                .put(Discovery.HOST_TYPE, "public_ip")
+                .put(super.settingsBuilder());
+
+        logger.info("--> start one node");
+        internalCluster().startNode(settings);
+        assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue());
+
+        // We expect having 1 node as part of the cluster, let's test that
+        checkNumberOfNodes(1);
+    }
+
+    @Test
+    public void one_node_should_run_using_wrong_settings() {
+        // An invalid host type must not prevent the node from starting
+        Settings.Builder settings = Settings.settingsBuilder()
+                .put(Management.SERVICE_NAME, "dummy")
+                .put(Discovery.HOST_TYPE, "do_not_exist")
+                .put(super.settingsBuilder());
+
+        logger.info("--> start one node");
+        internalCluster().startNode(settings);
+        assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue());
+
+        // We expect having 1 node as part of the cluster, let's test that
+        checkNumberOfNodes(1);
+    }
+}
diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java
new file mode 100644
index 00000000000..49eb96e7cdb
--- /dev/null
+++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.azure;
+
+import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery;
+import org.elasticsearch.cloud.azure.management.AzureComputeService.Management;
+import org.elasticsearch.cloud.azure.management.AzureComputeServiceTwoNodesMock;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Checks that two nodes discover each other through the mocked Azure compute API
+ * and form a two-node cluster, for both private and public host types.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST,
+        numDataNodes = 0,
+        transportClientRatio = 0.0,
+        numClientNodes = 0)
+@Slow
+public class AzureTwoStartedNodesTest extends AbstractAzureComputeServiceTest {
+
+    public AzureTwoStartedNodesTest() {
+        // Azure API mock that reports two nodes to the discovery
+        super(AzureComputeServiceTwoNodesMock.class);
+    }
+
+    @Test
+    public void two_nodes_should_run_using_private_ip() {
+        // NOTE: later puts override earlier values, so the put order is significant
+        Settings.Builder settings = Settings.settingsBuilder()
+                .put(Management.SERVICE_NAME, "dummy")
+                .put(Discovery.HOST_TYPE, "private_ip")
+                .put(super.settingsBuilder());
+
+        logger.info("--> start first node");
+        internalCluster().startNode(settings);
+        assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue());
+
+        logger.info("--> start another node");
+        internalCluster().startNode(settings);
+        assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue());
+
+        // We expect having 2 nodes as part of the cluster, let's test that
+        checkNumberOfNodes(2);
+    }
+
+    @Test
+    public void two_nodes_should_run_using_public_ip() {
+        Settings.Builder settings = Settings.settingsBuilder()
+                .put(Management.SERVICE_NAME, "dummy")
+                .put(Discovery.HOST_TYPE, "public_ip")
+                .put(super.settingsBuilder());
+
+        logger.info("--> start first node");
+        internalCluster().startNode(settings);
+        assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue());
+
+        logger.info("--> start another node");
+        internalCluster().startNode(settings);
+        assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue());
+
+        // We expect having 2 nodes as part of the cluster, let's test that
+        checkNumberOfNodes(2);
+    }
+}
diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java
new file mode 100644
index 00000000000..497e807d60e
--- /dev/null
+++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.index.store; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import static org.hamcrest.Matchers.is; + +abstract public class AbstractAzureFsTest extends ElasticsearchIntegrationTest { + + @Test + public void testAzureFs() { + // Create an index and index some documents + createIndex("test"); + long nbDocs = randomIntBetween(10, 1000); + for (long i = 0; i < nbDocs; i++) { + index("test", "doc", "" + i, "foo", "bar"); + } + refresh(); + SearchResponse response = client().prepareSearch("test").get(); + assertThat(response.getHits().totalHits(), is(nbDocs)); + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/SmbMMapFsTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/SmbMMapFsTest.java new file mode 100644 index 00000000000..2b925a86820 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/SmbMMapFsTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store; + +import org.elasticsearch.common.settings.Settings; + + +public class SmbMMapFsTest extends AbstractAzureFsTest { + + @Override + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put("index.store.type", "smb_mmap_fs") + .build(); + } + +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/SmbSimpleFsTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/SmbSimpleFsTest.java new file mode 100644 index 00000000000..482075ad327 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/index/store/SmbSimpleFsTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store; + +import org.elasticsearch.common.settings.Settings; + + +public class SmbSimpleFsTest extends AbstractAzureFsTest { + @Override + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put("index.store.type", "smb_simple_fs") + .build(); + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java new file mode 100644 index 00000000000..56bad4cd0e3 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.azure; + +import com.microsoft.azure.storage.StorageException; +import org.elasticsearch.cloud.azure.AbstractAzureTest; +import org.elasticsearch.cloud.azure.storage.AzureStorageService; +import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.test.store.MockFSDirectoryService; +import org.junit.After; +import org.junit.Before; + +import java.net.URISyntaxException; + +public abstract class AbstractAzureRepositoryServiceTest extends AbstractAzureTest { + + protected String basePath; + private Class mock; + + public AbstractAzureRepositoryServiceTest(Class mock, + String basePath) { + // We want to inject the Azure API Mock + this.mock = mock; + this.basePath = basePath; + } + + /** + * Deletes repositories, supports wildcard notation. + */ + public static void wipeRepositories(String... 
repositories) { + // if nothing is provided, delete all + if (repositories.length == 0) { + repositories = new String[]{"*"}; + } + for (String repository : repositories) { + try { + client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + } catch (RepositoryMissingException ex) { + // ignore + } + } + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.settingsBuilder() + .put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true) + .put(Storage.API_IMPLEMENTATION, mock) + .put(Storage.CONTAINER, "snapshots"); + + // We use sometime deprecated settings in tests + builder.put(Storage.ACCOUNT, "mock_azure_account") + .put(Storage.KEY, "mock_azure_key"); + + return builder.build(); + } + + @Override + public Settings indexSettings() { + // During restore we frequently restore index to exactly the same state it was before, that might cause the same + // checksum file to be written twice during restore operation + return Settings.builder().put(super.indexSettings()) + .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) + .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, false) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + } + + @Before @After + public final void wipe() throws StorageException, URISyntaxException { + wipeRepositories(); + cleanRepositoryFiles(basePath); + } + + /** + * Purge the test container + */ + public void cleanRepositoryFiles(String path) throws StorageException, URISyntaxException { + String container = internalCluster().getInstance(Settings.class).get("repositories.azure.container"); + logger.info("--> remove blobs in container [{}]", container); + AzureStorageService client = internalCluster().getInstance(AzureStorageService.class); + client.deleteFiles(container, path); + } +} diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java 
b/plugins/cloud-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java new file mode 100644 index 00000000000..74ab9c92db3 --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -0,0 +1,474 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.azure; + + +import com.carrotsearch.randomizedtesting.RandomizedTest; +import com.microsoft.azure.storage.StorageException; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.cloud.azure.AbstractAzureTest; +import org.elasticsearch.cloud.azure.storage.AzureStorageService; +import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; +import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.repositories.RepositoryVerificationException; +import org.elasticsearch.repositories.azure.AzureRepository.Repository; +import org.elasticsearch.snapshots.SnapshotMissingException; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.store.MockFSDirectoryService; +import org.junit.*; + +import java.net.URISyntaxException; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import java.util.Locale; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +/** + * This test needs Azure to run and -Dtests.thirdparty=true to be set + * and -Dtests.config=/path/to/elasticsearch.yml + * @see org.elasticsearch.cloud.azure.AbstractAzureTest + */ +@ElasticsearchIntegrationTest.ClusterScope( + scope = ElasticsearchIntegrationTest.Scope.SUITE, + numDataNodes = 1, + transportClientRatio = 0.0) +public 
class AzureSnapshotRestoreITest extends AbstractAzureTest {

    /**
     * Builds a per-test repository base path from the current test name,
     * e.g. "it-simple-workflow". Underscores are turned into dashes and any
     * trailing tokens after a space are dropped.
     */
    private String getRepositoryPath() {
        String testName = "it-".concat(Strings.toUnderscoreCase(getTestName()).replaceAll("_", "-"));
        return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName;
    }

    /**
     * Builds a container name unique to this test run by appending the lowercased
     * randomized-runner seed, so concurrent runs do not collide on the same
     * Azure container.
     */
    private static String getContainerName() {
        String testName = "snapshot-itest-".concat(RandomizedTest.getContext().getRunnerSeedAsString().toLowerCase(Locale.ROOT));
        return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName;
    }

    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
        return Settings.builder().put(super.nodeSettings(nodeOrdinal))
                // In snapshot tests, we explicitly disable cloud discovery
                .put("discovery.type", "local")
                .build();
    }

    @Override
    public Settings indexSettings() {
        // During restore we frequently restore index to exactly the same state it was before, that might cause the same
        // checksum file to be written twice during restore operation
        return Settings.builder().put(super.indexSettings())
                .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false)
                .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, false)
                .build();
    }

    /**
     * Runs both before and after every test: unregisters all repositories and
     * purges the Azure containers used by this suite (base name plus the "-1"
     * and "-2" variants used by {@link #testMultipleRepositories()}) so no
     * state leaks between test methods.
     */
    @Before @After
    public final void wipeAzureRepositories() throws StorageException, URISyntaxException {
        wipeRepositories();
        cleanRepositoryFiles(
                getContainerName(),
                getContainerName().concat("-1"),
                getContainerName().concat("-2"));
    }

    /**
     * End-to-end snapshot/restore round-trip against a real Azure container:
     * index, snapshot two of three indices, delete data, restore, and finally
     * restore a single index after full index deletion.
     */
    @Test
    public void testSimpleWorkflow() {
        Client client = client();
        logger.info("--> creating azure repository with path [{}]", getRepositoryPath());
        // NOTE(review): this test registers the repository using Storage.* setting keys while the
        // other tests in this class use Repository.* — presumably both resolve to the same
        // settings; confirm against AzureRepository/AzureStorageService.
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
                .setType("azure").setSettings(Settings.settingsBuilder()
                                .put(Storage.CONTAINER, getContainerName())
                                .put(Storage.BASE_PATH, getRepositoryPath())
                                .put(Storage.CHUNK_SIZE, randomIntBetween(1000, 10000))
                ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        createIndex("test-idx-1", "test-idx-2", "test-idx-3");
        ensureGreen();

        logger.info("--> indexing some data");
        for (int i = 0; i < 100; i++) {
            index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
            index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
            index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
        }
        refresh();
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
        assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
        assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));

        // Snapshot only test-idx-1 and test-idx-2; test-idx-3 is explicitly excluded.
        logger.info("--> snapshot");
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

        assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));

        logger.info("--> delete some data");
        for (int i = 0; i < 50; i++) {
            client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
        }
        for (int i = 50; i < 100; i++) {
            client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
        }
        for (int i = 0; i < 100; i += 2) {
            client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
        }
        refresh();
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
        assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L));
        assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));

        logger.info("--> close indices");
        client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();

        logger.info("--> restore all indices from the snapshot");
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

        ensureGreen();
        // test-idx-3 was never snapshotted, so it keeps its post-delete count of 50.
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
        assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
        assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));

        // Test restore after index deletion
        logger.info("--> delete indices");
        cluster().wipeIndices("test-idx-1", "test-idx-2");
        logger.info("--> restore one index after deletion");
        restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
        ensureGreen();
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
        ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
        assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
        assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
    }

    /**
     * Verifies that two azure repositories backed by two different containers
     * ("-1" and "-2" suffixes) can coexist, each holding its own snapshot, and
     * that each snapshot restores its own index independently.
     */
    @Test
    public void testMultipleRepositories() {
        Client client = client();
        logger.info("--> creating azure repository with path [{}]", getRepositoryPath());
        PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1")
                .setType("azure").setSettings(Settings.settingsBuilder()
                                .put(Repository.CONTAINER, getContainerName().concat("-1"))
                                .put(Repository.BASE_PATH, getRepositoryPath())
                                .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000))
                ).get();
        assertThat(putRepositoryResponse1.isAcknowledged(), equalTo(true));
        PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2")
                .setType("azure").setSettings(Settings.settingsBuilder()
                                .put(Repository.CONTAINER, getContainerName().concat("-2"))
                                .put(Repository.BASE_PATH, getRepositoryPath())
                                .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000))
                ).get();
        assertThat(putRepositoryResponse2.isAcknowledged(), equalTo(true));

        createIndex("test-idx-1", "test-idx-2");
        ensureGreen();

        logger.info("--> indexing some data");
        for (int i = 0; i < 100; i++) {
            index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
            index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
        }
        refresh();
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
        assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));

        // One snapshot per repository, each capturing a single distinct index.
        logger.info("--> snapshot 1");
        CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot("test-repo1", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1").get();
        assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), greaterThan(0));
        assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse1.getSnapshotInfo().totalShards()));

        logger.info("--> snapshot 2");
        CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot("test-repo2", "test-snap").setWaitForCompletion(true).setIndices("test-idx-2").get();
        assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0));
        assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()));

        assertThat(client.admin().cluster().prepareGetSnapshots("test-repo1").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
        assertThat(client.admin().cluster().prepareGetSnapshots("test-repo2").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));

        // Test restore after index deletion
        logger.info("--> delete indices");
        cluster().wipeIndices("test-idx-1", "test-idx-2");
        logger.info("--> restore one index after deletion from snapshot 1");
        RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1").execute().actionGet();
        assertThat(restoreSnapshotResponse1.getRestoreInfo().totalShards(), greaterThan(0));
        ensureGreen();
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
        ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
        assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
        assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));

        logger.info("--> restore other index after deletion from snapshot 2");
        RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin().cluster().prepareRestoreSnapshot("test-repo2", "test-snap").setWaitForCompletion(true).setIndices("test-idx-2").execute().actionGet();
        assertThat(restoreSnapshotResponse2.getRestoreInfo().totalShards(), greaterThan(0));
        ensureGreen();
        assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
        clusterState = client.admin().cluster().prepareState().get().getState();
        assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
        assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(true));
    }

    /**
     * For issue #26: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/26
     * Snapshot listing must work both with and without a base_path configured
     * on the repository.
     */
    @Test
    public void testListBlobs_26() throws StorageException, URISyntaxException {
        createIndex("test-idx-1", "test-idx-2", "test-idx-3");
        ensureGreen();

        logger.info("--> indexing some data");
        for (int i = 0; i < 100; i++) {
            index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
            index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
            index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
        }
        refresh();

        ClusterAdminClient client = client().admin().cluster();
        // First pass: repository rooted at the container (no base_path).
        logger.info("--> creating azure repository without any path");
        PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure")
                .setSettings(Settings.settingsBuilder()
                                .put(Repository.CONTAINER, getContainerName())
                ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        // Get all snapshots - should be empty
        assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(0));

        logger.info("--> snapshot");
        CreateSnapshotResponse createSnapshotResponse = client.prepareCreateSnapshot("test-repo", "test-snap-26").setWaitForCompletion(true).setIndices("test-idx-*").get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));

        // Get all snapshots - should have one
        assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(1));

        // Clean the snapshot
        client.prepareDeleteSnapshot("test-repo", "test-snap-26").get();
        client.prepareDeleteRepository("test-repo").get();

        // Second pass: same container but with an explicit base_path.
        logger.info("--> creating azure repository path [{}]", getRepositoryPath());
        putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure")
                .setSettings(Settings.settingsBuilder()
                                .put(Repository.CONTAINER, getContainerName())
                                .put(Repository.BASE_PATH, getRepositoryPath())
                ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        // Get all snapshots - should be empty
        assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(0));

        logger.info("--> snapshot");
        createSnapshotResponse = client.prepareCreateSnapshot("test-repo", "test-snap-26").setWaitForCompletion(true).setIndices("test-idx-*").get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));

        // Get all snapshots - should have one
        assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(1));
    }

    /**
     * For issue #28: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/28
     * Getting or deleting a snapshot that does not exist must raise
     * {@link SnapshotMissingException} instead of failing in another way.
     */
    @Test
    public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISyntaxException {
        ClusterAdminClient client = client().admin().cluster();
        logger.info("--> creating azure repository without any path");
        PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure")
                .setSettings(Settings.settingsBuilder()
                                .put(Repository.CONTAINER, getContainerName())
                ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        try {
            client.prepareGetSnapshots("test-repo").addSnapshots("nonexistingsnapshotname").get();
            fail("Shouldn't be here");
        } catch (SnapshotMissingException ex) {
            // Expected
        }

        try {
            client.prepareDeleteSnapshot("test-repo", "nonexistingsnapshotname").get();
            fail("Shouldn't be here");
        } catch (SnapshotMissingException ex) {
            // Expected
        }
    }

    /**
     * For issue #21: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/21
     * Exercises Azure's container-naming rules: lowercase alphanumerics and
     * single dashes, 3-63 characters, no leading dash, no double dash.
     */
    @Test
    public void testForbiddenContainerName() throws Exception {
        checkContainerName("", false);
        checkContainerName("es", false);
        checkContainerName("-elasticsearch", false);
        checkContainerName("elasticsearch--integration", false);
        checkContainerName("elasticsearch_integration", false);
        checkContainerName("ElAsTicsearch_integration", false);
        checkContainerName("123456789-123456789-123456789-123456789-123456789-123456789-1234", false);
        checkContainerName("123456789-123456789-123456789-123456789-123456789-123456789-123", true);
        checkContainerName("elasticsearch-integration", true);
        checkContainerName("elasticsearch-integration-007", true);
    }

    /**
     * Create repository with wrong or correct container name
     * @param container Container name we want to create
     * @param correct Is this container name correct
     */
    private void checkContainerName(final String container, final boolean correct) throws Exception {
        logger.info("--> creating azure repository with container name [{}]", container);
        // It could happen that we just removed from a previous test the same container so
        // we can not create it yet. assertBusy retries the runnable (fail() throws
        // AssertionError, which triggers the retry) for up to 5 minutes.
        assertBusy(new Runnable() {

            public void run() {
                try {
                    PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo")
                            .setType("azure").setSettings(Settings.settingsBuilder()
                                            .put(Repository.CONTAINER, container)
                                            .put(Repository.BASE_PATH, getRepositoryPath())
                                            .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000))
                            ).get();
                    client().admin().cluster().prepareDeleteRepository("test-repo").get();
                    try {
                        logger.info("--> remove container [{}]", container);
                        cleanRepositoryFiles(container);
                    } catch (StorageException | URISyntaxException e) {
                        // We can ignore that as we just try to clean after the test
                    }
                    assertTrue(putRepositoryResponse.isAcknowledged() == correct);
                } catch (RepositoryVerificationException e) {
                    // Verification failed: only a problem when the name was expected to be
                    // valid — in that case fail() makes assertBusy retry after a pause.
                    if (correct) {
                        logger.debug(" -> container is being removed. Let's wait a bit...");
                        fail();
                    }
                }
            }
        }, 5, TimeUnit.MINUTES);
    }

    /**
     * Test case for issue #23: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/23
     * Restoring a snapshot that was never created must raise
     * {@link SnapshotMissingException}.
     */
    @Test
    public void testNonExistingRepo_23() {
        Client client = client();
        logger.info("--> creating azure repository with path [{}]", getRepositoryPath());
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
                .setType("azure").setSettings(Settings.settingsBuilder()
                                .put(Repository.CONTAINER, getContainerName())
                                .put(Repository.BASE_PATH, getRepositoryPath())
                                .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000))
                ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        logger.info("--> restore non existing snapshot");
        try {
            client.admin().cluster().prepareRestoreSnapshot("test-repo", "no-existing-snapshot").setWaitForCompletion(true).execute().actionGet();
            fail("Shouldn't be here");
        } catch (SnapshotMissingException ex) {
            // Expected
        }
    }

    /**
     * When a user remove a container you can not immediately create it again.
     * This test verifies that repository verification fails (with
     * {@link RepositoryVerificationException}) while the container is still
     * being deleted on the Azure side.
     */
    @Test
    public void testRemoveAndCreateContainer() throws Exception {
        final String container = getContainerName().concat("-testremove");
        final AzureStorageService storageService = internalCluster().getInstance(AzureStorageService.class);

        // It could happen that we run this test really close to a previous one
        // so we might need some time to be able to create the container
        assertBusy(new Runnable() {

            public void run() {
                try {
                    storageService.createContainer(container);
                    logger.debug(" -> container created...");
                } catch (URISyntaxException e) {
                    // Incorrect URL. This should never happen.
                    fail();
                } catch (StorageException e) {
                    // It could happen. Let's wait for a while.
                    logger.debug(" -> container is being removed. Let's wait a bit...");
                    fail();
                }
            }
        }, 30, TimeUnit.SECONDS);
        storageService.removeContainer(container);

        ClusterAdminClient client = client().admin().cluster();
        logger.info("--> creating azure repository while container is being removed");
        try {
            client.preparePutRepository("test-repo").setType("azure")
                    .setSettings(Settings.settingsBuilder()
                                    .put(Repository.CONTAINER, container)
                    ).get();
            fail("we should get a RepositoryVerificationException");
        } catch (RepositoryVerificationException e) {
            // Fine we expect that
        }
    }

    /**
     * Deletes repositories, supports wildcard notation.
     */
    public static void wipeRepositories(String... repositories) {
        // if nothing is provided, delete all
        if (repositories.length == 0) {
            repositories = new String[]{"*"};
        }
        for (String repository : repositories) {
            try {
                client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
            } catch (RepositoryMissingException ex) {
                // ignore
            }
        }
    }

    /**
     * Purge the test containers
     */
    public void cleanRepositoryFiles(String... containers) throws StorageException, URISyntaxException {
        // Builds a fresh storage client from the on-disk test settings so cleanup
        // works even when no repository is currently registered in the cluster.
        Settings settings = readSettingsFromFile();
        AzureStorageService client = new AzureStorageServiceImpl(settings);
        for (String container : containers) {
            client.removeContainer(container);
        }
    }
}
diff --git a/plugins/cloud-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java b/plugins/cloud-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java new file mode 100644 index 00000000000..4c2a59124ce --- /dev/null +++ b/plugins/cloud-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.azure;


import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cloud.azure.storage.AzureStorageServiceMock;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.junit.Test;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;

/**
 * Snapshot/restore workflow test backed by {@link AzureStorageServiceMock}
 * instead of a real Azure account — the mocked counterpart of the third-party
 * AzureSnapshotRestoreITest, so it can run without credentials.
 */
@ElasticsearchIntegrationTest.ClusterScope(
        scope = ElasticsearchIntegrationTest.Scope.SUITE,
        numDataNodes = 1,
        numClientNodes = 0,
        transportClientRatio = 0.0)
public class AzureSnapshotRestoreTest extends AbstractAzureRepositoryServiceTest {

    public AzureSnapshotRestoreTest() {
        // Randomized base path so repeated suite runs use distinct repository roots.
        super(AzureStorageServiceMock.class, "/snapshot-test/repo-" + randomInt());
    }

    /**
     * Full round-trip against the mocked storage service: index, snapshot two
     * of three indices, delete data, restore, then restore a single index
     * after full index deletion.
     */
    @Test
    public void testSimpleWorkflow() {
        Client client = client();
        logger.info("--> creating azure repository with path [{}]", basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
                .setType("azure").setSettings(Settings.settingsBuilder()
                                .put("base_path", basePath)
                                .put("chunk_size", randomIntBetween(1000, 10000))
                ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        createIndex("test-idx-1", "test-idx-2", "test-idx-3");
        ensureGreen();

        logger.info("--> indexing some data");
        for (int i = 0; i < 100; i++) {
            index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
            index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
            index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
        }
        refresh();
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
        assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
        assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));

        // Snapshot only test-idx-1 and test-idx-2; test-idx-3 is explicitly excluded.
        logger.info("--> snapshot");
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

        assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));

        logger.info("--> delete some data");
        for (int i = 0; i < 50; i++) {
            client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
        }
        for (int i = 50; i < 100; i++) {
            client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
        }
        for (int i = 0; i < 100; i += 2) {
            client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
        }
        refresh();
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
        assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L));
        assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));

        logger.info("--> close indices");
        client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();

        logger.info("--> restore all indices from the snapshot");
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

        ensureGreen();
        // test-idx-3 was never snapshotted, so it keeps its post-delete count of 50.
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
        assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
        assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));

        // Test restore after index deletion
        logger.info("--> delete indices");
        cluster().wipeIndices("test-idx-1", "test-idx-2");
        logger.info("--> restore one index after deletion");
        restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
        ensureGreen();
        assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
        ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
        assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
        assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
    }

}