From 598a64c990bb4b016a204ea61a05ffc280d3f166 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 2 Apr 2013 18:15:33 +0200 Subject: [PATCH 001/125] Initial commit --- README.md | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000000..5dd3b22b183 --- /dev/null +++ b/README.md @@ -0,0 +1,67 @@ +Azure Cloud Plugin for ElasticSearch +==================================== + +The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. + +In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0`. + + ----------------------------------------- + | Azure Cloud Plugin | ElasticSearch | + ----------------------------------------- + | master | 0.90 -> master | + ----------------------------------------- + | 1.0.0 | 0.20.6 | + ----------------------------------------- + + +Azure Virtual Machine Discovery +------------------------------- + +Azure VM discovery allows to use the azure APIs to perform automatic discovery (similar to multicast in non hostile +multicast environments). Here is a simple sample configuration: + +``` + cloud: + azure: + private_key: /path/to/private.key + certificate: /path/to/azure.certficate + password: your_password_for_pk + subscription_id: your_azure_subscription_id + discovery: + type: azure +``` + +How to start +------------ + +Short story: + + * Create Azure instances + * Open 9300 port + * Install Elasticsearch + * Install Azure plugin + * Modify `elasticsearch.yml` file + * Start Elasticsearch + +Long story: + + See [tutorial](http://www.elasticsearch.org/tutorials/2013/04/02/elasticsearch-on-azure/). + +License +------- + + This software is licensed under the Apache 2 license, quoted below. 
+ + Copyright 2009-2012 Shay Banon and ElasticSearch + + Licensed under the Apache License, Version 2.0 (the "License"); you may not + use this file except in compliance with the License. You may obtain a copy of + the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations under + the License. From fba4f6b1a18a113ff4aec2cf6be221e735b62689 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 2 Apr 2013 18:16:30 +0200 Subject: [PATCH 002/125] Create elasticsearch-cloud-azure project --- .gitignore | 11 + CONTRIBUTING.md | 98 ++++++ LICENSE.txt | 202 ++++++++++++ NOTICE.txt | 5 + README.md | 297 +++++++++++++++++- pom.xml | 145 +++++++++ src/main/assemblies/plugin.xml | 38 +++ .../cloud/azure/AzureComputeService.java | 239 ++++++++++++++ .../cloud/azure/AzureModule.java | 33 ++ .../cloud/azure/AzureSettingsFilter.java | 37 +++ .../discovery/azure/AzureDiscovery.java | 68 ++++ .../discovery/azure/AzureDiscoveryModule.java | 34 ++ .../azure/AzureUnicastHostsProvider.java | 47 +++ .../plugin/cloud/azure/CloudAzurePlugin.java | 71 +++++ src/main/resources/es-plugin.properties | 12 + src/test/java/AzureSimpleTest.java | 58 ++++ src/test/resources/azure.crt | 3 + src/test/resources/azure.pk | 3 + src/test/resources/elasticsearch.yml | 29 ++ src/test/resources/log4j.xml | 39 +++ 20 files changed, 1455 insertions(+), 14 deletions(-) create mode 100644 .gitignore create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE.txt create mode 100644 NOTICE.txt create mode 100644 pom.xml create mode 100644 src/main/assemblies/plugin.xml create mode 100644 src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java create mode 100644 
src/main/java/org/elasticsearch/cloud/azure/AzureModule.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java create mode 100755 src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java create mode 100644 src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java create mode 100644 src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java create mode 100644 src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java create mode 100644 src/main/resources/es-plugin.properties create mode 100644 src/test/java/AzureSimpleTest.java create mode 100644 src/test/resources/azure.crt create mode 100644 src/test/resources/azure.pk create mode 100644 src/test/resources/elasticsearch.yml create mode 100644 src/test/resources/log4j.xml diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000000..9dac7bc0ead --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +/data +/work +/logs +/.idea +/target +.DS_Store +*.iml +/.settings +/.project +/.classpath +*.vmoptions diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000000..906de27afb8 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,98 @@ +Contributing to elasticsearch +============================= + +Elasticsearch is an open source project and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into Elasticsearch itself. + +Bug reports +----------- + +If you think you have found a bug in Elasticsearch, first make sure that you are testing against the [latest version of Elasticsearch](http://www.elasticsearch.org/download/) - your issue may already have been fixed. 
If not, search our [issues list](https://github.com/elasticsearch/elasticsearch/issues) on GitHub in case a similar issue has already been opened. + +It is very helpful if you can prepare a reproduction of the bug. In other words, provide a small test case which we can run to confirm your bug. It makes it easier to find the problem and to fix it. Test cases should be provided as `curl` commands which we can copy and paste into a terminal to run it locally, for example: + +```sh +# delete the index +curl -XDELETE localhost:9200/test + +# insert a document +curl -XPUT localhost:9200/test/test/1 -d '{ + "title": "test document" +}' + +# this should return XXXX but instead returns YYY +curl .... +``` + +Provide as much information as you can. You may think that the problem lies with your query, when actually it depends on how your data is indexed. The easier it is for us to recreate your problem, the faster it is likely to be fixed. + +Feature requests +---------------- + +If you find yourself wishing for a feature that doesn't exist in Elasticsearch, you are probably not alone. There are bound to be others out there with similar needs. Many of the features that Elasticsearch has today have been added because our users saw the need. +Open an issue on our [issues list](https://github.com/elasticsearch/elasticsearch/issues) on GitHub which describes the feature you would like to see, why you need it, and how it should work. + +Contributing code and documentation changes +------------------------------------------- + +If you have a bugfix or new feature that you would like to contribute to Elasticsearch, please find or open an issue about it first. Talk about what you would like to do. It may be that somebody is already working on it, or that there are particular issues that you should know about before implementing the change. + +We enjoy working with contributors to get their code accepted. 
There are many approaches to fixing a problem and it is important to find the best approach before writing too much code. + +The process for contributing to any of the [Elasticsearch repositories](https://github.com/elasticsearch/) is similar. Details for individual projects can be found below. + +### Fork and clone the repository + +You will need to fork the main Elasticsearch code or documentation repository and clone it to your local machine. See +[github help page](https://help.github.com/articles/fork-a-repo) for help. + +Further instructions for specific projects are given below. + +### Submitting your changes + +Once your changes and tests are ready to submit for review: + +1. Test your changes +Run the test suite to make sure that nothing is broken. + +2. Sign the Contributor License Agreement +Please make sure you have signed our [Contributor License Agreement](http://www.elasticsearch.org/contributor-agreement/). We are not asking you to assign copyright to us, but to give us the right to distribute your code without restriction. We ask this of all contributors in order to assure our users of the origin and continuing existence of the code. You only need to sign the CLA once. + +3. Rebase your changes +Update your local repository with the most recent code from the main Elasticsearch repository, and rebase your branch on top of the latest master branch. We prefer your changes to be squashed into a single commit. + +4. Submit a pull request +Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests). In the pull request, describe what your changes do and mention the number of the issue where discussion has taken place, eg "Closes #123". + +Then sit back and wait. There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into Elasticsearch. 
+ + +Contributing to the Elasticsearch plugin +---------------------------------------- + +**Repository:** [https://github.com/elasticsearch/elasticsearch-cloud-azure](https://github.com/elasticsearch/elasticsearch-cloud-azure) + +Make sure you have [Maven](http://maven.apache.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE by running `mvn eclipse:eclipse` and then importing the project into their workspace: `File > Import > Existing project into workspace`. + +Please follow these formatting guidelines: + +* Java indent is 4 spaces +* Line width is 140 characters +* The rest is left to Java coding standards +* Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do. + +To create a distribution from the source, simply run: + +```sh +cd elasticsearch-cloud-azure/ +mvn clean package -DskipTests +``` + +You will find the newly built packages under: `./target/releases/`. + +Before submitting your changes, run the test suite to make sure that nothing is broken, with: + +```sh +mvn clean test +``` + +Source: [Contributing to elasticsearch](http://www.elasticsearch.org/contributing-to-elasticsearch/) diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 00000000000..86016475acd --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,5 @@ +ElasticSearch +Copyright 2009-2013 ElasticSearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). diff --git a/README.md b/README.md index 5dd3b22b183..e7b55c88fde 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ In order to install the plugin, simply run: `bin/plugin -install elasticsearch/e Azure Virtual Machine Discovery -------------------------------- +=============================== Azure VM discovery allows to use the azure APIs to perform automatic discovery (similar to multicast in non hostile multicast environments). Here is a simple sample configuration: @@ -31,28 +31,297 @@ multicast environments). 
Here is a simple sample configuration: type: azure ``` -How to start ------------- +How to start (short story) +-------------------------- -Short story: +* Create Azure instances +* Install Elasticsearch +* Install Azure plugin +* Modify `elasticsearch.yml` file +* Start Elasticsearch - * Create Azure instances - * Open 9300 port - * Install Elasticsearch - * Install Azure plugin - * Modify `elasticsearch.yml` file - * Start Elasticsearch +How to start (long story) +-------------------------- -Long story: +We will expose here one strategy which is to hide our Elasticsearch cluster from outside. + +With this strategy, only VM behind this same virtual port can talk to each other. +That means that with this mode, you can use elasticsearch unicast discovery to build a cluster. + +Best, you can use the `elasticsearch-cloud-azure` plugin to let it fetch information about your nodes using +azure API. + +### Prerequisites + +Before starting, you need to have: + +* A [Windows Azure account](http://www.windowsazure.com/) +* SSH keys and certificate + +Here is a description on how to generate this using `openssl`: + +```sh +# You may want to use another dir than /tmp +cd /tmp +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure-private.key -out azure-certificate.pem +chmod 600 azure-private.key +openssl x509 -outform der -in azure-certificate.pem -out azure-certificate.cer +``` + +See this [guide](http://www.windowsazure.com/en-us/manage/linux/how-to-guides/ssh-into-linux/) to have +more details on how to create keys for Azure. + +Once done, you need to upload your certificate in Azure: + +* Go to the [management console](https://account.windowsazure.com/). +* Sign in using your account. +* Click on `Portal`. +* Go to Settings (bottom of the left list) +* On the bottom bar, click on `Upload` and upload your `azure-certificate.cer` file. 
+ +You may want to use [Windows Azure Command-Line Tool](http://www.windowsazure.com/en-us/develop/nodejs/how-to-guides/command-line-tools/): + +* Install [NodeJS](https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager), for example using +homebrew on MacOS X: + +```sh +brew install node +``` + +* Install Azure tools: + +```sh +sudo npm install azure-cli -g +``` + +* Download and import your azure settings: + +```sh +# This will open a browser and will download a .publishsettings file +azure account download + +# Import this file (we have downloaded it to /tmp) +# Note, it will create needed files in ~/.azure +azure account import /tmp/azure.publishsettings +``` + +### Creating your first instance + +You need to have a storage account available. Check [Azure Blob Storage documentation](http://www.windowsazure.com/en-us/develop/net/how-to-guides/blob-storage/#create-account) +for more information. + +You will need to choose the operating system you want to run on. To get a list of official available images, run: + +```sh +azure vm list +``` + +Let's say we are going to deploy an Ubuntu image on an extra small instance in West Europe: + +* Azure cluster name: `azure-elasticsearch-cluster` +* Image: `b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20130808-alpha3-en-us-30GB` +* VM Name: `myesnode1` +* VM Size: `extrasmall` +* Location: `West Europe` +* Login: `elasticsearch` + +Using command line: + +```sh +azure vm create azure-elasticsearch-cluster \ + b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20130808-alpha3-en-us-30GB \ + --vm-name myesnode1 \ + --location "West Europe" \ + --vm-size extrasmall \ + --ssh 22 \ + --ssh-cert /tmp/azure-certificate.pem \ + elasticsearch password +``` + +You should see something like: + +``` +info: Executing command vm create ++ Looking up image ++ Looking up cloud service ++ Creating cloud service ++ Retrieving storage accounts ++ Configuring certificate ++ Creating VM +info: vm create 
command OK +``` + +Now, your first instance is started. You need to install Elasticsearch on it. + +> **Note on SSH** +> +> You need to give the private key and username each time you log on your instance: +> +>```sh +>ssh -i ~/.ssh/azure-private.key elasticsearch@myescluster.cloudapp.net +>``` +> +> But you can also define it once in `~/.ssh/config` file: +> +>``` +>Host *.cloudapp.net +> User elasticsearch +> StrictHostKeyChecking no +> UserKnownHostsFile=/dev/null +> IdentityFile ~/.ssh/azure-private.key +>``` + + +```sh +# First, copy your azure certificate on this machine +scp /tmp/azure.publishsettings azure-elasticsearch-cluster.cloudapp.net:/tmp + +# Then, connect to your instance using SSH +ssh azure-elasticsearch-cluster.cloudapp.net +``` + +Once connected, install Elasticsearch: + +```sh +# Install Latest JDK +sudo apt-get update +sudo apt-get install openjdk-7-jre-headless + +# Download Elasticsearch +curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.3.deb -o elasticsearch-0.90.3.deb + +# Prepare Elasticsearch installation +sudo dpkg -i elasticsearch-0.90.3.deb +``` + +Check that elasticsearch is running: + +```sh +curl http://localhost:9200/ +``` + +This command should give you a JSON result: + +```javascript +{ + "ok" : true, + "status" : 200, + "name" : "Mandarin", + "version" : { + "number" : "0.90.3", + "build_hash" : "5c38d6076448b899d758f29443329571e2522410", + "build_timestamp" : "2013-08-06T13:18:31Z", + "build_snapshot" : false, + "lucene_version" : "4.4" + }, + "tagline" : "You Know, for Search" +} +``` + +### Install nodejs and Azure tools + +*TODO: check if there is a downloadable version of NodeJS* + +```sh +# Install node (aka download and compile source) +sudo apt-get update +sudo apt-get install python-software-properties python g++ make +sudo add-apt-repository ppa:chris-lea/node.js +sudo apt-get update +sudo apt-get install nodejs + +# Install Azure tools +sudo npm install azure-cli -g + +# 
Install your azure certficate +azure account import /tmp/azure.publishsettings + +# Remove tmp file +rm /tmp/azure.publishsettings +``` + +### Generate private keys for this instance + +```sh +openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure-private.key -out azure-certificate.pem +chmod 600 azure-private.key +openssl x509 -outform der -in azure-certificate.pem -out azure-certificate.cer +# Transform private key to PEM format +openssl pkcs8 -topk8 -nocrypt -in azure-private.key -inform PEM -out azure-pk.pem -outform PEM +# Transform certificate to PEM format +openssl x509 -inform der -in azure-certificate.cer -out azure-cert.pem +``` + +Upload the generated key to Azure platform + +```sh +azure service cert create azure-elasticsearch-cluster azure-certificate.cer +``` + +### Install elasticsearch cloud azure plugin + +```sh +# Stop elasticsearch +sudo service elasticsearch stop + +# Install the plugin (TODO : USE THE RIGHT VERSION NUMBER) +sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/0.1.0-SNAPSHOT + +# Configure it +sudo vi /etc/elasticsearch/elasticsearch.yml +``` + +And add the following lines: + +```yaml +# If you don't remember your account id, you may get it with `azure account list` + cloud: + azure: + private_key: /home/elasticsearch/azure-pk.pem + certificate: /home/elasticsearch/azure-cert.pem + subscription_id: your_azure_subscription_id + discovery: + type: azure +``` + +Restart elasticsearch: + +```sh +sudo service elasticsearch start +``` + +If anything goes wrong, check your logs in `/var/log/elasticsearch`. + + +TODO: Ask pierre for Azure commands + +Cloning your existing machine: + +```sh +azure .... +``` + + +Add a new machine: + +```sh +azure vm create -c myescluster --vm-name myesnode2 b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_04-amd64-server-20130501-en-us-30GB -l "West Europe" --vm-size extrasmall --ssh 22 elasticsearch fantastic0! +``` + +Add you certificate for this new instance. 
+ +```sh +# Add certificate for this instance +azure service cert create myescluster1 azure-certificate.cer +``` - See [tutorial](http://www.elasticsearch.org/tutorials/2013/04/02/elasticsearch-on-azure/). License ------- - This software is licensed under the Apache 2 license, quoted below. +This software is licensed under the Apache 2 license, quoted below. - Copyright 2009-2012 Shay Banon and ElasticSearch + Copyright 2009-2013 ElasticSearch Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of diff --git a/pom.xml b/pom.xml new file mode 100644 index 00000000000..3baf27948e9 --- /dev/null +++ b/pom.xml @@ -0,0 +1,145 @@ + + + + + elasticsearch-cloud-azure + 4.0.0 + org.elasticsearch + elasticsearch-cloud-azure + 0.1.0-SNAPSHOT + jar + Azure Cloud plugin for ElasticSearch + 2013 + + + The Apache Software License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + + + + scm:git:git@github.com:elasticsearch/elasticsearch-cloud-azure.git + scm:git:git@github.com:elasticsearch/elasticsearch-cloud-azure.git + + http://github.com/elasticsearch/elasticsearch-cloud-azure + + + + org.sonatype.oss + oss-parent + 7 + + + + 0.90.3 + 1.5.7 + + + + + org.elasticsearch + elasticsearch + ${elasticsearch.version} + + + + org.jclouds + jclouds-compute + ${jclouds.version} + + + org.jclouds.labs + azure-management + ${jclouds.version} + + + + log4j + log4j + 1.2.17 + test + + + junit + junit + 4.11 + test + + + + + + + + src/main/resources + true + + + + + src/test/resources + true + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.0 + + 1.6 + 1.6 + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.13 + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar + + + + + + maven-assembly-plugin + 2.4 + + false + ${project.build.directory}/releases/ + + ${basedir}/src/main/assemblies/plugin.xml + + + + + package + 
+ single + + + + + + + diff --git a/src/main/assemblies/plugin.xml b/src/main/assemblies/plugin.xml new file mode 100644 index 00000000000..a434a8a1ed0 --- /dev/null +++ b/src/main/assemblies/plugin.xml @@ -0,0 +1,38 @@ + + + + + plugin + + zip + + false + + + / + true + true + + org.elasticsearch:elasticsearch + + + + / + true + true + + org.jclouds:jclouds-compute + org.jclouds.labs:azure-management + + + + diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java new file mode 100644 index 00000000000..cdfad72e854 --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java @@ -0,0 +1,239 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure; + +import com.google.common.collect.ImmutableSet; +import com.google.inject.Module; +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Lists; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; +import org.elasticsearch.transport.TransportService; +import org.jclouds.Constants; +import org.jclouds.ContextBuilder; +import org.jclouds.azure.management.AzureManagementApi; +import org.jclouds.azure.management.AzureManagementApiMetadata; +import org.jclouds.azure.management.AzureManagementAsyncApi; +import org.jclouds.azure.management.config.AzureManagementProperties; +import org.jclouds.azure.management.domain.Deployment; +import org.jclouds.azure.management.domain.HostedServiceWithDetailedProperties; +import org.jclouds.logging.LoggingModules; +import org.jclouds.rest.RestContext; + +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.InetAddress; +import java.util.List; +import java.util.Properties; +import java.util.Scanner; +import java.util.Set; + +import static org.elasticsearch.common.Strings.cleanPath; + +/** + * + */ +public class AzureComputeService extends AbstractLifecycleComponent { + + static final class Fields { + private static final String ENDPOINT = "https://management.core.windows.net/"; + private static final String VERSION = "2012-08-01"; + private static final String SUBSCRIPTION_ID = "subscription_id"; + private 
static final String PASSWORD = "password"; + private static final String CERTIFICATE = "certificate"; + private static final String PRIVATE_KEY = "private_key"; + } + + private List discoNodes; + private TransportService transportService; + private NetworkService networkService; + + @Inject + public AzureComputeService(Settings settings, SettingsFilter settingsFilter, TransportService transportService, + NetworkService networkService) { + super(settings); + settingsFilter.addFilter(new AzureSettingsFilter()); + this.transportService = transportService; + this.networkService = networkService; + } + + /** + * We build the list of Nodes from Azure Management API + * @param client Azure Client + */ + private List buildNodes(RestContext client) { + List discoNodes = Lists.newArrayList(); + String ipAddress = null; + try { + InetAddress inetAddress = networkService.resolvePublishHostAddress(null); + if (inetAddress != null) { + ipAddress = inetAddress.getHostAddress(); + } + } catch (IOException e) { + // We can't find the publish host address... Hmmm. Too bad :-( + } + + Set response = client.getApi().getHostedServiceApi().list(); + + for (HostedServiceWithDetailedProperties hostedService : response) { + // Ask Azure for each IP address + Deployment deployment = client.getApi().getHostedServiceApi().getDeployment(hostedService.getName(), hostedService.getName()); + if (deployment != null) { + try { + if (deployment.getPrivateIpAddress().equals(ipAddress) || + deployment.getPublicIpAddress().equals(ipAddress)) { + // We found the current node. 
+ // We can ignore it in the list of DiscoveryNode + // We can now set the public Address as the publish address (if not already set) + String publishHost = settings.get("transport.publish_host", settings.get("transport.host")); + if (!Strings.hasText(publishHost) || !deployment.getPublicIpAddress().equals(publishHost)) { + logger.info("you should define publish_host with {}", deployment.getPublicIpAddress()); + } + } else { + TransportAddress[] addresses = transportService.addressesFromString(deployment.getPublicIpAddress()); + // we only limit to 1 addresses, makes no sense to ping 100 ports + for (int i = 0; (i < addresses.length && i < UnicastZenPing.LIMIT_PORTS_COUNT); i++) { + logger.trace("adding {}, address {}, transport_address {}", hostedService.getName(), deployment.getPublicIpAddress(), addresses[i]); + discoNodes.add(new DiscoveryNode("#cloud-" + hostedService.getName() + "-" + i, addresses[i], Version.CURRENT)); + } + } + } catch (Exception e) { + logger.warn("failed to add {}, address {}", e, hostedService.getName(), deployment.getPublicIpAddress()); + } + } else { + logger.trace("ignoring {}", hostedService.getName()); + } + } + + return discoNodes; + } + + public synchronized List nodes() { + if (this.discoNodes != null) { + return this.discoNodes; + } + + String PK8_PATH = componentSettings.get(Fields.PRIVATE_KEY, settings.get("cloud." + Fields.PRIVATE_KEY)); + String CERTIFICATE_PATH = componentSettings.get(Fields.CERTIFICATE, settings.get("cloud." + Fields.CERTIFICATE)); + String PASSWORD = componentSettings.get(Fields.PASSWORD, settings.get("cloud" + Fields.PASSWORD, "")); + String SUBSCRIPTION_ID = componentSettings.get(Fields.SUBSCRIPTION_ID, settings.get("cloud." 
+ Fields.SUBSCRIPTION_ID)); + + // Check that we have all needed properties + if (!checkProperty(Fields.SUBSCRIPTION_ID, SUBSCRIPTION_ID)) return null; + if (!checkProperty(Fields.CERTIFICATE, CERTIFICATE_PATH)) return null; + if (!checkProperty(Fields.PRIVATE_KEY, PK8_PATH)) return null; + + // Reading files from local disk + String pk8 = readFromFile(cleanPath(PK8_PATH)); + String cert = readFromFile(cleanPath(CERTIFICATE_PATH)); + + // Check file content + if (!checkProperty(Fields.CERTIFICATE, cert)) return null; + if (!checkProperty(Fields.PRIVATE_KEY, pk8)) return null; + + String IDENTITY = pk8 + cert; + + // We set properties used to create an Azure client + Properties overrides = new Properties(); + overrides.setProperty(Constants.PROPERTY_TRUST_ALL_CERTS, "true"); + overrides.setProperty(Constants.PROPERTY_RELAX_HOSTNAME, "true"); + overrides.setProperty("azure-management.identity", IDENTITY); + overrides.setProperty("azure-management.credential", PASSWORD); + overrides.setProperty("azure-management.endpoint", Fields.ENDPOINT + SUBSCRIPTION_ID); + overrides.setProperty("azure-management.api-version", Fields.VERSION); + overrides.setProperty("azure-management.build-version", ""); + overrides.setProperty(AzureManagementProperties.SUBSCRIPTION_ID, SUBSCRIPTION_ID); + + RestContext client = null; + + try { + client = ContextBuilder.newBuilder("azure-management") + .modules(ImmutableSet.of(LoggingModules.firstOrJDKLoggingModule())) + .overrides(overrides) + .build(AzureManagementApiMetadata.CONTEXT_TOKEN); + logger.debug("starting Azure discovery service for [{}]", SUBSCRIPTION_ID); + + this.discoNodes = buildNodes(client); + } catch (Throwable t) { + logger.warn("error while trying to find nodes for azure service [{}]: {}", SUBSCRIPTION_ID, t.getMessage()); + logger.debug("error found is: ", t); + // We create an empty list in that specific case. 
+ // So discovery process won't fail with NPE but this node won't join any cluster + this.discoNodes = Lists.newArrayList(); + } finally { + if (client != null) client.close(); + } + + logger.debug("using dynamic discovery nodes {}", discoNodes); + return this.discoNodes; + } + + @Override + protected void doStart() throws ElasticSearchException { + } + + @Override + protected void doStop() throws ElasticSearchException { + } + + @Override + protected void doClose() throws ElasticSearchException { + } + + private String readFromFile(String path) { + try { + logger.trace("reading file content from [{}]", path); + + StringBuilder text = new StringBuilder(); + String NL = System.getProperty("line.separator"); + Scanner scanner = new Scanner(new FileInputStream(path), "UTF-8"); + try { + while (scanner.hasNextLine()){ + text.append(scanner.nextLine() + NL); + } + return text.toString(); + } + finally{ + scanner.close(); + } + } catch (FileNotFoundException e) { + logger.trace("file does not exist [{}]", path); + } + + return null; + } + + private boolean checkProperty(String name, String value) { + if (!Strings.hasText(value)) { + logger.warn("cloud.azure.{} is not set. Disabling azure discovery.", name); + return false; + } + return true; + } +} diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java new file mode 100644 index 00000000000..b9bf9bf402e --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java @@ -0,0 +1,33 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure; + +import org.elasticsearch.common.inject.AbstractModule; + +/** + * + */ +public class AzureModule extends AbstractModule { + + @Override + protected void configure() { + bind(AzureComputeService.class).asEagerSingleton(); + } +} diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java new file mode 100644 index 00000000000..9728c2c253c --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java @@ -0,0 +1,37 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure; + +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.SettingsFilter; + +/** + * + */ +public class AzureSettingsFilter implements SettingsFilter.Filter { + + @Override + public void filter(ImmutableSettings.Builder settings) { + settings.remove("cloud.private_key"); + settings.remove("cloud.certificate"); + settings.remove("cloud.azure.password"); + settings.remove("cloud.azure.subscription_id"); + } +} diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java new file mode 100755 index 00000000000..2af216eb4c8 --- /dev/null +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -0,0 +1,68 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.azure; + +import org.elasticsearch.Version; +import org.elasticsearch.cloud.azure.AzureComputeService; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.node.DiscoveryNodeService; +import org.elasticsearch.common.collect.ImmutableList; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.ping.ZenPing; +import org.elasticsearch.discovery.zen.ping.ZenPingService; +import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; +import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * + */ +public class AzureDiscovery extends ZenDiscovery { + + @Inject + public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, + ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, + DiscoveryNodeService discoveryNodeService, AzureComputeService azureService) { + super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, discoveryNodeService, pingService, Version.CURRENT); + if (settings.getAsBoolean("cloud.enabled", true)) { + ImmutableList zenPings = pingService.zenPings(); + UnicastZenPing unicastZenPing = null; + for (ZenPing zenPing : zenPings) { + if (zenPing instanceof UnicastZenPing) { + unicastZenPing = (UnicastZenPing) zenPing; + break; + } + } + + if (unicastZenPing != null) { + // update the unicast zen ping to add cloud hosts provider + // and, while we are at it, use only it and not the multicast for example + unicastZenPing.addHostsProvider(new AzureUnicastHostsProvider(settings, azureService.nodes())); + 
pingService.zenPings(ImmutableList.of(unicastZenPing)); + } else { + logger.warn("failed to apply azure unicast discovery, no unicast ping found"); + } + } + } +} diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java new file mode 100644 index 00000000000..cd46618d979 --- /dev/null +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java @@ -0,0 +1,34 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.azure; + +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.zen.ZenDiscoveryModule; + +/** + * + */ +public class AzureDiscoveryModule extends ZenDiscoveryModule { + + @Override + protected void bindDiscovery() { + bind(Discovery.class).to(AzureDiscovery.class).asEagerSingleton(); + } +} diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java new file mode 100644 index 00000000000..5bc482b93d0 --- /dev/null +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java @@ -0,0 +1,47 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.azure; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; + +import java.util.List; + +/** + * + */ +public class AzureUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { + + private final List discoNodes; + + @Inject + public AzureUnicastHostsProvider(Settings settings, List discoNodes) { + super(settings); + this.discoNodes = discoNodes; + } + + @Override + public List buildDynamicNodes() { + return discoNodes; + } +} diff --git a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java new file mode 100644 index 00000000000..42d2d086cdc --- /dev/null +++ b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java @@ -0,0 +1,71 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.plugin.cloud.azure; + +import org.elasticsearch.cloud.azure.AzureComputeService; +import org.elasticsearch.cloud.azure.AzureModule; +import org.elasticsearch.common.collect.Lists; +import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.AbstractPlugin; + +import java.util.Collection; + +/** + * + */ +public class CloudAzurePlugin extends AbstractPlugin { + + private final Settings settings; + + public CloudAzurePlugin(Settings settings) { + this.settings = settings; + } + + @Override + public String name() { + return "cloud-azure"; + } + + @Override + public String description() { + return "Cloud Azure Plugin"; + } + + @Override + public Collection> modules() { + Collection> modules = Lists.newArrayList(); + if (settings.getAsBoolean("cloud.enabled", true)) { + modules.add(AzureModule.class); + } + return modules; + } + + @Override + public Collection> services() { + Collection> services = Lists.newArrayList(); + if (settings.getAsBoolean("cloud.enabled", true)) { + services.add(AzureComputeService.class); + } + return services; + } + +} diff --git a/src/main/resources/es-plugin.properties b/src/main/resources/es-plugin.properties new file mode 100644 index 00000000000..21cdee38637 --- /dev/null +++ b/src/main/resources/es-plugin.properties @@ -0,0 +1,12 @@ +# Licensed to ElasticSearch under one or more contributor +# license agreements. See the NOTICE file distributed with this work for additional +# information regarding copyright ownership. ElasticSearch licenses this file to you +# under the Apache License, Version 2.0 (the "License"); you may not use this +# file except in compliance with the License. 
You may obtain a copy of the +# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by +# applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. +plugin=org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin +version=${project.version} diff --git a/src/test/java/AzureSimpleTest.java b/src/test/java/AzureSimpleTest.java new file mode 100644 index 00000000000..885f931afe8 --- /dev/null +++ b/src/test/java/AzureSimpleTest.java @@ -0,0 +1,58 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeBuilder; +import org.junit.Test; + +import java.io.File; + +/** + * To run this *test*, you need first to modify test/resources dir: + * - elasticsearch.yml: set your azure password and azure subscription_id + * - azure.crt: fill this file with your azure certificate (PEM format) + * - azure.pk: fill this file with your azure private key (PEM format) + */ +public class AzureSimpleTest { + + @Test + public void launchNode() { + File dataDir = new File("./target/es/data"); + if(dataDir.exists()) { + FileSystemUtils.deleteRecursively(dataDir, true); + } + + // Then we start our node for tests + Node node = NodeBuilder + .nodeBuilder() + .settings( + ImmutableSettings.settingsBuilder() + .put("gateway.type", "local") + .put("path.data", "./target/es/data") + .put("path.logs", "./target/es/logs") + .put("path.work", "./target/es/work") + ).node(); + + // We wait now for the yellow (or green) status +// node.client().admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(); + + } +} diff --git a/src/test/resources/azure.crt b/src/test/resources/azure.crt new file mode 100644 index 00000000000..1be5a80046a --- /dev/null +++ b/src/test/resources/azure.crt @@ -0,0 +1,3 @@ +-----BEGIN CERTIFICATE----- +YOUR-CERTIFICATE-HERE +-----END CERTIFICATE----- diff --git a/src/test/resources/azure.pk b/src/test/resources/azure.pk new file mode 100644 index 00000000000..6406c000190 --- /dev/null +++ b/src/test/resources/azure.pk @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +YOUR-PK-HERE +-----END PRIVATE KEY----- diff --git a/src/test/resources/elasticsearch.yml b/src/test/resources/elasticsearch.yml new file mode 100644 index 00000000000..9ad55935713 --- /dev/null +++ b/src/test/resources/elasticsearch.yml @@ -0,0 +1,29 @@ +# Licensed to ElasticSearch under one or more 
contributor +# license agreements. See the NOTICE file distributed with this work for additional +# information regarding copyright ownership. ElasticSearch licenses this file to you +# under the Apache License, Version 2.0 (the "License"); you may not use this +# file except in compliance with the License. You may obtain a copy of the +# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by +# applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. + + cluster.name: azure +# Azure discovery allows to use Azure API in order to perform discovery. +# +# You have to install the cloud-azure plugin for enabling the Azure discovery. +# +# See +# for more information. +# +# See +# for a step-by-step tutorial. + cloud: + azure: + private_key: ${project.build.testOutputDirectory}/azure.pk + certificate: ${project.build.testOutputDirectory}/azure.crt + password: YOUR-PASSWORD + subscription_id: YOUR-AZURE-SUBSCRIPTION-ID + discovery: + type: azure diff --git a/src/test/resources/log4j.xml b/src/test/resources/log4j.xml new file mode 100644 index 00000000000..46249c145c5 --- /dev/null +++ b/src/test/resources/log4j.xml @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From a1f85f6d900ac6e296830fcbc26fcf4837877f43 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 6 Nov 2013 09:54:30 +0100 Subject: [PATCH 003/125] Preparing release for 1.0.0-Alpha --- README.md | 129 +++++++--- pom.xml | 16 +- src/main/assemblies/plugin.xml | 9 - .../cloud/azure/AzureComputeService.java | 219 +---------------- .../cloud/azure/AzureComputeServiceImpl.java | 221 ++++++++++++++++++ .../cloud/azure/AzureModule.java | 12 +- .../elasticsearch/cloud/azure/Instance.java | 113 +++++++++ 
.../discovery/azure/AzureDiscovery.java | 5 +- .../azure/AzureUnicastHostsProvider.java | 120 +++++++++- .../plugin/cloud/azure/CloudAzurePlugin.java | 12 - src/test/java/AzureSimpleTest.java | 58 ----- .../azure/itest/AzureSimpleITest.java | 37 +++ .../azure/test/AzureAbstractTest.java | 144 ++++++++++++ .../test/AzureComputeServiceAbstractMock.java | 50 ++++ .../test/AzureComputeServiceSimpleMock.java | 49 ++++ .../test/AzureComputeServiceTwoNodesMock.java | 51 ++++ .../test/AzureInstanceXmlParserTest.java | 61 +++++ .../azure/test/AzureSimpleTest.java | 37 +++ .../azure/test/AzureTwoStartedNodesTest.java | 38 +++ src/test/resources/azure.crt | 3 - src/test/resources/azure.pk | 3 - src/test/resources/elasticsearch.yml | 5 +- src/test/resources/log4j.xml | 6 +- .../org/elasticsearch/azure/test/services.xml | 170 ++++++++++++++ 24 files changed, 1211 insertions(+), 357 deletions(-) create mode 100644 src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/Instance.java delete mode 100644 src/test/java/AzureSimpleTest.java create mode 100644 src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java create mode 100644 src/test/java/org/elasticsearch/azure/test/AzureAbstractTest.java create mode 100644 src/test/java/org/elasticsearch/azure/test/AzureComputeServiceAbstractMock.java create mode 100644 src/test/java/org/elasticsearch/azure/test/AzureComputeServiceSimpleMock.java create mode 100644 src/test/java/org/elasticsearch/azure/test/AzureComputeServiceTwoNodesMock.java create mode 100644 src/test/java/org/elasticsearch/azure/test/AzureInstanceXmlParserTest.java create mode 100644 src/test/java/org/elasticsearch/azure/test/AzureSimpleTest.java create mode 100644 src/test/java/org/elasticsearch/azure/test/AzureTwoStartedNodesTest.java delete mode 100644 src/test/resources/azure.crt delete mode 100644 src/test/resources/azure.pk create mode 100644 
src/test/resources/org/elasticsearch/azure/test/services.xml diff --git a/README.md b/README.md index e7b55c88fde..f790ba5791a 100644 --- a/README.md +++ b/README.md @@ -5,13 +5,27 @@ The Azure Cloud plugin allows to use Azure API for the unicast discovery mechani In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0`. - ----------------------------------------- - | Azure Cloud Plugin | ElasticSearch | - ----------------------------------------- - | master | 0.90 -> master | - ----------------------------------------- - | 1.0.0 | 0.20.6 | - ----------------------------------------- + + + + + + + + + + + + + + + + + + + + +
Azure Cloud PluginElasticSearchRelease date
1.1.0-SNAPSHOT (master)0.90.6
1.0.00.90.62013-11-12
Azure Virtual Machine Discovery @@ -23,10 +37,10 @@ multicast environments). Here is a simple sample configuration: ``` cloud: azure: - private_key: /path/to/private.key - certificate: /path/to/azure.certficate - password: your_password_for_pk + keystore: /path/to/keystore + password: your_password_for_keystore subscription_id: your_azure_subscription_id + service_name: your_azure_cloud_service_name discovery: type: azure ``` @@ -124,6 +138,7 @@ Let's say we are going to deploy an Ubuntu image on an extra small instance in W * VM Size: `extrasmall` * Location: `West Europe` * Login: `elasticsearch` +* Password: `password1234!!` Using command line: @@ -135,7 +150,7 @@ azure vm create azure-elasticsearch-cluster \ --vm-size extrasmall \ --ssh 22 \ --ssh-cert /tmp/azure-certificate.pem \ - elasticsearch password + elasticsearch password1234!! ``` You should see something like: @@ -183,15 +198,17 @@ ssh azure-elasticsearch-cluster.cloudapp.net Once connected, install Elasticsearch: ```sh -# Install Latest JDK +# Install Latest OpenJDK +# If you would like to use Oracle JDK instead, read the following: +# http://www.webupd8.org/2012/01/install-oracle-java-jdk-7-in-ubuntu-via.html sudo apt-get update sudo apt-get install openjdk-7-jre-headless # Download Elasticsearch -curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.3.deb -o elasticsearch-0.90.3.deb +curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.6.deb -o elasticsearch-0.90.6.deb # Prepare Elasticsearch installation -sudo dpkg -i elasticsearch-0.90.3.deb +sudo dpkg -i elasticsearch-0.90.6.deb ``` Check that elasticsearch is running: @@ -206,13 +223,13 @@ This command should give you a JSON result: { "ok" : true, "status" : 200, - "name" : "Mandarin", + "name" : "Grey, Dr. 
John", "version" : { - "number" : "0.90.3", - "build_hash" : "5c38d6076448b899d758f29443329571e2522410", - "build_timestamp" : "2013-08-06T13:18:31Z", + "number" : "0.90.6", + "build_hash" : "e2a24efdde0cb7cc1b2071ffbbd1fd874a6d8d6b", + "build_timestamp" : "2013-11-04T13:44:16Z", "build_snapshot" : false, - "lucene_version" : "4.4" + "lucene_version" : "4.5.1" }, "tagline" : "You Know, for Search" } @@ -220,8 +237,6 @@ This command should give you a JSON result: ### Install nodejs and Azure tools -*TODO: check if there is a downloadable version of NodeJS* - ```sh # Install node (aka download and compile source) sudo apt-get update @@ -240,7 +255,7 @@ azure account import /tmp/azure.publishsettings rm /tmp/azure.publishsettings ``` -### Generate private keys for this instance +### Generate a keystore ```sh openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure-private.key -out azure-certificate.pem @@ -250,9 +265,13 @@ openssl x509 -outform der -in azure-certificate.pem -out azure-certificate.cer openssl pkcs8 -topk8 -nocrypt -in azure-private.key -inform PEM -out azure-pk.pem -outform PEM # Transform certificate to PEM format openssl x509 -inform der -in azure-certificate.cer -out azure-cert.pem +cat azure-cert.pem azure-pk.pem > azure.pem.txt +# You MUST enter a password! +openssl pkcs12 -export -in azure.pem.txt -out azurekeystore.pkcs12 -name azure -noiter -nomaciter ``` -Upload the generated key to Azure platform +Upload the generated key to Azure platform. **Important**: when prompted for a password, +you need to enter a non empty one. 
```sh azure service cert create azure-elasticsearch-cluster azure-certificate.cer @@ -264,8 +283,8 @@ azure service cert create azure-elasticsearch-cluster azure-certificate.cer # Stop elasticsearch sudo service elasticsearch stop -# Install the plugin (TODO : USE THE RIGHT VERSION NUMBER) -sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/0.1.0-SNAPSHOT +# Install the plugin +sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0 # Configure it sudo vi /etc/elasticsearch/elasticsearch.yml @@ -277,9 +296,10 @@ And add the following lines: # If you don't remember your account id, you may get it with `azure account list` cloud: azure: - private_key: /home/elasticsearch/azure-pk.pem - certificate: /home/elasticsearch/azure-cert.pem + keystore: /home/elasticsearch/azurekeystore.pkcs12 + password: your_password_for_keystore subscription_id: your_azure_subscription_id + service_name: your_azure_cloud_service_name discovery: type: azure ``` @@ -293,28 +313,63 @@ sudo service elasticsearch start If anything goes wrong, check your logs in `/var/log/elasticsearch`. -TODO: Ask pierre for Azure commands +Scaling Out! +------------ -Cloning your existing machine: +You need first to create an image of your previous machine. +Disconnect from your machine and run locally the following commands: ```sh -azure .... +# Shutdown the instance +azure vm shutdown myesnode1 + +# Create an image from this instance (it could take some minutes) +azure vm capture myesnode1 esnode-image --delete + +# Note that the previous instance has been deleted (mandatory) +# So you need to create it again and BTW create other instances. + +azure vm create azure-elasticsearch-cluster \ + esnode-image \ + --vm-name myesnode1 \ + --location "West Europe" \ + --vm-size extrasmall \ + --ssh 22 \ + --ssh-cert /tmp/azure-certificate.pem \ + elasticsearch password1234!! 
``` +> **Note:** It could happen that azure changes the endpoint public IP address. +> DNS propagation could take some minutes before you can connect again using +> name. You can get from azure the IP address if needed, using: +> +> ```sh +> # Look at Network `Endpoints 0 Vip` +> azure vm show myesnode1 +> ``` -Add a new machine: +Let's start more instances! ```sh -azure vm create -c myescluster --vm-name myesnode2 b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_04-amd64-server-20130501-en-us-30GB -l "West Europe" --vm-size extrasmall --ssh 22 elasticsearch fantastic0! +for x in $(seq 2 10) + do + echo "Launching azure instance #$x..." + azure vm create azure-elasticsearch-cluster \ + esnode-image \ + --vm-name myesnode$x \ + --vm-size extrasmall \ + --ssh $((21 + $x)) \ + --ssh-cert /tmp/azure-certificate.pem \ + --connect \ + elasticsearch password1234!! + done ``` -Add you certificate for this new instance. +If you want to remove your running instances: -```sh -# Add certificate for this instance -azure service cert create myescluster1 azure-certificate.cer ``` - +azure vm delete myesnode1 +``` License ------- diff --git a/pom.xml b/pom.xml index 3baf27948e9..b20ecad75a5 100644 --- a/pom.xml +++ b/pom.xml @@ -17,7 +17,7 @@ governing permissions and limitations under the License. --> 4.0.0 org.elasticsearch elasticsearch-cloud-azure - 0.1.0-SNAPSHOT + 1.0.0-SNAPSHOT jar Azure Cloud plugin for ElasticSearch 2013 @@ -42,8 +42,7 @@ governing permissions and limitations under the License. --> - 0.90.3 - 1.5.7 + 0.90.6 @@ -53,17 +52,6 @@ governing permissions and limitations under the License. 
--> ${elasticsearch.version} - - org.jclouds - jclouds-compute - ${jclouds.version} - - - org.jclouds.labs - azure-management - ${jclouds.version} - - log4j log4j diff --git a/src/main/assemblies/plugin.xml b/src/main/assemblies/plugin.xml index a434a8a1ed0..634831c235c 100644 --- a/src/main/assemblies/plugin.xml +++ b/src/main/assemblies/plugin.xml @@ -25,14 +25,5 @@ governing permissions and limitations under the License. --> org.elasticsearch:elasticsearch - - / - true - true - - org.jclouds:jclouds-compute - org.jclouds.labs:azure-management - - diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java index cdfad72e854..5f19ca22632 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java @@ -19,221 +19,22 @@ package org.elasticsearch.cloud.azure; -import com.google.common.collect.ImmutableSet; -import com.google.inject.Module; -import org.elasticsearch.ElasticSearchException; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Lists; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; -import org.elasticsearch.transport.TransportService; -import org.jclouds.Constants; -import org.jclouds.ContextBuilder; -import org.jclouds.azure.management.AzureManagementApi; -import org.jclouds.azure.management.AzureManagementApiMetadata; -import org.jclouds.azure.management.AzureManagementAsyncApi; -import 
org.jclouds.azure.management.config.AzureManagementProperties; -import org.jclouds.azure.management.domain.Deployment; -import org.jclouds.azure.management.domain.HostedServiceWithDetailedProperties; -import org.jclouds.logging.LoggingModules; -import org.jclouds.rest.RestContext; - -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.net.InetAddress; -import java.util.List; -import java.util.Properties; -import java.util.Scanner; import java.util.Set; -import static org.elasticsearch.common.Strings.cleanPath; - /** * */ -public class AzureComputeService extends AbstractLifecycleComponent { +public interface AzureComputeService { - static final class Fields { - private static final String ENDPOINT = "https://management.core.windows.net/"; - private static final String VERSION = "2012-08-01"; - private static final String SUBSCRIPTION_ID = "subscription_id"; - private static final String PASSWORD = "password"; - private static final String CERTIFICATE = "certificate"; - private static final String PRIVATE_KEY = "private_key"; + static public final class Fields { + public static final String SUBSCRIPTION_ID = "subscription_id"; + public static final String SERVICE_NAME = "service_name"; + public static final String KEYSTORE = "keystore"; + public static final String PASSWORD = "password"; + public static final String REFRESH = "refresh_interval"; + public static final String PORT_NAME = "port_name"; + public static final String HOST_TYPE = "host_type"; } - private List discoNodes; - private TransportService transportService; - private NetworkService networkService; - - @Inject - public AzureComputeService(Settings settings, SettingsFilter settingsFilter, TransportService transportService, - NetworkService networkService) { - super(settings); - settingsFilter.addFilter(new AzureSettingsFilter()); - this.transportService = transportService; - this.networkService = networkService; - } - - /** - * We build the 
list of Nodes from Azure Management API - * @param client Azure Client - */ - private List buildNodes(RestContext client) { - List discoNodes = Lists.newArrayList(); - String ipAddress = null; - try { - InetAddress inetAddress = networkService.resolvePublishHostAddress(null); - if (inetAddress != null) { - ipAddress = inetAddress.getHostAddress(); - } - } catch (IOException e) { - // We can't find the publish host address... Hmmm. Too bad :-( - } - - Set response = client.getApi().getHostedServiceApi().list(); - - for (HostedServiceWithDetailedProperties hostedService : response) { - // Ask Azure for each IP address - Deployment deployment = client.getApi().getHostedServiceApi().getDeployment(hostedService.getName(), hostedService.getName()); - if (deployment != null) { - try { - if (deployment.getPrivateIpAddress().equals(ipAddress) || - deployment.getPublicIpAddress().equals(ipAddress)) { - // We found the current node. - // We can ignore it in the list of DiscoveryNode - // We can now set the public Address as the publish address (if not already set) - String publishHost = settings.get("transport.publish_host", settings.get("transport.host")); - if (!Strings.hasText(publishHost) || !deployment.getPublicIpAddress().equals(publishHost)) { - logger.info("you should define publish_host with {}", deployment.getPublicIpAddress()); - } - } else { - TransportAddress[] addresses = transportService.addressesFromString(deployment.getPublicIpAddress()); - // we only limit to 1 addresses, makes no sense to ping 100 ports - for (int i = 0; (i < addresses.length && i < UnicastZenPing.LIMIT_PORTS_COUNT); i++) { - logger.trace("adding {}, address {}, transport_address {}", hostedService.getName(), deployment.getPublicIpAddress(), addresses[i]); - discoNodes.add(new DiscoveryNode("#cloud-" + hostedService.getName() + "-" + i, addresses[i], Version.CURRENT)); - } - } - } catch (Exception e) { - logger.warn("failed to add {}, address {}", e, hostedService.getName(), 
deployment.getPublicIpAddress()); - } - } else { - logger.trace("ignoring {}", hostedService.getName()); - } - } - - return discoNodes; - } - - public synchronized List nodes() { - if (this.discoNodes != null) { - return this.discoNodes; - } - - String PK8_PATH = componentSettings.get(Fields.PRIVATE_KEY, settings.get("cloud." + Fields.PRIVATE_KEY)); - String CERTIFICATE_PATH = componentSettings.get(Fields.CERTIFICATE, settings.get("cloud." + Fields.CERTIFICATE)); - String PASSWORD = componentSettings.get(Fields.PASSWORD, settings.get("cloud" + Fields.PASSWORD, "")); - String SUBSCRIPTION_ID = componentSettings.get(Fields.SUBSCRIPTION_ID, settings.get("cloud." + Fields.SUBSCRIPTION_ID)); - - // Check that we have all needed properties - if (!checkProperty(Fields.SUBSCRIPTION_ID, SUBSCRIPTION_ID)) return null; - if (!checkProperty(Fields.CERTIFICATE, CERTIFICATE_PATH)) return null; - if (!checkProperty(Fields.PRIVATE_KEY, PK8_PATH)) return null; - - // Reading files from local disk - String pk8 = readFromFile(cleanPath(PK8_PATH)); - String cert = readFromFile(cleanPath(CERTIFICATE_PATH)); - - // Check file content - if (!checkProperty(Fields.CERTIFICATE, cert)) return null; - if (!checkProperty(Fields.PRIVATE_KEY, pk8)) return null; - - String IDENTITY = pk8 + cert; - - // We set properties used to create an Azure client - Properties overrides = new Properties(); - overrides.setProperty(Constants.PROPERTY_TRUST_ALL_CERTS, "true"); - overrides.setProperty(Constants.PROPERTY_RELAX_HOSTNAME, "true"); - overrides.setProperty("azure-management.identity", IDENTITY); - overrides.setProperty("azure-management.credential", PASSWORD); - overrides.setProperty("azure-management.endpoint", Fields.ENDPOINT + SUBSCRIPTION_ID); - overrides.setProperty("azure-management.api-version", Fields.VERSION); - overrides.setProperty("azure-management.build-version", ""); - overrides.setProperty(AzureManagementProperties.SUBSCRIPTION_ID, SUBSCRIPTION_ID); - - RestContext client = null; - - try 
{ - client = ContextBuilder.newBuilder("azure-management") - .modules(ImmutableSet.of(LoggingModules.firstOrJDKLoggingModule())) - .overrides(overrides) - .build(AzureManagementApiMetadata.CONTEXT_TOKEN); - logger.debug("starting Azure discovery service for [{}]", SUBSCRIPTION_ID); - - this.discoNodes = buildNodes(client); - } catch (Throwable t) { - logger.warn("error while trying to find nodes for azure service [{}]: {}", SUBSCRIPTION_ID, t.getMessage()); - logger.debug("error found is: ", t); - // We create an empty list in that specific case. - // So discovery process won't fail with NPE but this node won't join any cluster - this.discoNodes = Lists.newArrayList(); - } finally { - if (client != null) client.close(); - } - - logger.debug("using dynamic discovery nodes {}", discoNodes); - return this.discoNodes; - } - - @Override - protected void doStart() throws ElasticSearchException { - } - - @Override - protected void doStop() throws ElasticSearchException { - } - - @Override - protected void doClose() throws ElasticSearchException { - } - - private String readFromFile(String path) { - try { - logger.trace("reading file content from [{}]", path); - - StringBuilder text = new StringBuilder(); - String NL = System.getProperty("line.separator"); - Scanner scanner = new Scanner(new FileInputStream(path), "UTF-8"); - try { - while (scanner.hasNextLine()){ - text.append(scanner.nextLine() + NL); - } - return text.toString(); - } - finally{ - scanner.close(); - } - } catch (FileNotFoundException e) { - logger.trace("file does not exist [{}]", path); - } - - return null; - } - - private boolean checkProperty(String name, String value) { - if (!Strings.hasText(value)) { - logger.warn("cloud.azure.{} is not set. 
Disabling azure discovery.", name); - return false; - } - return true; - } + public Set instances(); } diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java new file mode 100644 index 00000000000..f44270df150 --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java @@ -0,0 +1,221 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.settings.SettingsFilter; +import org.w3c.dom.Document; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; +import org.xml.sax.SAXException; + +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocketFactory; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.xpath.XPath; +import javax.xml.xpath.XPathConstants; +import javax.xml.xpath.XPathExpressionException; +import javax.xml.xpath.XPathFactory; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URL; +import java.security.*; +import java.security.cert.CertificateException; +import java.util.HashSet; +import java.util.Set; + +/** + * + */ +public class AzureComputeServiceImpl extends AbstractLifecycleComponent + implements AzureComputeService { + + static final class Azure { + private static final String ENDPOINT = "https://management.core.windows.net/"; + private static final String VERSION = "2013-03-01"; + } + + private SSLSocketFactory socketFactory; + private final String keystore; + private final String password; + private final String subscription_id; + private final String service_name; + private final String port_name; + + @Inject + public AzureComputeServiceImpl(Settings settings, SettingsFilter settingsFilter) { + super(settings); + settingsFilter.addFilter(new AzureSettingsFilter()); + + // Creating socketFactory + 
subscription_id = componentSettings.get(Fields.SUBSCRIPTION_ID, settings.get("cloud.azure." + Fields.SUBSCRIPTION_ID)); + service_name = componentSettings.get(Fields.SERVICE_NAME, settings.get("cloud.azure." + Fields.SERVICE_NAME)); + keystore = componentSettings.get(Fields.KEYSTORE, settings.get("cloud.azure." + Fields.KEYSTORE)); + password = componentSettings.get(Fields.PASSWORD, settings.get("cloud.azure." + Fields.PASSWORD)); + port_name = componentSettings.get(Fields.PORT_NAME, settings.get("cloud.azure." + Fields.PORT_NAME, "elasticsearch")); + + // Check that we have all needed properties + try { + checkProperty(Fields.SUBSCRIPTION_ID, subscription_id); + checkProperty(Fields.SERVICE_NAME, service_name); + checkProperty(Fields.KEYSTORE, keystore); + checkProperty(Fields.PASSWORD, password); + socketFactory = getSocketFactory(keystore, password); + + if (logger.isTraceEnabled()) logger.trace("creating new Azure client for [{}], [{}], [{}], [{}]", + subscription_id, service_name, port_name); + } catch (Exception e) { + // Can not start Azure Client + logger.error("can not start azure client: {}", e.getMessage()); + socketFactory = null; + } + } + + private InputStream getXML(String api) throws UnrecoverableKeyException, NoSuchAlgorithmException, KeyStoreException, IOException, CertificateException, KeyManagementException { + String https_url = Azure.ENDPOINT + subscription_id + api; + + URL url = new URL( https_url ); + HttpsURLConnection con = (HttpsURLConnection) url.openConnection(); + con.setSSLSocketFactory( socketFactory ); + con.setRequestProperty("x-ms-version", Azure.VERSION); + + if (logger.isDebugEnabled()) logger.debug("calling azure REST API: {}", api); + if (logger.isTraceEnabled()) logger.trace("get {} from azure", https_url); + + return con.getInputStream(); + } + + @Override + public Set instances() { + if (socketFactory == null) { + // Azure plugin is disabled + if (logger.isTraceEnabled()) logger.trace("azure plugin is disabled. 
Returning an empty list of nodes."); + return new HashSet(); + } else { + try { + String SERVICE_NAME = componentSettings.get(Fields.SERVICE_NAME, settings.get("cloud." + Fields.SERVICE_NAME)); + InputStream stream = getXML("/services/hostedservices/" + SERVICE_NAME + "?embed-detail=true"); + Set instances = buildInstancesFromXml(stream, port_name); + if (logger.isTraceEnabled()) logger.trace("get instances from azure: {}", instances); + + return instances; + + } catch (ParserConfigurationException e) { + logger.warn("can not parse XML response: {}", e.getMessage()); + return new HashSet(); + } catch (XPathExpressionException e) { + logger.warn("can not parse XML response: {}", e.getMessage()); + return new HashSet(); + } catch (SAXException e) { + logger.warn("can not parse XML response: {}", e.getMessage()); + return new HashSet(); + } catch (Exception e) { + logger.warn("can not get list of azure nodes: {}", e.getMessage()); + return new HashSet(); + } + } + } + + private static String extractValueFromPath(Node node, String path) throws XPathExpressionException { + XPath xPath = XPathFactory.newInstance().newXPath(); + Node subnode = (Node) xPath.compile(path).evaluate(node, XPathConstants.NODE); + return subnode.getFirstChild().getNodeValue(); + } + + public static Set buildInstancesFromXml(InputStream inputStream, String port_name) throws ParserConfigurationException, IOException, SAXException, XPathExpressionException { + Set instances = new HashSet(); + + DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance(); + DocumentBuilder dBuilder = dbFactory.newDocumentBuilder(); + Document doc = dBuilder.parse(inputStream); + + doc.getDocumentElement().normalize(); + + XPath xPath = XPathFactory.newInstance().newXPath(); + + // We only fetch Started nodes (TODO: should we start with all nodes whatever the status is?) 
+ String expression = "/HostedService/Deployments/Deployment/RoleInstanceList/RoleInstance[PowerState='Started']"; + NodeList nodeList = (NodeList) xPath.compile(expression).evaluate(doc, XPathConstants.NODESET); + for (int i = 0; i < nodeList.getLength(); i++) { + Instance instance = new Instance(); + Node node = nodeList.item(i); + instance.setPrivateIp(extractValueFromPath(node, "IpAddress")); + instance.setName(extractValueFromPath(node, "InstanceName")); + instance.setStatus(Instance.Status.STARTED); + + // Let's digg into + expression = "InstanceEndpoints/InstanceEndpoint[Name='"+ port_name +"']"; + NodeList endpoints = (NodeList) xPath.compile(expression).evaluate(node, XPathConstants.NODESET); + for (int j = 0; j < endpoints.getLength(); j++) { + Node endpoint = endpoints.item(j); + instance.setPublicIp(extractValueFromPath(endpoint, "Vip")); + instance.setPublicPort(extractValueFromPath(endpoint, "PublicPort")); + } + + instances.add(instance); + } + + return instances; + } + + private SSLSocketFactory getSocketFactory(String keystore, String password) throws NoSuchAlgorithmException, KeyStoreException, IOException, CertificateException, UnrecoverableKeyException, KeyManagementException { + File pKeyFile = new File(keystore); + KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); + KeyStore keyStore = KeyStore.getInstance("PKCS12"); + InputStream keyInput = new FileInputStream(pKeyFile); + keyStore.load(keyInput, password.toCharArray()); + keyInput.close(); + keyManagerFactory.init(keyStore, password.toCharArray()); + + SSLContext context = SSLContext.getInstance("TLS"); + context.init(keyManagerFactory.getKeyManagers(), null, new SecureRandom()); + return context.getSocketFactory(); + } + + @Override + protected void doStart() throws ElasticSearchException { + } + + @Override + protected void doStop() throws ElasticSearchException { + } + + @Override + protected void doClose() throws ElasticSearchException { + } + + private void 
checkProperty(String name, String value) throws ElasticSearchException { + if (!Strings.hasText(value)) { + throw new SettingsException("cloud.azure." + name +" is not set or is incorrect."); + } + } + +} diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java index b9bf9bf402e..7582a0c436a 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java @@ -20,14 +20,24 @@ package org.elasticsearch.cloud.azure; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; /** * */ public class AzureModule extends AbstractModule { + private Settings settings; + + @Inject + public AzureModule(Settings settings) { + this.settings = settings; + } @Override protected void configure() { - bind(AzureComputeService.class).asEagerSingleton(); + bind(AzureComputeService.class) + .to(settings.getAsClass("cloud.azure.api.impl", AzureComputeServiceImpl.class)) + .asEagerSingleton(); } } diff --git a/src/main/java/org/elasticsearch/cloud/azure/Instance.java b/src/main/java/org/elasticsearch/cloud/azure/Instance.java new file mode 100644 index 00000000000..03acacd3a94 --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/Instance.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch (the "Author") under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Author licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure; + +/** + * Define an Azure Instance + */ +public class Instance { + public static enum Status { + STARTED; + } + + private String privateIp; + private String publicIp; + private String publicPort; + private Status status; + private String name; + + public String getPrivateIp() { + return privateIp; + } + + public void setPrivateIp(String privateIp) { + this.privateIp = privateIp; + } + + public Status getStatus() { + return status; + } + + public void setStatus(Status status) { + this.status = status; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getPublicIp() { + return publicIp; + } + + public void setPublicIp(String publicIp) { + this.publicIp = publicIp; + } + + public String getPublicPort() { + return publicPort; + } + + public void setPublicPort(String publicPort) { + this.publicPort = publicPort; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Instance instance = (Instance) o; + + if (name != null ? !name.equals(instance.name) : instance.name != null) return false; + if (privateIp != null ? !privateIp.equals(instance.privateIp) : instance.privateIp != null) return false; + if (publicIp != null ? !publicIp.equals(instance.publicIp) : instance.publicIp != null) return false; + if (publicPort != null ? 
!publicPort.equals(instance.publicPort) : instance.publicPort != null) return false; + if (status != instance.status) return false; + + return true; + } + + @Override + public int hashCode() { + int result = privateIp != null ? privateIp.hashCode() : 0; + result = 31 * result + (publicIp != null ? publicIp.hashCode() : 0); + result = 31 * result + (publicPort != null ? publicPort.hashCode() : 0); + result = 31 * result + (status != null ? status.hashCode() : 0); + result = 31 * result + (name != null ? name.hashCode() : 0); + return result; + } + + @Override + public String toString() { + final StringBuffer sb = new StringBuffer("Instance{"); + sb.append("privateIp='").append(privateIp).append('\''); + sb.append(", publicIp='").append(publicIp).append('\''); + sb.append(", publicPort='").append(publicPort).append('\''); + sb.append(", status=").append(status); + sb.append(", name='").append(name).append('\''); + sb.append('}'); + return sb.toString(); + } +} diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 2af216eb4c8..6313e152383 100755 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.common.collect.ImmutableList; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.ping.ZenPing; @@ -43,7 +44,7 @@ public class AzureDiscovery extends ZenDiscovery { @Inject public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, 
NodeSettingsService nodeSettingsService, ZenPingService pingService, - DiscoveryNodeService discoveryNodeService, AzureComputeService azureService) { + DiscoveryNodeService discoveryNodeService, AzureComputeService azureService, NetworkService networkService) { super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, discoveryNodeService, pingService, Version.CURRENT); if (settings.getAsBoolean("cloud.enabled", true)) { ImmutableList zenPings = pingService.zenPings(); @@ -58,7 +59,7 @@ public class AzureDiscovery extends ZenDiscovery { if (unicastZenPing != null) { // update the unicast zen ping to add cloud hosts provider // and, while we are at it, use only it and not the multicast for example - unicastZenPing.addHostsProvider(new AzureUnicastHostsProvider(settings, azureService.nodes())); + unicastZenPing.addHostsProvider(new AzureUnicastHostsProvider(settings, azureService, transportService, networkService)); pingService.zenPings(ImmutableList.of(unicastZenPing)); } else { logger.warn("failed to apply azure unicast discovery, no unicast ping found"); diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java index 5bc482b93d0..f4125a9a5f4 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java @@ -19,29 +19,141 @@ package org.elasticsearch.discovery.azure; +import org.elasticsearch.ElasticSearchIllegalArgumentException; +import org.elasticsearch.Version; +import org.elasticsearch.cloud.azure.AzureComputeService; +import org.elasticsearch.cloud.azure.Instance; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.collect.Lists; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import 
org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; +import org.elasticsearch.transport.TransportService; +import java.io.IOException; +import java.net.InetAddress; import java.util.List; +import java.util.Set; /** * */ public class AzureUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { - private final List discoNodes; + public static enum HostType { + PRIVATE_IP, + PUBLIC_IP + } + + + private final AzureComputeService azureComputeService; + private TransportService transportService; + private NetworkService networkService; + + private final TimeValue refreshInterval; + private long lastRefresh; + private List cachedDiscoNodes; + private final HostType host_type; @Inject - public AzureUnicastHostsProvider(Settings settings, List discoNodes) { + public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureComputeService, + TransportService transportService, + NetworkService networkService) { super(settings); - this.discoNodes = discoNodes; + this.azureComputeService = azureComputeService; + this.transportService = transportService; + this.networkService = networkService; + + this.refreshInterval = componentSettings.getAsTime(AzureComputeService.Fields.REFRESH, + settings.getAsTime("cloud.azure." + AzureComputeService.Fields.REFRESH, TimeValue.timeValueSeconds(0))); + this.host_type = HostType.valueOf(componentSettings.get(AzureComputeService.Fields.HOST_TYPE, + settings.get("cloud.azure." + AzureComputeService.Fields.HOST_TYPE, HostType.PRIVATE_IP.name())).toUpperCase()); + } + /** + * We build the list of Nodes from Azure Management API + * Information can be cached using `cloud.azure.refresh_interval` property if needed. 
+ * Setting `cloud.azure.refresh_interval` to `-1` will cause infinite caching. + * Setting `cloud.azure.refresh_interval` to `0` will disable caching (default). + */ @Override public List buildDynamicNodes() { - return discoNodes; + if (refreshInterval.millis() != 0) { + if (cachedDiscoNodes != null && + (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { + if (logger.isTraceEnabled()) logger.trace("using cache to retrieve node list"); + return cachedDiscoNodes; + } + lastRefresh = System.currentTimeMillis(); + } + logger.debug("start building nodes list using Azure API"); + + cachedDiscoNodes = Lists.newArrayList(); + + Set response = azureComputeService.instances(); + + String ipAddress = null; + try { + InetAddress inetAddress = networkService.resolvePublishHostAddress(null); + if (inetAddress != null) { + ipAddress = inetAddress.getHostAddress(); + } + if (logger.isTraceEnabled()) logger.trace("ipAddress found: [{}]", ipAddress); + } catch (IOException e) { + // We can't find the publish host address... Hmmm. 
Too bad :-( + if (logger.isTraceEnabled()) logger.trace("exception while finding ipAddress", e); + } + + try { + for (Instance instance : response) { + String networkAddress = null; + // Let's detect if we want to use public or private IP + if (host_type == HostType.PRIVATE_IP) { + if (instance.getPrivateIp() != null) { + if (logger.isTraceEnabled() && instance.getPrivateIp().equals(ipAddress)) { + logger.trace("adding ourselves {}", ipAddress); + } + + networkAddress = instance.getPrivateIp(); + } else { + logger.trace("no private ip provided ignoring {}", instance.getName()); + } + } + if (host_type == HostType.PUBLIC_IP) { + if (instance.getPublicIp() != null && instance.getPublicPort() != null) { + networkAddress = instance.getPublicIp() + ":" +instance.getPublicPort(); + } else { + logger.trace("no public ip provided ignoring {}", instance.getName()); + } + } + + if (networkAddress == null) { + // We have a bad parameter here or not enough information from azure + throw new ElasticSearchIllegalArgumentException("can't find any " + host_type.name() + " address"); + } else { + TransportAddress[] addresses = transportService.addressesFromString(networkAddress); + // we only limit to 1 addresses, makes no sense to ping 100 ports + for (int i = 0; (i < addresses.length && i < UnicastZenPing.LIMIT_PORTS_COUNT); i++) { + logger.trace("adding {}, transport_address {}", networkAddress, addresses[i]); + cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getName() + "-" + i, addresses[i], Version.CURRENT)); + } + } + + } + } catch (Throwable e) { + logger.warn("Exception caught during discovery {} : {}", e.getClass().getName(), e.getMessage()); + logger.trace("Exception caught during discovery", e); + } + + logger.debug("{} node(s) added", cachedDiscoNodes.size()); + logger.debug("using dynamic discovery nodes {}", cachedDiscoNodes); + + return cachedDiscoNodes; } } diff --git a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java 
b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java index 42d2d086cdc..fa98e848fde 100644 --- a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java +++ b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java @@ -19,10 +19,8 @@ package org.elasticsearch.plugin.cloud.azure; -import org.elasticsearch.cloud.azure.AzureComputeService; import org.elasticsearch.cloud.azure.AzureModule; import org.elasticsearch.common.collect.Lists; -import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.AbstractPlugin; @@ -58,14 +56,4 @@ public class CloudAzurePlugin extends AbstractPlugin { } return modules; } - - @Override - public Collection> services() { - Collection> services = Lists.newArrayList(); - if (settings.getAsBoolean("cloud.enabled", true)) { - services.add(AzureComputeService.class); - } - return services; - } - } diff --git a/src/test/java/AzureSimpleTest.java b/src/test/java/AzureSimpleTest.java deleted file mode 100644 index 885f931afe8..00000000000 --- a/src/test/java/AzureSimpleTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.node.Node; -import org.elasticsearch.node.NodeBuilder; -import org.junit.Test; - -import java.io.File; - -/** - * To run this *test*, you need first to modify test/resources dir: - * - elasticsearch.yml: set your azure password and azure subscription_id - * - azure.crt: fill this file with your azure certificate (PEM format) - * - azure.pk: fill this file with your azure private key (PEM format) - */ -public class AzureSimpleTest { - - @Test - public void launchNode() { - File dataDir = new File("./target/es/data"); - if(dataDir.exists()) { - FileSystemUtils.deleteRecursively(dataDir, true); - } - - // Then we start our node for tests - Node node = NodeBuilder - .nodeBuilder() - .settings( - ImmutableSettings.settingsBuilder() - .put("gateway.type", "local") - .put("path.data", "./target/es/data") - .put("path.logs", "./target/es/logs") - .put("path.work", "./target/es/work") - ).node(); - - // We wait now for the yellow (or green) status -// node.client().admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet(); - - } -} diff --git a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java new file mode 100644 index 00000000000..59fafbb003f --- /dev/null +++ b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java @@ -0,0 +1,37 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.azure.itest; + +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.node.NodeBuilder; +import org.junit.Ignore; +import org.junit.Test; + +public class AzureSimpleITest { + + @Test @Ignore + public void one_node_should_run() { + ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() + //.put("gateway.type", "local") + .put("path.data", "./target/es/data") + .put("path.logs", "./target/es/logs") + .put("path.work", "./target/es/work"); + NodeBuilder.nodeBuilder().settings(builder).node(); + } +} diff --git a/src/test/java/org/elasticsearch/azure/test/AzureAbstractTest.java b/src/test/java/org/elasticsearch/azure/test/AzureAbstractTest.java new file mode 100644 index 00000000000..c02f267d95f --- /dev/null +++ b/src/test/java/org/elasticsearch/azure/test/AzureAbstractTest.java @@ -0,0 +1,144 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.azure.test; + +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cloud.azure.AzureComputeService; +import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeBuilder; +import org.elasticsearch.node.internal.InternalNode; +import org.elasticsearch.transport.netty.NettyTransport; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; + +public abstract class AzureAbstractTest { + + protected static ESLogger logger = ESLoggerFactory.getLogger(AzureAbstractTest.class.getName()); + private static List nodes; + private Class mock; + + public AzureAbstractTest(Class mock) { + // We want to inject the Azure API Mock + this.mock = mock; + } + + @Before + public void setUp() { + nodes = new ArrayList(); + + File dataDir = new File("./target/es/data"); + if(dataDir.exists()) { + FileSystemUtils.deleteRecursively(dataDir, true); + } + } + + @After + public void tearDown() { 
+ // Cleaning nodes after test + for (Node node : nodes) { + node.close(); + } + } + + protected Client getTransportClient() { + // Create a TransportClient on node 1 and 2 + Settings settings = ImmutableSettings.settingsBuilder() + .put("cluster.name", "azure") + .put("transport.tcp.connect_timeout", "1s") + .build(); + + TransportClient client = new TransportClient(settings); + + for (Node node : nodes) { + NettyTransport nettyTransport = ((InternalNode) node).injector().getInstance(NettyTransport.class); + TransportAddress transportAddress = nettyTransport.boundAddress().publishAddress(); + client.addTransportAddress(transportAddress); + } + + return client; + } + + protected Client getNodeClient() { + for (Node node : nodes) { + return node.client(); + } + + return null; + } + + protected void checkNumberOfNodes(int expected, boolean fail) { + NodesInfoResponse nodeInfos = null; + + try { + nodeInfos = getTransportClient().admin().cluster().prepareNodesInfo().execute().actionGet(); + } catch (NoNodeAvailableException e) { + // If we can't build a Transport Client, we are may be not connected to any network + // Let's try a Node Client + nodeInfos = getNodeClient().admin().cluster().prepareNodesInfo().execute().actionGet(); + } + + Assert.assertNotNull(nodeInfos); + Assert.assertNotNull(nodeInfos.getNodes()); + + if (fail) { + Assert.assertEquals(expected, nodeInfos.getNodes().length); + } else { + if (nodeInfos.getNodes().length != expected) { + logger.warn("expected {} node(s) but found {}. 
Could be due to no local IP address available.", + expected, nodeInfos.getNodes().length); + } + } + } + + protected void checkNumberOfNodes(int expected) { + checkNumberOfNodes(expected, true); + } + + protected void nodeBuilder() { + ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() + //.put("gateway.type", "local") + .put("path.data", "./target/es/data") + .put("path.logs", "./target/es/logs") + .put("path.work", "./target/es/work") +// .put("discovery.zen.ping.timeout", "500ms") +// .put("discovery.zen.fd.ping_retries",1) +// .put("discovery.zen.fd.ping_timeout", "500ms") +// .put("discovery.initial_state_timeout", "5s") +// .put("transport.tcp.connect_timeout", "1s") + .put("cloud.azure.api.impl", mock) + .put("cloud.azure.refresh_interval", "5s") + .put("node.name", (nodes.size()+1) + "#" + mock.getSimpleName()); + Node node = NodeBuilder.nodeBuilder().settings(builder).node(); + nodes.add(node); + } +} diff --git a/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceAbstractMock.java b/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceAbstractMock.java new file mode 100644 index 00000000000..b04524fec98 --- /dev/null +++ b/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceAbstractMock.java @@ -0,0 +1,50 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.azure.test; + +import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.cloud.azure.AzureComputeService; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.settings.Settings; + +/** + * + */ +public abstract class AzureComputeServiceAbstractMock extends AbstractLifecycleComponent + implements AzureComputeService { + + protected AzureComputeServiceAbstractMock(Settings settings) { + super(settings); + } + + @Override + protected void doStart() throws ElasticSearchException { + logger.debug("starting Azure Api Mock"); + } + + @Override + protected void doStop() throws ElasticSearchException { + logger.debug("stopping Azure Api Mock"); + } + + @Override + protected void doClose() throws ElasticSearchException { + } +} diff --git a/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceSimpleMock.java b/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceSimpleMock.java new file mode 100644 index 00000000000..f3cac7a940f --- /dev/null +++ b/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceSimpleMock.java @@ -0,0 +1,49 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.azure.test; + +import org.elasticsearch.cloud.azure.Instance; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; + +import java.util.HashSet; +import java.util.Set; + +/** + * Mock Azure API with a single started node + */ +public class AzureComputeServiceSimpleMock extends AzureComputeServiceAbstractMock { + + @Inject + protected AzureComputeServiceSimpleMock(Settings settings) { + super(settings); + logger.debug("starting Azure Mock"); + } + + @Override + public Set instances() { + Set instances = new HashSet(); + Instance azureHost = new Instance(); + azureHost.setPrivateIp("127.0.0.1"); + instances.add(azureHost); + + return instances; + } +} diff --git a/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceTwoNodesMock.java b/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceTwoNodesMock.java new file mode 100644 index 00000000000..d94efc13f54 --- /dev/null +++ b/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceTwoNodesMock.java @@ -0,0 +1,51 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.azure.test; + +import org.elasticsearch.cloud.azure.Instance; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; + +import java.util.HashSet; +import java.util.Set; + +/** + * Mock Azure API with two started nodes + */ +public class AzureComputeServiceTwoNodesMock extends AzureComputeServiceAbstractMock { + + @Inject + protected AzureComputeServiceTwoNodesMock(Settings settings) { + super(settings); + logger.debug("starting Azure Mock"); + } + + @Override + public Set instances() { + Set instances = new HashSet(); + Instance azureHost = new Instance(); + azureHost.setPrivateIp("127.0.0.1"); + instances.add(azureHost); + azureHost = new Instance(); + azureHost.setPrivateIp("127.0.0.1"); + instances.add(azureHost); + return instances; + } +} diff --git a/src/test/java/org/elasticsearch/azure/test/AzureInstanceXmlParserTest.java b/src/test/java/org/elasticsearch/azure/test/AzureInstanceXmlParserTest.java new file mode 100644 index 00000000000..d99e3edc74b --- /dev/null +++ b/src/test/java/org/elasticsearch/azure/test/AzureInstanceXmlParserTest.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch (the "Author") under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Author licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.azure.test; + +import org.elasticsearch.cloud.azure.AzureComputeServiceImpl; +import org.elasticsearch.cloud.azure.Instance; +import org.junit.Assert; +import org.junit.Test; +import org.xml.sax.SAXException; + +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.xpath.XPathExpressionException; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashSet; +import java.util.Set; + +public class AzureInstanceXmlParserTest { + + private Instance build(String name, String privateIpAddress, + String publicIpAddress, + String publicPort, + Instance.Status status) { + Instance instance = new Instance(); + instance.setName(name); + instance.setPrivateIp(privateIpAddress); + instance.setPublicIp(publicIpAddress); + instance.setPublicPort(publicPort); + instance.setStatus(status); + return instance; + } + + @Test + public void testReadXml() throws ParserConfigurationException, SAXException, XPathExpressionException, IOException { + InputStream inputStream = AzureInstanceXmlParserTest.class.getResourceAsStream("/org/elasticsearch/azure/test/services.xml"); + Set instances = AzureComputeServiceImpl.buildInstancesFromXml(inputStream, "elasticsearch"); + + Set expected = new HashSet(); + expected.add(build("es-windows2008", "10.53.250.55", null, null, Instance.Status.STARTED)); + expected.add(build("myesnode1", "10.53.218.75", "137.116.213.150", "9300", Instance.Status.STARTED)); + + Assert.assertArrayEquals(expected.toArray(), instances.toArray()); + } +} diff --git a/src/test/java/org/elasticsearch/azure/test/AzureSimpleTest.java b/src/test/java/org/elasticsearch/azure/test/AzureSimpleTest.java new file mode 100644 index 00000000000..2fee19dac90 --- /dev/null +++ b/src/test/java/org/elasticsearch/azure/test/AzureSimpleTest.java @@ -0,0 +1,37 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.azure.test; + +import org.junit.Test; + +public class AzureSimpleTest extends AzureAbstractTest { + + public AzureSimpleTest() { + super(AzureComputeServiceSimpleMock.class); + } + + @Test + public void one_node_should_run() { + // Then we start our node for tests + nodeBuilder(); + + // We expect having 2 nodes as part of the cluster, let's test that + checkNumberOfNodes(1); + } +} diff --git a/src/test/java/org/elasticsearch/azure/test/AzureTwoStartedNodesTest.java b/src/test/java/org/elasticsearch/azure/test/AzureTwoStartedNodesTest.java new file mode 100644 index 00000000000..ae1e3ac8feb --- /dev/null +++ b/src/test/java/org/elasticsearch/azure/test/AzureTwoStartedNodesTest.java @@ -0,0 +1,38 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.azure.test; + +import org.junit.Test; + +public class AzureTwoStartedNodesTest extends AzureAbstractTest { + + public AzureTwoStartedNodesTest() { + super(AzureComputeServiceTwoNodesMock.class); + } + + @Test + public void two_nodes_should_run() { + // Then we start our node for tests + nodeBuilder(); + nodeBuilder(); + + // We expect having 2 nodes as part of the cluster, let's test that + checkNumberOfNodes(2, false); + } +} diff --git a/src/test/resources/azure.crt b/src/test/resources/azure.crt deleted file mode 100644 index 1be5a80046a..00000000000 --- a/src/test/resources/azure.crt +++ /dev/null @@ -1,3 +0,0 @@ ------BEGIN CERTIFICATE----- -YOUR-CERTIFICATE-HERE ------END CERTIFICATE----- diff --git a/src/test/resources/azure.pk b/src/test/resources/azure.pk deleted file mode 100644 index 6406c000190..00000000000 --- a/src/test/resources/azure.pk +++ /dev/null @@ -1,3 +0,0 @@ ------BEGIN PRIVATE KEY----- -YOUR-PK-HERE ------END PRIVATE KEY----- diff --git a/src/test/resources/elasticsearch.yml b/src/test/resources/elasticsearch.yml index 9ad55935713..82583a481cb 100644 --- a/src/test/resources/elasticsearch.yml +++ b/src/test/resources/elasticsearch.yml @@ -10,6 +10,7 @@ # governing permissions and limitations under the License. cluster.name: azure + # Azure discovery allows to use Azure API in order to perform discovery. # # You have to install the cloud-azure plugin for enabling the Azure discovery. @@ -21,9 +22,9 @@ # for a step-by-step tutorial. 
cloud: azure: - private_key: ${project.build.testOutputDirectory}/azure.pk - certificate: ${project.build.testOutputDirectory}/azure.crt + keystore: FULLPATH-TO-YOUR-KEYSTORE password: YOUR-PASSWORD subscription_id: YOUR-AZURE-SUBSCRIPTION-ID + service_name: YOUR-AZURE-SERVICE-NAME discovery: type: azure diff --git a/src/test/resources/log4j.xml b/src/test/resources/log4j.xml index 46249c145c5..95b3bf51c77 100644 --- a/src/test/resources/log4j.xml +++ b/src/test/resources/log4j.xml @@ -16,7 +16,7 @@ governing permissions and limitations under the License. --> - + @@ -24,10 +24,10 @@ governing permissions and limitations under the License. --> - + - + diff --git a/src/test/resources/org/elasticsearch/azure/test/services.xml b/src/test/resources/org/elasticsearch/azure/test/services.xml new file mode 100644 index 00000000000..0b8bd9ab1f8 --- /dev/null +++ b/src/test/resources/org/elasticsearch/azure/test/services.xml @@ -0,0 +1,170 @@ + + + https://management.core.windows.net/a836d210-c681-4916-ae7d-9ca5f1b4906f/services/hostedservices/azure-elasticsearch-cluster + azure-elasticsearch-cluster + + Implicitly created hosted service + West Europe + + Created + 2013-09-23T12:45:45Z + 2013-09-23T13:42:46Z + + + + + azure-elasticsearch-cluster + Production + c6a690796e6b497a918487546193b94a + Running + + http://azure-elasticsearch-cluster.cloudapp.net/ + PFNlcnZpY2VDb25maWd1cmF0aW9uIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiIHhtbG5zPSJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL1NlcnZpY2VIb3N0aW5nLzIwMDgvMTAvU2VydmljZUNvbmZpZ3VyYXRpb24iPg0KICA8Um9sZSBuYW1lPSJlcy13aW5kb3dzMjAwOCI+DQogICAgPEluc3RhbmNlcyBjb3VudD0iMSIgLz4NCiAgPC9Sb2xlPg0KICA8Um9sZSBuYW1lPSJteWVzbm9kZTEiPg0KICAgIDxJbnN0YW5jZXMgY291bnQ9IjEiIC8+DQogIDwvUm9sZT4NCjwvU2VydmljZUNvbmZpZ3VyYXRpb24+ + + + es-windows2008 + es-windows2008 + ReadyRole + 0 + 0 + ExtraSmall + + 10.53.250.55 + + + PowerShell + 137.116.213.150 + 5986 + 5986 + 
tcp + + + Remote Desktop + 137.116.213.150 + 53867 + 3389 + tcp + + + Started + es-windows2008 + E6798DEACEAD4752825A2ABFC0A9A367389ED802 + + + myesnode1 + myesnode1 + ReadyRole + 0 + 0 + ExtraSmall + + 10.53.218.75 + + + ssh + 137.116.213.150 + 22 + 22 + tcp + + + elasticsearch + 137.116.213.150 + 9300 + 9300 + tcp + + + Started + myesnode1 + + + 1 + + + es-windows2008 + + PersistentVMRole + + + NetworkConfiguration + + + 5986 + PowerShell + 5986 + tcp + 137.116.213.150 + + + 3389 + Remote Desktop + 53867 + tcp + 137.116.213.150 + + + + + + + + ReadWrite + azure-elasticsearch-cluster-es-windows2008-0-201309231342520150 + http://portalvhdsv5skghwlmf765.blob.core.windows.net/vhds/azure-elasticsearch-cluster-es-windows2008-2013-09-23.vhd + a699494373c04fc0bc8f2bb1389d6106__Win2K8R2SP1-Datacenter-201308.02-en.us-127GB.vhd + Windows + + ExtraSmall + + + myesnode1 + + PersistentVMRole + + + NetworkConfiguration + + + 22 + ssh + 22 + tcp + 137.116.213.150 + + + + + + + + ReadWrite + azure-elasticsearch-cluster-myesnode1-0-201309231246380270 + http://azureelasticsearchcluste.blob.core.windows.net/vhd-store/myesnode1-0cc86cc84c0ed2e0.vhd + b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20130808-alpha3-en-us-30GB + Linux + + ExtraSmall + + + + false + false + 2013-09-23T12:46:27Z + 2013-09-23T20:38:58Z + + + 2013-08-27T08:00:00Z + 2013-09-27T20:00:00Z + PersistentVMUpdateScheduled + + + +
137.116.213.150
+ true + azure-elasticsearch-clusterContractContract +
+
+
+
+
From 0c185a19b24416c6ea8594edb0df26ecccdf7f30 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 6 Nov 2013 16:05:20 +0100 Subject: [PATCH 004/125] Fix after review: no need to ask for SERVICE_NAME twice --- .../org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java index f44270df150..554af257ed2 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java @@ -121,8 +121,7 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent(); } else { try { - String SERVICE_NAME = componentSettings.get(Fields.SERVICE_NAME, settings.get("cloud." + Fields.SERVICE_NAME)); - InputStream stream = getXML("/services/hostedservices/" + SERVICE_NAME + "?embed-detail=true"); + InputStream stream = getXML("/services/hostedservices/" + service_name + "?embed-detail=true"); Set instances = buildInstancesFromXml(stream, port_name); if (logger.isTraceEnabled()) logger.trace("get instances from azure: {}", instances); From 92cda5e27b809019ae45fc0a1237511553d1cbe5 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 6 Nov 2013 16:09:25 +0100 Subject: [PATCH 005/125] prepare release elasticsearch-cloud-azure-1.0.0.alpha1 --- README.md | 8 ++++---- pom.xml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index f790ba5791a..5d353b16d19 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Azure Cloud Plugin for ElasticSearch The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. -In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0`. 
+In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0.alpha1`. @@ -15,12 +15,12 @@ In order to install the plugin, simply run: `bin/plugin -install elasticsearch/e - + - + @@ -284,7 +284,7 @@ azure service cert create azure-elasticsearch-cluster azure-certificate.cer sudo service elasticsearch stop # Install the plugin -sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0 +sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0.alpha1 # Configure it sudo vi /etc/elasticsearch/elasticsearch.yml diff --git a/pom.xml b/pom.xml index b20ecad75a5..95b416ba97a 100644 --- a/pom.xml +++ b/pom.xml @@ -17,7 +17,7 @@ governing permissions and limitations under the License. --> 4.0.0org.elasticsearchelasticsearch-cloud-azure - 1.0.0-SNAPSHOT + 1.0.0.alpha1jarAzure Cloud plugin for ElasticSearch2013 From 41de615eb24287ae2a95890c4de5b1f00f2b0cb2 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 6 Nov 2013 16:19:32 +0100 Subject: [PATCH 006/125] prepare for next development iteration --- README.md | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5d353b16d19..e3cf9a89573 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ In order to install the plugin, simply run: `bin/plugin -install elasticsearch/e - +
1.1.0-SNAPSHOT (master)1.0.0-SNAPSHOT (master) 0.90.6
1.0.01.0.0.alpha1 0.90.6 2013-11-12
1.0.0.alpha1 0.90.62013-11-122013-11-06
diff --git a/pom.xml b/pom.xml index 95b416ba97a..b20ecad75a5 100644 --- a/pom.xml +++ b/pom.xml @@ -17,7 +17,7 @@ governing permissions and limitations under the License. --> 4.0.0 org.elasticsearch elasticsearch-cloud-azure - 1.0.0.alpha1 + 1.0.0-SNAPSHOT jar Azure Cloud plugin for ElasticSearch 2013 From 316a1411d08fe03b8197a103bef021f65068d93e Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 6 Jan 2014 23:28:22 +0100 Subject: [PATCH 007/125] Move tests to elasticsearch test framework Move tests to elasticsearch test framework. In addition to this, we want to refactor some package names to prepare next snapshot/restore feature (see #2). Closes #3. --- pom.xml | 131 +++++++++++++++- .../cloud/azure/AzureModule.java | 26 ++++ .../cloud/azure/AzureSettingsFilter.java | 5 +- .../discovery/azure/AzureDiscovery.java | 2 + .../discovery/azure/AzureDiscoveryModule.java | 18 +++ .../azure/itest/AzureSimpleITest.java | 40 +++-- .../azure/test/AzureAbstractTest.java | 144 ------------------ .../cloud/azure/AbstractAzureTest.java | 64 ++++++++ .../AbstractAzureComputeServiceTest.java | 53 +++++++ .../AzureComputeServiceAbstractMock.java | 3 +- .../azure}/AzureComputeServiceSimpleMock.java | 3 +- .../AzureComputeServiceTwoNodesMock.java | 3 +- .../azure}/AzureInstanceXmlParserTest.java | 2 +- .../azure}/AzureSimpleTest.java | 14 +- .../azure}/AzureTwoStartedNodesTest.java | 15 +- src/test/resources/elasticsearch.yml | 24 +-- 16 files changed, 355 insertions(+), 192 deletions(-) delete mode 100644 src/test/java/org/elasticsearch/azure/test/AzureAbstractTest.java create mode 100644 src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java create mode 100644 src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java rename src/test/java/org/elasticsearch/{azure/test => discovery/azure}/AzureComputeServiceAbstractMock.java (92%) rename src/test/java/org/elasticsearch/{azure/test => discovery/azure}/AzureComputeServiceSimpleMock.java (94%) 
rename src/test/java/org/elasticsearch/{azure/test => discovery/azure}/AzureComputeServiceTwoNodesMock.java (95%) rename src/test/java/org/elasticsearch/{azure/test => discovery/azure}/AzureInstanceXmlParserTest.java (98%) rename src/test/java/org/elasticsearch/{azure/test => discovery/azure}/AzureSimpleTest.java (76%) rename src/test/java/org/elasticsearch/{azure/test => discovery/azure}/AzureTwoStartedNodesTest.java (73%) diff --git a/pom.xml b/pom.xml index b20ecad75a5..b580ca66212 100644 --- a/pom.xml +++ b/pom.xml @@ -42,16 +42,30 @@ governing permissions and limitations under the License. --> - 0.90.6 + 1.0.0.RC1-SNAPSHOT + 4.6.0 + onerror + 1 + true + onerror + + + INFO + + org.apache.lucene + lucene-test-framework + ${lucene.version} + test + + org.elasticsearch elasticsearch ${elasticsearch.version} - log4j log4j @@ -59,12 +73,24 @@ governing permissions and limitations under the License. --> test - junit - junit - 4.11 + org.elasticsearch + elasticsearch + ${elasticsearch.version} + test-jar + test + + + org.hamcrest + hamcrest-core + 1.3.RC2 + test + + + org.hamcrest + hamcrest-library + 1.3.RC2 test - @@ -91,10 +117,103 @@ governing permissions and limitations under the License. 
--> 1.6 + + com.carrotsearch.randomizedtesting + junit4-maven-plugin + 2.0.14 + + + tests + test + + junit4 + + + 20 + pipe,warn + true + + + + + + + + + ${tests.jvms} + + + + + + + **/*Tests.class + **/*Test.class + + + **/Abstract*.class + **/*StressTest.class + + + ${tests.jvm.argline} + + + -Xmx512m + -Xss256k + -XX:MaxDirectMemorySize=512m + -Des.logger.prefix= + + ${tests.shuffle} + ${tests.verbose} + ${tests.seed} + ${tests.failfast} + + + ${tests.jvm.argline} + ${tests.iters} + ${tests.maxfailures} + ${tests.failfast} + ${tests.class} + ${tests.method} + ${tests.nightly} + ${tests.badapples} + ${tests.weekly} + ${tests.slow} + ${tests.azure} + ${tests.awaitsfix} + ${tests.slow} + ${tests.timeoutSuite} + ${tests.showSuccess} + ${tests.integration} + ${tests.cluster_seed} + ${tests.client.ratio} + ${env.ES_TEST_LOCAL} + ${es.node.mode} + ${es.config} + ${es.logger.level} + true + + + + + org.apache.maven.plugins maven-surefire-plugin 2.13 + + true + org.apache.maven.plugins diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java index 7582a0c436a..367481321f8 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java @@ -21,23 +21,49 @@ package org.elasticsearch.cloud.azure; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.azure.AzureDiscovery; /** + * Azure Module * + *
    + *
  • If needed this module will bind azure discovery service by default + * to AzureComputeServiceImpl.
  • + *
+ * + * @see org.elasticsearch.cloud.azure.AzureComputeServiceImpl */ public class AzureModule extends AbstractModule { + protected final ESLogger logger; private Settings settings; @Inject public AzureModule(Settings settings) { this.settings = settings; + this.logger = Loggers.getLogger(getClass(), settings); } @Override protected void configure() { + logger.debug("starting azure services"); + + // If we have set discovery to azure, let's start the azure compute service + if (isDiscoveryReady(settings)) { + logger.debug("starting azure discovery service"); bind(AzureComputeService.class) .to(settings.getAsClass("cloud.azure.api.impl", AzureComputeServiceImpl.class)) .asEagerSingleton(); + } + } + + /** + * Check if discovery is meant to start + * @return true if we can start discovery features + */ + public static boolean isDiscoveryReady(Settings settings) { + return (AzureDiscovery.AZURE.equalsIgnoreCase(settings.get("discovery.type"))); } } diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java index 9728c2c253c..863a97874a2 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java @@ -23,15 +23,16 @@ import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.SettingsFilter; /** - * + * Filtering cloud.azure.* settings */ public class AzureSettingsFilter implements SettingsFilter.Filter { @Override public void filter(ImmutableSettings.Builder settings) { - settings.remove("cloud.private_key"); + // Cloud settings settings.remove("cloud.certificate"); settings.remove("cloud.azure.password"); settings.remove("cloud.azure.subscription_id"); + settings.remove("cloud.azure.service_name"); } } diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java 
b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 6313e152383..c56a8632c06 100755 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -41,6 +41,8 @@ import org.elasticsearch.transport.TransportService; */ public class AzureDiscovery extends ZenDiscovery { + public static final String AZURE = "azure"; + @Inject public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java index cd46618d979..1a51f214f4b 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java @@ -19,6 +19,11 @@ package org.elasticsearch.discovery.azure; +import org.elasticsearch.cloud.azure.AzureModule; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.ZenDiscoveryModule; @@ -27,8 +32,21 @@ import org.elasticsearch.discovery.zen.ZenDiscoveryModule; */ public class AzureDiscoveryModule extends ZenDiscoveryModule { + protected final ESLogger logger; + private Settings settings; + + @Inject + public AzureDiscoveryModule(Settings settings) { + super(); + this.logger = Loggers.getLogger(getClass(), settings); + this.settings = settings; + } @Override protected void bindDiscovery() { + if (AzureModule.isDiscoveryReady(settings)) { bind(Discovery.class).to(AzureDiscovery.class).asEagerSingleton(); + } else { + 
logger.debug("disabling azure discovery features"); + } } } diff --git a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java index 59fafbb003f..b96a604c32a 100644 --- a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java +++ b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java @@ -18,20 +18,40 @@ */ package org.elasticsearch.azure.itest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cloud.azure.AbstractAzureTest; import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.node.NodeBuilder; -import org.junit.Ignore; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.hamcrest.Matchers; import org.junit.Test; -public class AzureSimpleITest { +/** + * This test needs Azure to run and -Dtests.azure=true to be set + * and -Des.config=/path/to/elasticsearch.yml + * @see org.elasticsearch.cloud.azure.AbstractAzureTest + */ +@AbstractAzureTest.AzureTest +@ElasticsearchIntegrationTest.ClusterScope( + scope = ElasticsearchIntegrationTest.Scope.TEST, + numNodes = 1, + transportClientRatio = 0.0) +public class AzureSimpleITest extends AbstractAzureTest { - @Test @Ignore + @Test public void one_node_should_run() { - ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() - //.put("gateway.type", "local") - .put("path.data", "./target/es/data") - .put("path.logs", "./target/es/logs") - .put("path.work", "./target/es/work"); - NodeBuilder.nodeBuilder().settings(builder).node(); + // Do nothing... 
Just start :-) + // but let's check that we have at least 1 node (local node) + ClusterStateResponse clusterState = client().admin().cluster().prepareState().execute().actionGet(); + + assertThat(clusterState.getState().getNodes().getSize(), Matchers.greaterThanOrEqualTo(1)); + } + + @Override + public Settings indexSettings() { + // During restore we frequently restore index to exactly the same state it was before, that might cause the same + // checksum file to be written twice during restore operation + return ImmutableSettings.builder().put(super.indexSettings()) + .build(); } } diff --git a/src/test/java/org/elasticsearch/azure/test/AzureAbstractTest.java b/src/test/java/org/elasticsearch/azure/test/AzureAbstractTest.java deleted file mode 100644 index c02f267d95f..00000000000 --- a/src/test/java/org/elasticsearch/azure/test/AzureAbstractTest.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.azure.test; - -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.cloud.azure.AzureComputeService; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.node.Node; -import org.elasticsearch.node.NodeBuilder; -import org.elasticsearch.node.internal.InternalNode; -import org.elasticsearch.transport.netty.NettyTransport; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; - -public abstract class AzureAbstractTest { - - protected static ESLogger logger = ESLoggerFactory.getLogger(AzureAbstractTest.class.getName()); - private static List nodes; - private Class mock; - - public AzureAbstractTest(Class mock) { - // We want to inject the Azure API Mock - this.mock = mock; - } - - @Before - public void setUp() { - nodes = new ArrayList(); - - File dataDir = new File("./target/es/data"); - if(dataDir.exists()) { - FileSystemUtils.deleteRecursively(dataDir, true); - } - } - - @After - public void tearDown() { - // Cleaning nodes after test - for (Node node : nodes) { - node.close(); - } - } - - protected Client getTransportClient() { - // Create a TransportClient on node 1 and 2 - Settings settings = ImmutableSettings.settingsBuilder() - .put("cluster.name", "azure") - .put("transport.tcp.connect_timeout", "1s") - .build(); - - TransportClient client = new TransportClient(settings); - - for (Node node : nodes) { - 
NettyTransport nettyTransport = ((InternalNode) node).injector().getInstance(NettyTransport.class); - TransportAddress transportAddress = nettyTransport.boundAddress().publishAddress(); - client.addTransportAddress(transportAddress); - } - - return client; - } - - protected Client getNodeClient() { - for (Node node : nodes) { - return node.client(); - } - - return null; - } - - protected void checkNumberOfNodes(int expected, boolean fail) { - NodesInfoResponse nodeInfos = null; - - try { - nodeInfos = getTransportClient().admin().cluster().prepareNodesInfo().execute().actionGet(); - } catch (NoNodeAvailableException e) { - // If we can't build a Transport Client, we are may be not connected to any network - // Let's try a Node Client - nodeInfos = getNodeClient().admin().cluster().prepareNodesInfo().execute().actionGet(); - } - - Assert.assertNotNull(nodeInfos); - Assert.assertNotNull(nodeInfos.getNodes()); - - if (fail) { - Assert.assertEquals(expected, nodeInfos.getNodes().length); - } else { - if (nodeInfos.getNodes().length != expected) { - logger.warn("expected {} node(s) but found {}. 
Could be due to no local IP address available.", - expected, nodeInfos.getNodes().length); - } - } - } - - protected void checkNumberOfNodes(int expected) { - checkNumberOfNodes(expected, true); - } - - protected void nodeBuilder() { - ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() - //.put("gateway.type", "local") - .put("path.data", "./target/es/data") - .put("path.logs", "./target/es/logs") - .put("path.work", "./target/es/work") -// .put("discovery.zen.ping.timeout", "500ms") -// .put("discovery.zen.fd.ping_retries",1) -// .put("discovery.zen.fd.ping_timeout", "500ms") -// .put("discovery.initial_state_timeout", "5s") -// .put("transport.tcp.connect_timeout", "1s") - .put("cloud.azure.api.impl", mock) - .put("cloud.azure.refresh_interval", "5s") - .put("node.name", (nodes.size()+1) + "#" + mock.getSimpleName()); - Node node = NodeBuilder.nodeBuilder().settings(builder).node(); - nodes.add(node); - } -} diff --git a/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java new file mode 100644 index 00000000000..58c69ebba76 --- /dev/null +++ b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch (the "Author") under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. Author licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure; + +import com.carrotsearch.randomizedtesting.annotations.TestGroup; +import org.elasticsearch.test.ElasticsearchIntegrationTest; + +import java.lang.annotation.Documented; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; + +/** + * + */ +public abstract class AbstractAzureTest extends ElasticsearchIntegrationTest { + + /** + * Annotation for tests that require Azure to run. Azure tests are disabled by default. + *

+ * To enable test add -Dtests.azure=true -Des.config=/path/to/elasticsearch.yml + *

+ * The elasticsearch.yml file should contain the following keys + *

+      cloud:
+          azure:
+              keystore: FULLPATH-TO-YOUR-KEYSTORE
+              password: YOUR-PASSWORD
+              subscription_id: YOUR-AZURE-SUBSCRIPTION-ID
+              service_name: YOUR-AZURE-SERVICE-NAME
+
+      discovery:
+              type: azure
+     * 
+ */ + @Documented + @Inherited + @Retention(RetentionPolicy.RUNTIME) + @TestGroup(enabled = false, sysProperty = SYSPROP_AZURE) + public @interface AzureTest { + } + + /** + */ + public static final String SYSPROP_AZURE = "tests.azure"; + +} diff --git a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java new file mode 100644 index 00000000000..c6586dfe1fc --- /dev/null +++ b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java @@ -0,0 +1,53 @@ +/* + * Licensed to ElasticSearch under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. ElasticSearch licenses this + * file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.discovery.azure; + +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.cloud.azure.AbstractAzureTest; +import org.elasticsearch.cloud.azure.AzureComputeService; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; + +public abstract class AbstractAzureComputeServiceTest extends AbstractAzureTest { + + private Class mock; + + public AbstractAzureComputeServiceTest(Class mock) { + // We want to inject the Azure API Mock + this.mock = mock; + } + + protected void checkNumberOfNodes(int expected) { + NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().execute().actionGet(); + assertNotNull(nodeInfos); + assertNotNull(nodeInfos.getNodes()); + assertEquals(expected, nodeInfos.getNodes().length); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() + .put("cloud.azure.api.impl", mock) + // We add a fake subscription_id to start mock compute service + .put("cloud.azure.subscription_id", "fake") + .put("cloud.azure.refresh_interval", "5s"); + + return builder.build(); + } +} diff --git a/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceAbstractMock.java b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java similarity index 92% rename from src/test/java/org/elasticsearch/azure/test/AzureComputeServiceAbstractMock.java rename to src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java index b04524fec98..142923f5853 100644 --- a/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceAbstractMock.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.azure.test; +package org.elasticsearch.discovery.azure; import org.elasticsearch.ElasticSearchException; import org.elasticsearch.cloud.azure.AzureComputeService; @@ -32,6 +32,7 @@ public abstract class AzureComputeServiceAbstractMock extends AbstractLifecycleC protected AzureComputeServiceAbstractMock(Settings settings) { super(settings); + logger.debug("starting Azure Mock [{}]", this.getClass().getSimpleName()); } @Override diff --git a/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceSimpleMock.java b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java similarity index 94% rename from src/test/java/org/elasticsearch/azure/test/AzureComputeServiceSimpleMock.java rename to src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java index f3cac7a940f..c1b79e39c4b 100644 --- a/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceSimpleMock.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.azure.test; +package org.elasticsearch.discovery.azure; import org.elasticsearch.cloud.azure.Instance; import org.elasticsearch.common.inject.Inject; @@ -34,7 +34,6 @@ public class AzureComputeServiceSimpleMock extends AzureComputeServiceAbstractMo @Inject protected AzureComputeServiceSimpleMock(Settings settings) { super(settings); - logger.debug("starting Azure Mock"); } @Override diff --git a/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceTwoNodesMock.java b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java similarity index 95% rename from src/test/java/org/elasticsearch/azure/test/AzureComputeServiceTwoNodesMock.java rename to src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java index d94efc13f54..8a60528846a 100644 --- a/src/test/java/org/elasticsearch/azure/test/AzureComputeServiceTwoNodesMock.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.azure.test; +package org.elasticsearch.discovery.azure; import org.elasticsearch.cloud.azure.Instance; import org.elasticsearch.common.inject.Inject; @@ -34,7 +34,6 @@ public class AzureComputeServiceTwoNodesMock extends AzureComputeServiceAbstract @Inject protected AzureComputeServiceTwoNodesMock(Settings settings) { super(settings); - logger.debug("starting Azure Mock"); } @Override diff --git a/src/test/java/org/elasticsearch/azure/test/AzureInstanceXmlParserTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java similarity index 98% rename from src/test/java/org/elasticsearch/azure/test/AzureInstanceXmlParserTest.java rename to src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java index d99e3edc74b..dfe358d3254 100644 --- a/src/test/java/org/elasticsearch/azure/test/AzureInstanceXmlParserTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.azure.test; +package org.elasticsearch.discovery.azure; import org.elasticsearch.cloud.azure.AzureComputeServiceImpl; import org.elasticsearch.cloud.azure.Instance; diff --git a/src/test/java/org/elasticsearch/azure/test/AzureSimpleTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java similarity index 76% rename from src/test/java/org/elasticsearch/azure/test/AzureSimpleTest.java rename to src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java index 2fee19dac90..6bd7130300d 100644 --- a/src/test/java/org/elasticsearch/azure/test/AzureSimpleTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java @@ -16,11 +16,16 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.azure.test; +package org.elasticsearch.discovery.azure; +import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; -public class AzureSimpleTest extends AzureAbstractTest { +@ElasticsearchIntegrationTest.ClusterScope( + scope = ElasticsearchIntegrationTest.Scope.TEST, + numNodes = 1, + transportClientRatio = 0.0) +public class AzureSimpleTest extends AbstractAzureComputeServiceTest { public AzureSimpleTest() { super(AzureComputeServiceSimpleMock.class); @@ -28,10 +33,9 @@ public class AzureSimpleTest extends AzureAbstractTest { @Test public void one_node_should_run() { - // Then we start our node for tests - nodeBuilder(); - // We expect having 2 nodes as part of the cluster, let's test that checkNumberOfNodes(1); + + } } diff --git a/src/test/java/org/elasticsearch/azure/test/AzureTwoStartedNodesTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java similarity index 73% rename from src/test/java/org/elasticsearch/azure/test/AzureTwoStartedNodesTest.java rename to src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java index ae1e3ac8feb..a806f5e63cd 100644 --- a/src/test/java/org/elasticsearch/azure/test/AzureTwoStartedNodesTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java @@ -16,11 +16,16 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.azure.test; +package org.elasticsearch.discovery.azure; +import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; -public class AzureTwoStartedNodesTest extends AzureAbstractTest { +@ElasticsearchIntegrationTest.ClusterScope( + scope = ElasticsearchIntegrationTest.Scope.TEST, + numNodes = 2, + transportClientRatio = 0.0) +public class AzureTwoStartedNodesTest extends AbstractAzureComputeServiceTest { public AzureTwoStartedNodesTest() { super(AzureComputeServiceTwoNodesMock.class); @@ -28,11 +33,7 @@ public class AzureTwoStartedNodesTest extends AzureAbstractTest { @Test public void two_nodes_should_run() { - // Then we start our node for tests - nodeBuilder(); - nodeBuilder(); - // We expect having 2 nodes as part of the cluster, let's test that - checkNumberOfNodes(2, false); + checkNumberOfNodes(2); } } diff --git a/src/test/resources/elasticsearch.yml b/src/test/resources/elasticsearch.yml index 82583a481cb..07536b8c77b 100644 --- a/src/test/resources/elasticsearch.yml +++ b/src/test/resources/elasticsearch.yml @@ -8,9 +8,7 @@ # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. - - cluster.name: azure - +# # Azure discovery allows to use Azure API in order to perform discovery. # # You have to install the cloud-azure plugin for enabling the Azure discovery. @@ -18,13 +16,15 @@ # See # for more information. # -# See +# See # for a step-by-step tutorial. 
- cloud: - azure: - keystore: FULLPATH-TO-YOUR-KEYSTORE - password: YOUR-PASSWORD - subscription_id: YOUR-AZURE-SUBSCRIPTION-ID - service_name: YOUR-AZURE-SERVICE-NAME - discovery: - type: azure +# +# cloud: +# azure: +# keystore: FULLPATH-TO-YOUR-KEYSTORE +# password: YOUR-PASSWORD +# subscription_id: YOUR-AZURE-SUBSCRIPTION-ID +# service_name: YOUR-AZURE-SERVICE-NAME +# +# discovery: +# type: azure From 3c4d275969270c35c54a288833a3292e3fb45270 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 15 Jan 2014 15:48:00 +0100 Subject: [PATCH 008/125] Move tests to elasticsearch test framework Related to #3. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 9dac7bc0ead..87879ffa87d 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ /.project /.classpath *.vmoptions +.local-execution-hints.log From 3c592a07415038fad6aad943771c79fab08cb1b5 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 15 Jan 2014 15:58:50 +0100 Subject: [PATCH 009/125] Update README after review by Microsoft Azure team --- README.md | 62 ++++++++++++++----------------------------------------- 1 file changed, 16 insertions(+), 46 deletions(-) diff --git a/README.md b/README.md index e3cf9a89573..32cd930c13c 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -Azure Cloud Plugin for ElasticSearch +Azure Cloud Plugin for Elasticearch ==================================== The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. 
@@ -80,8 +80,20 @@ cd /tmp openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure-private.key -out azure-certificate.pem chmod 600 azure-private.key openssl x509 -outform der -in azure-certificate.pem -out azure-certificate.cer + +# Generate a keystore (azurekeystore.pkcs12) +# Transform private key to PEM format +openssl pkcs8 -topk8 -nocrypt -in azure-private.key -inform PEM -out azure-pk.pem -outform PEM +# Transform certificate to PEM format +openssl x509 -inform der -in azure-certificate.cer -out azure-cert.pem +cat azure-cert.pem azure-pk.pem > azure.pem.txt +# You MUST enter a password! +openssl pkcs12 -export -in azure.pem.txt -out azurekeystore.pkcs12 -name azure -noiter -nomaciter ``` +Upload the generated key to Azure platform. **Important**: when prompted for a password, +you need to enter a non empty one. + See this [guide](http://www.windowsazure.com/en-us/manage/linux/how-to-guides/ssh-into-linux/) to have more details on how to create keys for Azure. @@ -115,7 +127,7 @@ sudo npm install azure-cli -g azure account download # Import this file (we have downloaded it to /tmp) -# Note, it will create needed files in ~/.azure +# Note, it will create needed files in ~/.azure. You can remove azure.publishsettings when done. azure account import /tmp/azure.publishsettings ``` @@ -188,8 +200,8 @@ Now, your first instance is started. You need to install Elasticsearch on it. 
```sh -# First, copy your azure certificate on this machine -scp /tmp/azure.publishsettings azure-elasticsearch-cluster.cloudapp.net:/tmp +# First, copy your keystore on this machine +scp /tmp/azurekeystore.pkcs12 azure-elasticsearch-cluster.cloudapp.net:/home/elasticsearch # Then, connect to your instance using SSH ssh azure-elasticsearch-cluster.cloudapp.net @@ -235,48 +247,6 @@ This command should give you a JSON result: } ``` -### Install nodejs and Azure tools - -```sh -# Install node (aka download and compile source) -sudo apt-get update -sudo apt-get install python-software-properties python g++ make -sudo add-apt-repository ppa:chris-lea/node.js -sudo apt-get update -sudo apt-get install nodejs - -# Install Azure tools -sudo npm install azure-cli -g - -# Install your azure certficate -azure account import /tmp/azure.publishsettings - -# Remove tmp file -rm /tmp/azure.publishsettings -``` - -### Generate a keystore - -```sh -openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure-private.key -out azure-certificate.pem -chmod 600 azure-private.key -openssl x509 -outform der -in azure-certificate.pem -out azure-certificate.cer -# Transform private key to PEM format -openssl pkcs8 -topk8 -nocrypt -in azure-private.key -inform PEM -out azure-pk.pem -outform PEM -# Transform certificate to PEM format -openssl x509 -inform der -in azure-certificate.cer -out azure-cert.pem -cat azure-cert.pem azure-pk.pem > azure.pem.txt -# You MUST enter a password! -openssl pkcs12 -export -in azure.pem.txt -out azurekeystore.pkcs12 -name azure -noiter -nomaciter -``` - -Upload the generated key to Azure platform. **Important**: when prompted for a password, -you need to enter a non empty one. 
- -```sh -azure service cert create azure-elasticsearch-cluster azure-certificate.cer -``` - ### Install elasticsearch cloud azure plugin ```sh From 48b264172c861509fdbe778935c8cedd5a54331c Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 15 Jan 2014 16:04:02 +0100 Subject: [PATCH 010/125] update headers --- README.md | 29 ++++--------------- pom.xml | 2 +- .../cloud/azure/AzureComputeService.java | 14 ++++----- .../cloud/azure/AzureComputeServiceImpl.java | 14 ++++----- .../cloud/azure/AzureModule.java | 14 ++++----- .../cloud/azure/AzureSettingsFilter.java | 14 ++++----- .../elasticsearch/cloud/azure/Instance.java | 14 ++++----- .../discovery/azure/AzureDiscovery.java | 14 ++++----- .../discovery/azure/AzureDiscoveryModule.java | 14 ++++----- .../azure/AzureUnicastHostsProvider.java | 14 ++++----- .../plugin/cloud/azure/CloudAzurePlugin.java | 14 ++++----- .../azure/itest/AzureSimpleITest.java | 15 +++++----- .../cloud/azure/AbstractAzureTest.java | 14 ++++----- .../AbstractAzureComputeServiceTest.java | 15 +++++----- .../AzureComputeServiceAbstractMock.java | 14 ++++----- .../azure/AzureComputeServiceSimpleMock.java | 14 ++++----- .../AzureComputeServiceTwoNodesMock.java | 14 ++++----- .../azure/AzureInstanceXmlParserTest.java | 14 ++++----- .../discovery/azure/AzureSimpleTest.java | 15 +++++----- .../azure/AzureTwoStartedNodesTest.java | 15 +++++----- 20 files changed, 137 insertions(+), 150 deletions(-) diff --git a/README.md b/README.md index 32cd930c13c..9bbe68569a5 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,14 @@ -Azure Cloud Plugin for Elasticearch +Azure Cloud Plugin for Elasticsearch ==================================== The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0.alpha1`. - - - - - - - - - - - - - - - - - - - - -
Azure Cloud PluginElasticSearchRelease date
1.0.0-SNAPSHOT (master)0.90.6
1.0.0.alpha10.90.62013-11-06
+| Azure Cloud Plugin | elasticsearch | Release date | +|----------------------------|---------------------|:------------:| +| 1.0.0-SNAPSHOT (master) | 0.90.6 -> 0.90 | | +| 1.0.0.alpha1 | 0.90.6 -> 0.90 | 2013-11-06 | Azure Virtual Machine Discovery @@ -346,7 +329,7 @@ License This software is licensed under the Apache 2 license, quoted below. - Copyright 2009-2013 ElasticSearch + Copyright 2009-2014 Elasticsearch Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of diff --git a/pom.xml b/pom.xml index b580ca66212..daf6b11b9c4 100644 --- a/pom.xml +++ b/pom.xml @@ -42,7 +42,7 @@ governing permissions and limitations under the License. --> - 1.0.0.RC1-SNAPSHOT + 0.90.10 4.6.0 onerror 1 diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java index 5f19ca22632..fc7469b666a 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java index 554af257ed2..de82f875846 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java index 367481321f8..1488ff9c9b2 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java index 863a97874a2..e56bb03c993 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/main/java/org/elasticsearch/cloud/azure/Instance.java b/src/main/java/org/elasticsearch/cloud/azure/Instance.java index 03acacd3a94..e5721d51c72 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/Instance.java +++ b/src/main/java/org/elasticsearch/cloud/azure/Instance.java @@ -1,11 +1,11 @@ /* - * Licensed to Elasticsearch (the "Author") under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Author licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index c56a8632c06..51f8b991d0d 100755 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java index 1a51f214f4b..8ebb674f432 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java index f4125a9a5f4..3fbe5f71957 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java index fa98e848fde..0caedbccf54 100644 --- a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java +++ b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java index b96a604c32a..0591cc1fb88 100644 --- a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java +++ b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ + package org.elasticsearch.azure.itest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; diff --git a/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java index 58c69ebba76..8f34054e7f3 100644 --- a/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java +++ b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java @@ -1,11 +1,11 @@ /* - * Licensed to Elasticsearch (the "Author") under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Author licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java index c6586dfe1fc..7e0cb73463f 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.discovery.azure; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java index 142923f5853..37b530ee9ba 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java index c1b79e39c4b..8a68a93db7c 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java index 8a60528846a..c829a6260d4 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java index dfe358d3254..bb8fbcf6700 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java @@ -1,11 +1,11 @@ /* - * Licensed to Elasticsearch (the "Author") under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. Author licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java index 6bd7130300d..478a2a9f416 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.discovery.azure; import org.elasticsearch.test.ElasticsearchIntegrationTest; diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java index a806f5e63cd..7791b57dd08 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java @@ -1,11 +1,11 @@ /* - * Licensed to ElasticSearch under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. ElasticSearch licenses this - * file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.discovery.azure; import org.elasticsearch.test.ElasticsearchIntegrationTest; From 9f9d85ec825b9fb63151d9382644b8a2b78048ff Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 15 Jan 2014 16:29:55 +0100 Subject: [PATCH 011/125] Move tests to elasticsearch test framework Related to #3. 
--- pom.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/pom.xml b/pom.xml index daf6b11b9c4..abe885ec354 100644 --- a/pom.xml +++ b/pom.xml @@ -51,6 +51,7 @@ governing permissions and limitations under the License. --> INFO + ${project.build.testOutputDirectory}/elasticsearch.yml From fbbaf37620c7d58aca50b7f797e01b5bbb6867c1 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 15 Jan 2014 16:06:45 +0100 Subject: [PATCH 012/125] prepare 1.x branch --- README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 9bbe68569a5..b2f5d1496ea 100644 --- a/README.md +++ b/README.md @@ -5,10 +5,11 @@ The Azure Cloud plugin allows to use Azure API for the unicast discovery mechani In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0.alpha1`. -| Azure Cloud Plugin | elasticsearch | Release date | -|----------------------------|---------------------|:------------:| -| 1.0.0-SNAPSHOT (master) | 0.90.6 -> 0.90 | | -| 1.0.0.alpha1 | 0.90.6 -> 0.90 | 2013-11-06 | +| Azure Cloud Plugin | elasticsearch | Release date | +|-----------------------------|---------------------|:------------:| +| 2.0.0.RC1-SNAPSHOT (master) | 1.0.0.RC1 -> master | | +| 1.0.0-SNAPSHOT (1.x) | 0.90.6 -> 0.90 | | +| 1.0.0.alpha1 | 0.90.6 -> 0.90 | 2013-11-06 | Azure Virtual Machine Discovery From fbf9b0049ce42108cf248ca349a10fa8d503a0e4 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 15 Jan 2014 16:11:53 +0100 Subject: [PATCH 013/125] Update to elasticsearch 1.0.0.RC1 Closes #4. 
--- pom.xml | 8 ++++---- .../cloud/azure/AzureComputeServiceImpl.java | 10 +++++----- .../discovery/azure/AzureUnicastHostsProvider.java | 4 ++-- .../azure/AzureComputeServiceAbstractMock.java | 8 ++++---- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/pom.xml b/pom.xml index abe885ec354..3a463e8edde 100644 --- a/pom.xml +++ b/pom.xml @@ -1,5 +1,5 @@ - 4.0.0 org.elasticsearch elasticsearch-cloud-azure - 1.0.0-SNAPSHOT + 2.0.0.RC1-SNAPSHOT jar - Azure Cloud plugin for ElasticSearch + Azure Cloud plugin for Elasticsearch 2013 @@ -42,7 +42,7 @@ governing permissions and limitations under the License. --> - 0.90.10 + 1.0.0.RC1-SNAPSHOT 4.6.0 onerror 1 diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java index de82f875846..60428c35873 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java @@ -19,7 +19,7 @@ package org.elasticsearch.cloud.azure; -import org.elasticsearch.ElasticSearchException; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -200,18 +200,18 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent Date: Wed, 15 Jan 2014 16:50:36 +0100 Subject: [PATCH 014/125] setting path.data to /mnt/resource/... is highly recommended --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index b2f5d1496ea..b09b7411045 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,9 @@ multicast environments). 
Here is a simple sample configuration: service_name: your_azure_cloud_service_name discovery: type: azure + + # recommended + # path.data: /mnt/resource/elasticsearch/data ``` How to start (short story) @@ -256,6 +259,9 @@ And add the following lines: service_name: your_azure_cloud_service_name discovery: type: azure + +# Recommended + path.data: /mnt/resource/elasticsearch/data ``` Restart elasticsearch: From 647312617bc9b281930626f1d8ab59a75260af0d Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 27 Jan 2014 09:46:30 +0100 Subject: [PATCH 015/125] Update README (chmod pk / certificate) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b09b7411045..0605f91b8b2 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ Here is a description on how to generate this using `openssl`: # You may want to use another dir than /tmp cd /tmp openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure-private.key -out azure-certificate.pem -chmod 600 azure-private.key +chmod 600 azure-private.key azure-certificate.pem openssl x509 -outform der -in azure-certificate.pem -out azure-certificate.cer # Generate a keystore (azurekeystore.pkcs12) From f06e54e59a79d804bf456ba2d3ff4a701e053cb7 Mon Sep 17 00:00:00 2001 From: Andrew Raines Date: Mon, 10 Feb 2014 12:31:40 -0600 Subject: [PATCH 016/125] Add prerequisite about openssl version --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 0605f91b8b2..d79fd6ae931 100644 --- a/README.md +++ b/README.md @@ -58,6 +58,9 @@ Before starting, you need to have: * A [Windows Azure account](http://www.windowsazure.com/) * SSH keys and certificate +* OpenSSL that isn't from MacPorts, specifically `OpenSSL 1.0.1f 6 Jan + 2014` doesn't seem to create a valid keypair for ssh. FWIW, + `OpenSSL 1.0.1c 10 May 2012` on Ubuntu 12.04 LTS is known to work. 
Here is a description on how to generate this using `openssl`: From 96c60413b8e4f617fbd3a70f19b777d933ba46a6 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 14 Feb 2014 18:33:45 +0100 Subject: [PATCH 017/125] prepare release elasticsearch-cloud-azure-2.0.0 --- README.md | 25 ++++++++++++++----------- pom.xml | 6 +++--- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index d79fd6ae931..90ed2d6d125 100644 --- a/README.md +++ b/README.md @@ -3,11 +3,15 @@ Azure Cloud Plugin for Elasticsearch The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. -In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0.alpha1`. +In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.0.0`. + +For 1.0.x elasticsearch versions, look at [master branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/master). +For 0.90.x elasticsearch versions, look at [1.x branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/1.x). 
| Azure Cloud Plugin | elasticsearch | Release date | |-----------------------------|---------------------|:------------:| -| 2.0.0.RC1-SNAPSHOT (master) | 1.0.0.RC1 -> master | | +| 2.0.0.RC1-SNAPSHOT (master) | 1.0 -> master | | +| 2.0.0 | 1.0 -> master | 2014-02-14 | | 1.0.0-SNAPSHOT (1.x) | 0.90.6 -> 0.90 | | | 1.0.0.alpha1 | 0.90.6 -> 0.90 | 2013-11-06 | @@ -207,10 +211,10 @@ sudo apt-get update sudo apt-get install openjdk-7-jre-headless # Download Elasticsearch -curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.6.deb -o elasticsearch-0.90.6.deb +curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.0.0.deb -o elasticsearch-1.0.0.deb # Prepare Elasticsearch installation -sudo dpkg -i elasticsearch-0.90.6.deb +sudo dpkg -i elasticsearch-1.0.0.deb ``` Check that elasticsearch is running: @@ -223,15 +227,14 @@ This command should give you a JSON result: ```javascript { - "ok" : true, "status" : 200, - "name" : "Grey, Dr. John", + "name" : "Living Colossus", "version" : { - "number" : "0.90.6", - "build_hash" : "e2a24efdde0cb7cc1b2071ffbbd1fd874a6d8d6b", - "build_timestamp" : "2013-11-04T13:44:16Z", + "number" : "1.0.0", + "build_hash" : "a46900e9c72c0a623d71b54016357d5f94c8ea32", + "build_timestamp" : "2014-02-12T16:18:34Z", "build_snapshot" : false, - "lucene_version" : "4.5.1" + "lucene_version" : "4.6" }, "tagline" : "You Know, for Search" } @@ -244,7 +247,7 @@ This command should give you a JSON result: sudo service elasticsearch stop # Install the plugin -sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/1.0.0.alpha1 +sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.0.0 # Configure it sudo vi /etc/elasticsearch/elasticsearch.yml diff --git a/pom.xml b/pom.xml index 3a463e8edde..c9c580e2bfe 100644 --- a/pom.xml +++ b/pom.xml @@ -17,7 +17,7 @@ governing permissions and limitations under the License. 
--> 4.0.0 org.elasticsearch elasticsearch-cloud-azure - 2.0.0.RC1-SNAPSHOT + 2.0.0 jar Azure Cloud plugin for Elasticsearch 2013 @@ -42,8 +42,8 @@ governing permissions and limitations under the License. --> - 1.0.0.RC1-SNAPSHOT - 4.6.0 + 1.0.0 + 4.6.1 onerror 1 true From 65448fe6393ff42e5fdbb4f74a8f7ae92be00ede Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 14 Feb 2014 18:36:00 +0100 Subject: [PATCH 018/125] prepare for next development iteration --- README.md | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 90ed2d6d125..9209bc9153a 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ For 0.90.x elasticsearch versions, look at [1.x branch](https://github.com/elast | Azure Cloud Plugin | elasticsearch | Release date | |-----------------------------|---------------------|:------------:| -| 2.0.0.RC1-SNAPSHOT (master) | 1.0 -> master | | +| 2.1.0-SNAPSHOT (master) | 1.0 -> master | | | 2.0.0 | 1.0 -> master | 2014-02-14 | | 1.0.0-SNAPSHOT (1.x) | 0.90.6 -> 0.90 | | | 1.0.0.alpha1 | 0.90.6 -> 0.90 | 2013-11-06 | diff --git a/pom.xml b/pom.xml index c9c580e2bfe..8c171dbe152 100644 --- a/pom.xml +++ b/pom.xml @@ -17,7 +17,7 @@ governing permissions and limitations under the License. --> 4.0.0 org.elasticsearch elasticsearch-cloud-azure - 2.0.0 + 2.1.0-SNAPSHOT jar Azure Cloud plugin for Elasticsearch 2013 From 92f629bcc4f1db794e46a9257e4edd62100eff4f Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 13 Mar 2014 15:35:00 +0100 Subject: [PATCH 019/125] Add plugin release semi-automatic script Closes #10. 
--- README.md | 8 +- dev-tools/build_release.py | 708 +++++++++++++++++++++++++++++++++++++ dev-tools/upload-s3.py | 67 ++++ pom.xml | 5 +- 4 files changed, 781 insertions(+), 7 deletions(-) create mode 100755 dev-tools/build_release.py create mode 100644 dev-tools/upload-s3.py diff --git a/README.md b/README.md index 9209bc9153a..218918539c8 100644 --- a/README.md +++ b/README.md @@ -5,15 +5,13 @@ The Azure Cloud plugin allows to use Azure API for the unicast discovery mechani In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.0.0`. -For 1.0.x elasticsearch versions, look at [master branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/master). -For 0.90.x elasticsearch versions, look at [1.x branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/1.x). +* For 1.0.x elasticsearch versions, look at [master branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/master). +* For 0.90.x elasticsearch versions, look at [1.x branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/1.x). | Azure Cloud Plugin | elasticsearch | Release date | |-----------------------------|---------------------|:------------:| -| 2.1.0-SNAPSHOT (master) | 1.0 -> master | | +| 2.1.0-SNAPSHOT | 1.0 -> master | XXXX-XX-XX | | 2.0.0 | 1.0 -> master | 2014-02-14 | -| 1.0.0-SNAPSHOT (1.x) | 0.90.6 -> 0.90 | | -| 1.0.0.alpha1 | 0.90.6 -> 0.90 | 2013-11-06 | Azure Virtual Machine Discovery diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py new file mode 100755 index 00000000000..9166b09e7e3 --- /dev/null +++ b/dev-tools/build_release.py @@ -0,0 +1,708 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. 
Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +import re +import tempfile +import shutil +import os +import datetime +import argparse +import github3 +import smtplib + +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText + +from os.path import dirname, abspath + +""" + This tool builds a release from the a given elasticsearch plugin branch. + In order to execute it go in the top level directory and run: + $ python3 dev_tools/build_release.py --branch master --publish --remote origin + + By default this script runs in 'dry' mode which essentially simulates a release. If the + '--publish' option is set the actual release is done. + If not in 'dry' mode, a mail will be automatically sent to the mailing list. + You can disable it with the option '--disable_mail' + + $ python3 dev_tools/build_release.py --publish --remote origin --disable_mail + + The script takes over almost all + steps necessary for a release from a high level point of view it does the following things: + + - run prerequisite checks ie. 
check for Java 1.6 being present or S3 credentials available as env variables + - detect the version to release from the specified branch (--branch) or the current branch + - creates a release branch & updates pom.xml and README.md to point to a release version rather than a snapshot + - builds the artifacts + - commits the new version and merges the release branch into the source branch + - creates a tag and pushes the commit to the specified origin (--remote) + - publishes the releases to sonatype and S3 + - send a mail based on github issues fixed by this version + +Once it's done it will print all the remaining steps. + + Prerequisites: + - Python 3k for script execution + - Boto for S3 Upload ($ apt-get install python-boto or pip-3.3 install boto) + - github3 module (pip-3.3 install github3.py) + - S3 keys exported via ENV Variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) + - GITHUB (login/password) or key exported via ENV Variables (GITHUB_LOGIN, GITHUB_PASSWORD or GITHUB_KEY) + (see https://github.com/settings/applications#personal-access-tokens) - Optional: default to no authentication + - SMTP_HOST - Optional: default to localhost + - MAIL_SENDER - Optional: default to 'david@pilato.fr': must be authorized to send emails to elasticsearch mailing list + - MAIL_TO - Optional: default to 'elasticsearch@googlegroups.com' +""" +env = os.environ + +LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log') +ROOT_DIR = os.path.join(abspath(dirname(__file__)), '../') +README_FILE = ROOT_DIR + 'README.md' +POM_FILE = ROOT_DIR + 'pom.xml' + +def log(msg): + log_plain('\n%s' % msg) + +def log_plain(msg): + f = open(LOG, mode='ab') + f.write(msg.encode('utf-8')) + f.close() + +def run(command, quiet=False): + log('%s: RUN: %s\n' % (datetime.datetime.now(), command)) + if os.system('%s >> %s 2>&1' % (command, LOG)): + msg = ' FAILED: %s [see log %s]' % (command, LOG) + if not quiet: + print(msg) + raise RuntimeError(msg) + +try: + JAVA_HOME = env['JAVA_HOME'] 
+except KeyError: + raise RuntimeError(""" + Please set JAVA_HOME in the env before running release tool + On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.6*'`""") + +try: + MVN='mvn' + # make sure mvn3 is used if mvn3 is available + # some systems use maven 2 as default + run('mvn3 --version', quiet=True) + MVN='mvn3' +except RuntimeError: + pass + + +def java_exe(): + path = JAVA_HOME + return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path) + +def verify_java_version(version): + s = os.popen('%s; java -version 2>&1' % java_exe()).read() + if s.find(' version "%s.' % version) == -1: + raise RuntimeError('got wrong version for java %s:\n%s' % (version, s)) + +# Verifies the java version. We guarantee that we run with Java 1.6 +# If 1.6 is not available fail the build! +def verify_mvn_java_version(version, mvn): + s = os.popen('%s; %s --version 2>&1' % (java_exe(), mvn)).read() + if s.find('Java version: %s' % version) == -1: + raise RuntimeError('got wrong java version for %s %s:\n%s' % (mvn, version, s)) + +# Returns the hash of the current git HEAD revision +def get_head_hash(): + return os.popen(' git rev-parse --verify HEAD 2>&1').read().strip() + +# Returns the hash of the given tag revision +def get_tag_hash(tag): + return os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read().strip() + +# Returns the name of the current branch +def get_current_branch(): + return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip() + +verify_java_version('1.6') # we require to build with 1.6 +verify_mvn_java_version('1.6', MVN) + +# Utility that returns the name of the release branch for a given version +def release_branch(version): + return 'release_branch_%s' % version + +# runs get fetch on the given remote +def fetch(remote): + run('git fetch %s' % remote) + +# Creates a new release branch from the given source branch +# and rebases the source branch from the remote before creating +# the release branch. 
Note: This fails if the source branch +# doesn't exist on the provided remote. +def create_release_branch(remote, src_branch, release): + run('git checkout %s' % src_branch) + run('git pull --rebase %s %s' % (remote, src_branch)) + run('git checkout -b %s' % (release_branch(release))) + + +# Reads the given file and applies the +# callback to it. If the callback changed +# a line the given file is replaced with +# the modified input. +def process_file(file_path, line_callback): + fh, abs_path = tempfile.mkstemp() + modified = False + with open(abs_path,'w', encoding='utf-8') as new_file: + with open(file_path, encoding='utf-8') as old_file: + for line in old_file: + new_line = line_callback(line) + modified = modified or (new_line != line) + new_file.write(new_line) + os.close(fh) + if modified: + #Remove original file + os.remove(file_path) + #Move new file + shutil.move(abs_path, file_path) + return True + else: + # nothing to do - just remove the tmp file + os.remove(abs_path) + return False + +# Guess the next snapshot version number (increment second digit) +def guess_snapshot(version): + digits=list(map(int, re.findall(r'\d+', version))) + source='%s.%s' % (digits[0], digits[1]) + destination='%s.%s' % (digits[0], digits[1]+1) + return version.replace(source, destination) + +# Moves the pom.xml file from a snapshot to a release +def remove_maven_snapshot(pom, release): + pattern = '%s-SNAPSHOT' % release + replacement = '%s' % release + def callback(line): + return line.replace(pattern, replacement) + process_file(pom, callback) + +# Moves the README.md file from a snapshot to a release +def remove_version_snapshot(readme_file, release): + pattern = '%s-SNAPSHOT' % release + replacement = '%s ' % release + def callback(line): + return line.replace(pattern, replacement) + process_file(readme_file, callback) + +# Moves the pom.xml file to the next snapshot +def add_maven_snapshot(pom, release, snapshot): + pattern = '%s' % release + replacement = '%s-SNAPSHOT' 
% snapshot + def callback(line): + return line.replace(pattern, replacement) + process_file(pom, callback) + +# Add in README.md file the next snapshot +def add_version_snapshot(readme_file, release, snapshot): + pattern = '| %s ' % release + replacement = '| %s-SNAPSHOT' % snapshot + def callback(line): + # If we find pattern, we copy the line and replace its content + if line.find(pattern) >= 0: + return line.replace(pattern, replacement).replace('%s' % (datetime.datetime.now().strftime("%Y-%m-%d")), + 'XXXX-XX-XX')+line + else: + return line + process_file(readme_file, callback) + + +# Set release date in README.md file +def set_date(readme_file): + pattern = 'XXXX-XX-XX' + replacement = '%s' % (datetime.datetime.now().strftime("%Y-%m-%d")) + def callback(line): + return line.replace(pattern, replacement) + process_file(readme_file, callback) + +# Update installation instructions in README.md file +def set_install_instructions(readme_file, artifact_name, release): + pattern = '`bin/plugin -install elasticsearch/%s/.+`' % artifact_name + replacement = '`bin/plugin -install elasticsearch/%s/%s`' % (artifact_name, release) + def callback(line): + return re.sub(pattern, replacement, line) + process_file(readme_file, callback) + + +# Stages the given files for the next git commit +def add_pending_files(*files): + for file in files: + run('git add %s' % file) + +# Executes a git commit with 'release [version]' as the commit message +def commit_release(artifact_id, release): + run('git commit -m "prepare release %s-%s"' % (artifact_id, release)) + +def commit_snapshot(): + run('git commit -m "prepare for next development iteration"') + +def tag_release(release): + run('git tag -a v%s -m "Tag release version %s"' % (release, release)) + +def run_mvn(*cmd): + for c in cmd: + run('%s; %s -f %s %s' % (java_exe(), MVN, POM_FILE, c)) + +def build_release(run_tests=False, dry_run=True): + target = 'deploy' + if dry_run: + target = 'package' + if run_tests: + run_mvn('clean 
test') + run_mvn('clean %s -DskipTests' %(target)) + +# Checks the pom.xml for the release version. 2.0.0-SNAPSHOT +# This method fails if the pom file has no SNAPSHOT version set ie. +# if the version is already on a release version we fail. +# Returns the next version string ie. 0.90.7 +def find_release_version(src_branch): + run('git checkout %s' % src_branch) + with open(POM_FILE, encoding='utf-8') as file: + for line in file: + match = re.search(r'(.+)-SNAPSHOT', line) + if match: + return match.group(1) + raise RuntimeError('Could not find release version in branch %s' % src_branch) + +# extract a value from pom.xml +def find_from_pom(tag): + with open(POM_FILE, encoding='utf-8') as file: + for line in file: + match = re.search(r'<%s>(.+)' % (tag, tag), line) + if match: + return match.group(1) + raise RuntimeError('Could not find <%s> in pom.xml file' % (tag)) + +def get_artifacts(artifact_id, release): + artifact_path = ROOT_DIR + 'target/releases/%s-%s.zip' % (artifact_id, release) + print(' Path %s' % (artifact_path)) + if not os.path.isfile(artifact_path): + raise RuntimeError('Could not find required artifact at %s' % (artifact_path)) + return artifact_path + +# Generates sha1 for a file +# and returns the checksum files as well +# as the given files in a list +def generate_checksums(release_file): + res = [] + directory = os.path.dirname(release_file) + file = os.path.basename(release_file) + checksum_file = '%s.sha1.txt' % file + + if os.system('cd %s; shasum %s > %s' % (directory, file, checksum_file)): + raise RuntimeError('Failed to generate checksum for file %s' % release_file) + res = res + [os.path.join(directory, checksum_file), release_file] + return res + +def git_merge(src_branch, release_version): + run('git checkout %s' % src_branch) + run('git merge %s' % release_branch(release_version)) + +def git_push(remote, src_branch, release_version, dry_run): + if not dry_run: + run('git push %s %s' % (remote, src_branch)) # push the commit + 
run('git push %s v%s' % (remote, release_version)) # push the tag + else: + print(' dryrun [True] -- skipping push to remote %s' % remote) + +def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=True): + location = os.path.dirname(os.path.realpath(__file__)) + for artifact in artifacts: + if dry_run: + print('Skip Uploading %s to Amazon S3 in %s' % (artifact, base)) + else: + print('Uploading %s to Amazon S3' % artifact) + # requires boto to be installed but it is not available on python3k yet so we use a dedicated tool + run('python %s/upload-s3.py --file %s --path %s' % (location, os.path.abspath(artifact), base)) + + +################# +## +## +## Email and Github Management +## +## +################# +def format_issues_plain(issues, title='Fix'): + response = "" + + if len(issues) > 0: + response += '%s:\n' % title + for issue in issues: + response += ' * [%s] - %s (%s)\n' % (issue.number, issue.title, issue.html_url) + + return response + +def format_issues_html(issues, title='Fix'): + response = "" + + if len(issues) > 0: + response += '

%s

\n
    \n' % title + for issue in issues: + response += '[%s] - %s\n' % (issue.html_url, issue.number, issue.title) + response += '
\n' + + return response + +def get_github_repository(reponame, + login=env.get('GITHUB_LOGIN', None), + password=env.get('GITHUB_PASSWORD', None), + key=env.get('GITHUB_KEY', None)): + if login: + g = github3.login(login, password) + elif key: + g = github3.login(token=key) + else: + g = github3.GitHub() + + return g.repository("elasticsearch", reponame) + +# Check if there are some remaining open issues and fails +def check_opened_issues(version, repository, reponame): + opened_issues = [i for i in repository.iter_issues(state='open', labels='%s' % version)] + if len(opened_issues)>0: + raise NameError('Some issues [%s] are still opened. Check https://github.com/elasticsearch/%s/issues?labels=%s&state=open' + % (len(opened_issues), reponame, version)) + +# List issues from github: can be done anonymously if you don't +# exceed a given number of github API calls per day +# Check if there are some remaining open issues and fails +def list_issues(version, + repository, + severity='bug'): + issues = [i for i in repository.iter_issues(state='closed', labels='%s,%s' % (severity, version))] + return issues + +# Get issues from github and generates a Plain/HTML Multipart email +# And send it if dry_run=False +def prepare_email(artifact_id, release_version, repository, + artifact_name, artifact_description, project_url, + severity_labels_bug='bug', + severity_labels_update='update', + severity_labels_new='new', + severity_labels_doc='doc'): + + ## Get bugs from github + issues_bug = list_issues(release_version, repository, severity=severity_labels_bug) + issues_update = list_issues(release_version, repository, severity=severity_labels_update) + issues_new = list_issues(release_version, repository, severity=severity_labels_new) + issues_doc = list_issues(release_version, repository, severity=severity_labels_doc) + + ## Format content to plain text + plain_issues_bug = format_issues_plain(issues_bug, 'Fix') + plain_issues_update = format_issues_plain(issues_update, 'Update') 
+ plain_issues_new = format_issues_plain(issues_new, 'New') + plain_issues_doc = format_issues_plain(issues_doc, 'Doc') + + ## Format content to html + html_issues_bug = format_issues_html(issues_bug, 'Fix') + html_issues_update = format_issues_html(issues_update, 'Update') + html_issues_new = format_issues_html(issues_new, 'New') + html_issues_doc = format_issues_html(issues_doc, 'Doc') + + if len(issues_bug)+len(issues_update)+len(issues_new)+len(issues_doc) > 0: + plain_empty_message = "" + html_empty_message = "" + + else: + plain_empty_message = "No issue listed for this release" + html_empty_message = "

No issue listed for this release

" + + msg = MIMEMultipart('alternative') + msg['Subject'] = '[ANN] %s %s released' % (artifact_name, release_version) + text = """ +Heya, + + +We are pleased to announce the release of the %(artifact_name)s, version %(release_version)s. + +%(artifact_description)s. + +%(project_url)s + +Release Notes - %(artifact_id)s - Version %(release_version)s + +%(empty_message)s +%(issues_bug)s +%(issues_update)s +%(issues_new)s +%(issues_doc)s + +Issues, Pull requests, Feature requests are warmly welcome on %(artifact_id)s project repository: %(project_url)s +For questions or comments around this plugin, feel free to use elasticsearch mailing list: https://groups.google.com/forum/#!forum/elasticsearch + +Enjoy, + +-The Elasticsearch team +""" % {'release_version': release_version, + 'artifact_id': artifact_id, + 'artifact_name': artifact_name, + 'artifact_description': artifact_description, + 'project_url': project_url, + 'empty_message': plain_empty_message, + 'issues_bug': plain_issues_bug, + 'issues_update': plain_issues_update, + 'issues_new': plain_issues_new, + 'issues_doc': plain_issues_doc} + + html = """ + + +

Heya,

+ +

We are pleased to announce the release of the %(artifact_name)s, version %(release_version)s

+ +
%(artifact_description)s.
+ +

Release Notes - Version %(release_version)s

+%(empty_message)s +%(issues_bug)s +%(issues_update)s +%(issues_new)s +%(issues_doc)s + +

Issues, Pull requests, Feature requests are warmly welcome on +%(artifact_id)s project repository!

+

For questions or comments around this plugin, feel free to use elasticsearch +mailing list!

+ +

Enjoy,

+ +

- The Elasticsearch team

+ +""" % {'release_version': release_version, + 'artifact_id': artifact_id, + 'artifact_name': artifact_name, + 'artifact_description': artifact_description, + 'project_url': project_url, + 'empty_message': html_empty_message, + 'issues_bug': html_issues_bug, + 'issues_update': html_issues_update, + 'issues_new': html_issues_new, + 'issues_doc': html_issues_doc} + + # Record the MIME types of both parts - text/plain and text/html. + part1 = MIMEText(text, 'plain') + part2 = MIMEText(html, 'html') + + # Attach parts into message container. + # According to RFC 2046, the last part of a multipart message, in this case + # the HTML message, is best and preferred. + msg.attach(part1) + msg.attach(part2) + + return msg + +def send_email(msg, + dry_run=True, + mail=True, + sender=env.get('MAIL_SENDER'), + to=env.get('MAIL_TO', 'elasticsearch@googlegroups.com'), + smtp_server=env.get('SMTP_SERVER', 'localhost')): + msg['From'] = 'Elasticsearch Team <%s>' % sender + msg['To'] = 'Elasticsearch Mailing List <%s>' % to + # save mail on disk + with open(ROOT_DIR+'target/email.txt', 'w') as email_file: + email_file.write(msg.as_string()) + if mail and not dry_run: + s = smtplib.SMTP(smtp_server, 25) + s.sendmail(sender, to, msg.as_string()) + s.quit() + else: + print('generated email: open %starget/email.txt' % ROOT_DIR) + +def print_sonatype_notice(): + settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml') + if os.path.isfile(settings): + with open(settings, encoding='utf-8') as settings_file: + for line in settings_file: + if line.strip() == 'sonatype-nexus-snapshots': + # moving out - we found the indicator no need to print the warning + return + print(""" + NOTE: No sonatype settings detected, make sure you have configured + your sonatype credentials in '~/.m2/settings.xml': + + + ... + + + sonatype-nexus-snapshots + your-jira-id + your-jira-pwd + + + sonatype-nexus-staging + your-jira-id + your-jira-pwd + + + ... 
+ + """) + +def check_s3_credentials(): + if not env.get('AWS_ACCESS_KEY_ID', None) or not env.get('AWS_SECRET_ACCESS_KEY', None): + raise RuntimeError('Could not find "AWS_ACCESS_KEY_ID" / "AWS_SECRET_ACCESS_KEY" in the env variables please export in order to upload to S3') + +def check_github_credentials(): + if not env.get('GITHUB_KEY', None) and not env.get('GITHUB_LOGIN', None): + log('WARN: Could not find "GITHUB_LOGIN" / "GITHUB_PASSWORD" or "GITHUB_KEY" in the env variables. You could need it.') + +def check_email_settings(): + if not env.get('MAIL_SENDER', None): + raise RuntimeError('Could not find "MAIL_SENDER"') + +# we print a notice if we can not find the relevant infos in the ~/.m2/settings.xml +print_sonatype_notice() + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Plugin Release') + parser.add_argument('--branch', '-b', metavar='master', default=get_current_branch(), + help='The branch to release from. Defaults to the current branch.') + parser.add_argument('--skiptests', '-t', dest='tests', action='store_false', + help='Skips tests before release. Tests are run by default.') + parser.set_defaults(tests=True) + parser.add_argument('--remote', '-r', metavar='origin', default='origin', + help='The remote to push the release commit and tag to. Default is [origin]') + parser.add_argument('--publish', '-p', dest='dryrun', action='store_false', + help='Publishes the release. Disable by default.') + parser.add_argument('--disable_mail', '-dm', dest='mail', action='store_false', + help='Do not send a release email. 
Email is sent by default.') + + parser.set_defaults(dryrun=True) + parser.set_defaults(mail=True) + args = parser.parse_args() + + src_branch = args.branch + remote = args.remote + run_tests = args.tests + dry_run = args.dryrun + mail = args.mail + + if not dry_run: + check_s3_credentials() + print('WARNING: dryrun is set to "false" - this will push and publish the release') + if mail: + check_email_settings() + print('An email to %s will be sent after the release' + % env.get('MAIL_TO', 'elasticsearch@googlegroups.com')) + input('Press Enter to continue...') + + check_github_credentials() + + print(''.join(['-' for _ in range(80)])) + print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run)) + print(' JAVA_HOME is [%s]' % JAVA_HOME) + print(' Running with maven command: [%s] ' % (MVN)) + + release_version = find_release_version(src_branch) + artifact_id = find_from_pom('artifactId') + artifact_name = find_from_pom('name') + artifact_description = find_from_pom('description') + project_url = find_from_pom('url') + print(' Artifact Id: [%s]' % artifact_id) + print(' Release version: [%s]' % release_version) + + # extract snapshot + default_snapshot_version = guess_snapshot(release_version) + snapshot_version = input('Enter next snapshot version [%s]:' % default_snapshot_version) + snapshot_version = snapshot_version or default_snapshot_version + + print(' Next version: [%s-SNAPSHOT]' % snapshot_version) + print(' Artifact Name: [%s]' % artifact_name) + print(' Artifact Description: [%s]' % artifact_description) + print(' Project URL: [%s]' % project_url) + + if not dry_run: + smoke_test_version = release_version + head_hash = get_head_hash() + run_mvn('clean') # clean the env! 
+ create_release_branch(remote, src_branch, release_version) + print(' Created release branch [%s]' % (release_branch(release_version))) + success = False + try: + pending_files = [POM_FILE, README_FILE] + remove_maven_snapshot(POM_FILE, release_version) + remove_version_snapshot(README_FILE, release_version) + set_date(README_FILE) + set_install_instructions(README_FILE, artifact_id, release_version) + print(' Done removing snapshot version') + add_pending_files(*pending_files) # expects var args use * to expand + commit_release(artifact_id, release_version) + print(' Committed release version [%s]' % release_version) + print(''.join(['-' for _ in range(80)])) + print('Building Release candidate') + input('Press Enter to continue...') + print(' Checking github issues') + repository = get_github_repository(artifact_id) + check_opened_issues(release_version, repository, artifact_id) + if not dry_run: + print(' Running maven builds now and publish to sonatype - run-tests [%s]' % run_tests) + else: + print(' Running maven builds now run-tests [%s]' % run_tests) + build_release(run_tests=run_tests, dry_run=dry_run) + artifact = get_artifacts(artifact_id, release_version) + artifact_and_checksums = generate_checksums(artifact) + print(''.join(['-' for _ in range(80)])) + + print('Finish Release -- dry_run: %s' % dry_run) + input('Press Enter to continue...') + print(' merge release branch') + git_merge(src_branch, release_version) + print(' tag') + tag_release(release_version) + + add_maven_snapshot(POM_FILE, release_version, snapshot_version) + add_version_snapshot(README_FILE, release_version, snapshot_version) + add_pending_files(*pending_files) + commit_snapshot() + + print(' push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run)) + git_push(remote, src_branch, release_version, dry_run) + print(' publish artifacts to S3 -- dry_run: %s' % dry_run) + publish_artifacts(artifact_and_checksums, base='elasticsearch/%s' % (artifact_id) , dry_run=dry_run) + print(' 
preparing email (from github issues)') + msg = prepare_email(artifact_id, release_version, repository, artifact_name, artifact_description, project_url) + print(' sending email -- dry_run: %s, mail: %s' % (dry_run, mail)) + send_email(msg, dry_run=dry_run, mail=mail) + + pending_msg = """ +Release successful pending steps: + * close and release sonatype repo: https://oss.sonatype.org/ + * check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/%(artifact_id)s/%(version)s + * tweet about the release +""" + print(pending_msg % {'version': release_version, + 'artifact_id': artifact_id, + 'project_url': project_url}) + success = True + finally: + if not success: + run('git reset --hard HEAD') + run('git checkout %s' % src_branch) + elif dry_run: + print('End of dry_run') + input('Press Enter to reset changes...') + + run('git reset --hard %s' % head_hash) + run('git tag -d v%s' % release_version) + # we delete this one anyways + run('git branch -D %s' % (release_branch(release_version))) diff --git a/dev-tools/upload-s3.py b/dev-tools/upload-s3.py new file mode 100644 index 00000000000..95ea576e65c --- /dev/null +++ b/dev-tools/upload-s3.py @@ -0,0 +1,67 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
+ +import os +import sys +import argparse +try: + import boto.s3 +except: + raise RuntimeError(""" + S3 upload requires boto to be installed + Use one of: + 'pip install -U boto' + 'apt-get install python-boto' + 'easy_install boto' + """) + +import boto.s3 + + +def list_buckets(conn): + return conn.get_all_buckets() + + +def upload_s3(conn, path, key, file, bucket): + print 'Uploading %s to Amazon S3 bucket %s/%s' % \ + (file, bucket, os.path.join(path, key)) + def percent_cb(complete, total): + sys.stdout.write('.') + sys.stdout.flush() + bucket = conn.create_bucket(bucket) + k = bucket.new_key(os.path.join(path, key)) + k.set_contents_from_filename(file, cb=percent_cb, num_cb=100) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Uploads files to Amazon S3') + parser.add_argument('--file', '-f', metavar='path to file', + help='the branch to release from', required=True) + parser.add_argument('--bucket', '-b', metavar='B42', default='download.elasticsearch.org', + help='The S3 Bucket to upload to') + parser.add_argument('--path', '-p', metavar='elasticsearch/elasticsearch', default='elasticsearch/elasticsearch', + help='The key path to use') + parser.add_argument('--key', '-k', metavar='key', default=None, + help='The key - uses the file name as default key') + args = parser.parse_args() + if args.key: + key = args.key + else: + key = os.path.basename(args.file) + + connection = boto.connect_s3() + upload_s3(connection, args.path, key, args.file, args.bucket); + diff --git a/pom.xml b/pom.xml index 8c171dbe152..fcda58e69e3 100644 --- a/pom.xml +++ b/pom.xml @@ -13,13 +13,14 @@ governing permissions and limitations under the License. --> - elasticsearch-cloud-azure 4.0.0 org.elasticsearch elasticsearch-cloud-azure 2.1.0-SNAPSHOT jar - Azure Cloud plugin for Elasticsearch + Elasticsearch Azure cloud plugin + The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. 
+ https://github.com/elasticsearch/elasticsearch-cloud-azure/ 2013 From 696cad1f67914e2e697884a7586debbcb6b58b53 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 13 Mar 2014 15:50:05 +0100 Subject: [PATCH 020/125] "azure vm list" command should be changed to "azure vm image list" in the README.md In the `readme.md` file, `azure vm list` command should be changed to `azure vm image list`. The former lists all the VMs running in the current Azure subscription and the latter displays list of official available images. The line is located just below the following line: `To get a list of official available images, run:` Closes #5. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 218918539c8..46a81081b5a 100644 --- a/README.md +++ b/README.md @@ -131,7 +131,7 @@ for more information. You will need to choose the operating system you want to run on. To get a list of official available images, run: ```sh -azure vm list +azure vm image list ``` Let's say we are going to deploy an Ubuntu image on an extra small instance in West Europe: From 11dfdfb189702f91d9c46e63ebe67a31bd7477a4 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 6 Jan 2014 23:14:34 +0100 Subject: [PATCH 021/125] Add Azure Storage repository elasticsearch 1.0 will provide a new feature named `Snapshot & Restore`. We want to add support for [Azure Storage](http://www.windowsazure.com/en-us/documentation/services/storage/). To enable Azure repositories, you have first to set your azure storage settings: ```yaml cloud: azure: storage_account: your_azure_storage_account storage_key: your_azure_storage_key ``` The Azure repository supports following settings: * `container`: Container name. Defaults to `elasticsearch-snapshots` * `base_path`: Specifies the path within container to repository data. Defaults to empty (root directory). * `concurrent_streams`: Throttles the number of streams (per node) preforming snapshot operation. Defaults to `5`. 
* `chunk_size`: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by using size value notation, i.e. `1g`, `10m`, `5k`. Defaults to `64m` (64m max) * `compress`: When set to `true` metadata files are stored in compressed format. This setting doesn't affect index files that are already compressed by default. Defaults to `false`. Some examples, using scripts: ```sh $ curl -XPUT 'http://localhost:9200/_snapshot/my_backup1' -d '{ "type": "azure" }' $ curl -XPUT 'http://localhost:9200/_snapshot/my_backup2' -d '{ "type": "azure", "settings": { "container": "backup_container", "base_path": "backups", "concurrent_streams": 2, "chunk_size": "32m", "compress": true } }' ``` Example using Java: ```java client.admin().cluster().preparePutRepository("my_backup3") .setType("azure").setSettings(ImmutableSettings.settingsBuilder() .put(AzureStorageService.Fields.CONTAINER, "backup_container") .put(AzureStorageService.Fields.CHUNK_SIZE, new ByteSizeValue(32, ByteSizeUnit.MB)) ).get(); ``` Closes #2. 
--- README.md | 76 ++++++ pom.xml | 5 + src/main/assemblies/plugin.xml | 10 +- .../cloud/azure/AzureComputeServiceImpl.java | 14 - .../cloud/azure/AzureModule.java | 88 +++++- .../cloud/azure/AzureSettingsFilter.java | 10 +- .../cloud/azure/AzureStorageService.java | 65 +++++ .../cloud/azure/AzureStorageServiceImpl.java | 239 ++++++++++++++++ .../blobstore/AbstractAzureBlobContainer.java | 137 ++++++++++ .../cloud/azure/blobstore/AzureBlobStore.java | 109 ++++++++ .../AzureImmutableBlobContainer.java | 57 ++++ .../discovery/azure/AzureDiscoveryModule.java | 4 +- .../plugin/cloud/azure/CloudAzurePlugin.java | 16 +- .../repositories/azure/AzureRepository.java | 146 ++++++++++ .../azure/AzureRepositoryModule.java | 61 +++++ src/main/resources/es-plugin.properties | 2 +- .../cloud/azure/AbstractAzureTest.java | 6 + .../AbstractAzureRepositoryServiceTest.java | 102 +++++++ .../azure/AzureSnapshotRestoreITest.java | 258 ++++++++++++++++++ .../azure/AzureSnapshotRestoreTest.java | 118 ++++++++ .../azure/AzureStorageServiceMock.java | 126 +++++++++ src/test/resources/elasticsearch.yml | 12 +- src/test/resources/log4j.xml | 7 +- 23 files changed, 1639 insertions(+), 29 deletions(-) create mode 100644 src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureImmutableBlobContainer.java create mode 100644 src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java create mode 100644 src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryModule.java create mode 100644 src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java create mode 100644 
src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java create mode 100644 src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java create mode 100644 src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java diff --git a/README.md b/README.md index 46a81081b5a..3f5528ae431 100644 --- a/README.md +++ b/README.md @@ -335,6 +335,82 @@ If you want to remove your running instances: azure vm delete myesnode1 ``` +Azure Repository +================ + +To enable Azure repositories, you have first to set your azure storage settings: + +``` + cloud: + azure: + storage_account: your_azure_storage_account + storage_key: your_azure_storage_key +``` + +The Azure repository supports following settings: + +* `container`: Container name. Defaults to `elasticsearch-snapshots` +* `base_path`: Specifies the path within container to repository data. Defaults to empty (root directory). +* `concurrent_streams`: Throttles the number of streams (per node) preforming snapshot operation. Defaults to `5`. +* `chunk_size`: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified +in bytes or by using size value notation, i.e. `1g`, `10m`, `5k`. Defaults to `64m` (64m max) +* `compress`: When set to `true` metadata files are stored in compressed format. This setting doesn't affect index +files that are already compressed by default. Defaults to `false`. 
+ +Some examples, using scripts: + +```sh +# The simpliest one +$ curl -XPUT 'http://localhost:9200/_snapshot/my_backup1' -d '{ + "type": "azure" +}' + +# With some settings +$ curl -XPUT 'http://localhost:9200/_snapshot/my_backup2' -d '{ + "type": "azure", + "settings": { + "container": "backup_container", + "base_path": "backups", + "concurrent_streams": 2, + "chunk_size": "32m", + "compress": true + } +}' +``` + +Example using Java: + +```java +client.admin().cluster().preparePutRepository("my_backup3") + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, "backup_container") + .put(AzureStorageService.Fields.CHUNK_SIZE, new ByteSizeValue(32, ByteSizeUnit.MB)) + ).get(); +``` + + +Testing +------- + +Integrations tests in this plugin require working Azure configuration and therefore disabled by default. +To enable tests prepare a config file elasticsearch.yml with the following content: + +``` + repositories: + azure: + account: "YOUR-AZURE-STORAGE-NAME" + key: "YOUR-AZURE-STORAGE-KEY" +``` + +Replaces `account`, `key` with your settings. Please, note that the test will delete all snapshot/restore related files in the specified bucket. + +To run test: + +```sh +mvn -Dtests.azure=true -Des.config=/path/to/config/file/elasticsearch.yml clean test +``` + + License ------- diff --git a/pom.xml b/pom.xml index fcda58e69e3..cf31508e264 100644 --- a/pom.xml +++ b/pom.xml @@ -68,6 +68,11 @@ governing permissions and limitations under the License. --> elasticsearch ${elasticsearch.version}
+ + com.microsoft.windowsazure + microsoft-windowsazure-api + 0.4.5 + log4j log4j diff --git a/src/main/assemblies/plugin.xml b/src/main/assemblies/plugin.xml index 634831c235c..b18ecaff3a0 100644 --- a/src/main/assemblies/plugin.xml +++ b/src/main/assemblies/plugin.xml @@ -1,5 +1,5 @@ - org.elasticsearch:elasticsearch + + / + true + true + + com.microsoft.windowsazure:microsoft-windowsazure-api + + diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java index 60428c35873..cb74062723e 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java @@ -20,11 +20,9 @@ package org.elasticsearch.cloud.azure; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.settings.SettingsFilter; import org.w3c.dom.Document; import org.w3c.dom.Node; @@ -84,12 +82,7 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent *
  • If needed this module will bind azure discovery service by default * to AzureComputeServiceImpl.
  • + *
  • If needed this module will bind azure repository service by default + * to AzureStorageServiceImpl.
  • * * * @see org.elasticsearch.cloud.azure.AzureComputeServiceImpl + * @see org.elasticsearch.cloud.azure.AzureStorageServiceImpl */ public class AzureModule extends AbstractModule { protected final ESLogger logger; @@ -51,10 +56,18 @@ public class AzureModule extends AbstractModule { logger.debug("starting azure services"); // If we have set discovery to azure, let's start the azure compute service - if (isDiscoveryReady(settings)) { + if (isDiscoveryReady(settings, logger)) { logger.debug("starting azure discovery service"); - bind(AzureComputeService.class) - .to(settings.getAsClass("cloud.azure.api.impl", AzureComputeServiceImpl.class)) + bind(AzureComputeService.class) + .to(settings.getAsClass("cloud.azure.api.impl", AzureComputeServiceImpl.class)) + .asEagerSingleton(); + } + + // If we have settings for azure repository, let's start the azure storage service + if (isSnapshotReady(settings, logger)) { + logger.debug("starting azure repository service"); + bind(AzureStorageService.class) + .to(settings.getAsClass("repositories.azure.api.impl", AzureStorageServiceImpl.class)) .asEagerSingleton(); } } @@ -63,7 +76,72 @@ public class AzureModule extends AbstractModule { * Check if discovery is meant to start * @return true if we can start discovery features */ - public static boolean isDiscoveryReady(Settings settings) { - return (AzureDiscovery.AZURE.equalsIgnoreCase(settings.get("discovery.type"))); + public static boolean isCloudReady(Settings settings) { + return (settings.getAsBoolean("cloud.enabled", true)); } + + /** + * Check if discovery is meant to start + * @return true if we can start discovery features + */ + public static boolean isDiscoveryReady(Settings settings, ESLogger logger) { + // Cloud services are disabled + if (!isCloudReady(settings)) { + logger.trace("cloud settings are disabled"); + return false; + } + + // User set discovery.type: azure + if (!AzureDiscovery.AZURE.equalsIgnoreCase(settings.get("discovery.type"))) { + 
logger.trace("discovery.type not set to {}", AzureDiscovery.AZURE); + return false; + } + + if (isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.SUBSCRIPTION_ID, logger) || + isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.SERVICE_NAME, logger) || + isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.KEYSTORE, logger) || + isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.PASSWORD, logger) + ) { + return false; + } + + logger.trace("all required properties for azure discovery are set!"); + + return true; + } + + /** + * Check if we have repository azure settings available + * @return true if we can use snapshot and restore + */ + public static boolean isSnapshotReady(Settings settings, ESLogger logger) { + // Cloud services are disabled + if (!isCloudReady(settings)) { + logger.trace("cloud settings are disabled"); + return false; + } + + if (isPropertyMissing(settings, "cloud.azure." + AzureStorageService.Fields.ACCOUNT, null) || + isPropertyMissing(settings, "cloud.azure." 
+ AzureStorageService.Fields.KEY, null)) { + logger.trace("azure repository is not set using {} and {} properties", + AzureStorageService.Fields.ACCOUNT, + AzureStorageService.Fields.KEY); + return false; + } + + logger.trace("all required properties for azure repository are set!"); + + return true; + } + + public static boolean isPropertyMissing(Settings settings, String name, ESLogger logger) throws ElasticsearchException { + if (!Strings.hasText(settings.get(name))) { + if (logger != null) { + logger.warn("{} is not set or is incorrect.", name); + } + return true; + } + return false; + } + } diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java index e56bb03c993..f8911ae8830 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java @@ -23,16 +23,22 @@ import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.SettingsFilter; /** - * Filtering cloud.azure.* settings + * Filtering cloud.azure.* and repositories.azure.* settings */ public class AzureSettingsFilter implements SettingsFilter.Filter { @Override public void filter(ImmutableSettings.Builder settings) { // Cloud settings - settings.remove("cloud.certificate"); + settings.remove("cloud.azure.keystore"); settings.remove("cloud.azure.password"); settings.remove("cloud.azure.subscription_id"); settings.remove("cloud.azure.service_name"); + + // Repositories settings + settings.remove("repositories.azure.account"); + settings.remove("repositories.azure.key"); + settings.remove("repositories.azure.container"); + settings.remove("repositories.azure.base_path"); } } diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java new file mode 100644 index 00000000000..f346348ed88 --- 
/dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure; + +import com.microsoft.windowsazure.services.core.ServiceException; +import com.microsoft.windowsazure.services.core.storage.StorageException; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.collect.ImmutableMap; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URISyntaxException; + +/** + * Azure Storage Service interface + * @see org.elasticsearch.cloud.azure.AzureStorageServiceImpl for Azure REST API implementation + */ +public interface AzureStorageService { + static public final class Fields { + public static final String ACCOUNT = "storage_account"; + public static final String KEY = "storage_key"; + public static final String CONTAINER = "container"; + public static final String BASE_PATH = "base_path"; + public static final String CONCURRENT_STREAMS = "concurrent_streams"; + public static final String CHUNK_SIZE = "chunk_size"; + public static final String COMPRESS = "compress"; + } + + boolean doesContainerExist(String container); + + void 
removeContainer(String container) throws URISyntaxException, StorageException; + + void createContainer(String container) throws URISyntaxException, StorageException; + + void deleteFiles(String container, String path) throws URISyntaxException, StorageException, ServiceException; + + boolean blobExists(String container, String blob) throws URISyntaxException, StorageException; + + void deleteBlob(String container, String blob) throws URISyntaxException, StorageException; + + InputStream getInputStream(String container, String blob) throws ServiceException; + + ImmutableMap listBlobsByPrefix(String container, String prefix) throws URISyntaxException, StorageException, ServiceException; + + void putObject(String container, String blob, InputStream is, long length) throws URISyntaxException, StorageException, IOException; + + +} diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java new file mode 100644 index 00000000000..c396aaae679 --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -0,0 +1,239 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure; + +import com.microsoft.windowsazure.services.blob.BlobConfiguration; +import com.microsoft.windowsazure.services.blob.BlobContract; +import com.microsoft.windowsazure.services.blob.BlobService; +import com.microsoft.windowsazure.services.blob.client.CloudBlobClient; +import com.microsoft.windowsazure.services.blob.client.CloudBlobContainer; +import com.microsoft.windowsazure.services.blob.client.CloudBlockBlob; +import com.microsoft.windowsazure.services.blob.client.ListBlobItem; +import com.microsoft.windowsazure.services.blob.models.BlobProperties; +import com.microsoft.windowsazure.services.blob.models.GetBlobResult; +import com.microsoft.windowsazure.services.blob.models.ListBlobsOptions; +import com.microsoft.windowsazure.services.blob.models.ListBlobsResult; +import com.microsoft.windowsazure.services.core.Configuration; +import com.microsoft.windowsazure.services.core.ServiceException; +import com.microsoft.windowsazure.services.core.storage.CloudStorageAccount; +import com.microsoft.windowsazure.services.core.storage.StorageException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; +import org.elasticsearch.common.collect.ImmutableMap; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; + +/** + * + */ +public class AzureStorageServiceImpl extends AbstractLifecycleComponent + implements AzureStorageService { + + private final String account; + private final String key; + private final String blob; + + private CloudStorageAccount storage_account; + private 
CloudBlobClient client; + private BlobContract service; + + + @Inject + public AzureStorageServiceImpl(Settings settings, SettingsFilter settingsFilter) { + super(settings); + settingsFilter.addFilter(new AzureSettingsFilter()); + + // We try to load storage API settings from `cloud.azure.` + account = settings.get("cloud.azure." + Fields.ACCOUNT); + key = settings.get("cloud.azure." + Fields.KEY); + blob = "http://" + account + ".blob.core.windows.net/"; + + try { + if (account != null) { + if (logger.isTraceEnabled()) logger.trace("creating new Azure storage client using account [{}], key [{}], blob [{}]", + account, key, blob); + + String storageConnectionString = + "DefaultEndpointsProtocol=http;" + + "AccountName="+ account +";" + + "AccountKey=" + key; + + Configuration configuration = Configuration.getInstance(); + configuration.setProperty(BlobConfiguration.ACCOUNT_NAME, account); + configuration.setProperty(BlobConfiguration.ACCOUNT_KEY, key); + configuration.setProperty(BlobConfiguration.URI, blob); + service = BlobService.create(configuration); + + storage_account = CloudStorageAccount.parse(storageConnectionString); + client = storage_account.createCloudBlobClient(); + } + } catch (Exception e) { + // Can not start Azure Storage Client + logger.error("can not start azure storage client: {}", e.getMessage()); + } + } + + @Override + public boolean doesContainerExist(String container) { + try { + CloudBlobContainer blob_container = client.getContainerReference(container); + return blob_container.exists(); + } catch (Exception e) { + logger.error("can not access container [{}]", container); + } + return false; + } + + @Override + public void removeContainer(String container) throws URISyntaxException, StorageException { + CloudBlobContainer blob_container = client.getContainerReference(container); + blob_container.delete(); + } + + @Override + public void createContainer(String container) throws URISyntaxException, StorageException { + CloudBlobContainer 
blob_container = client.getContainerReference(container); + if (logger.isTraceEnabled()) { + logger.trace("creating container [{}]", container); + } + blob_container.createIfNotExist(); + } + + @Override + public void deleteFiles(String container, String path) throws URISyntaxException, StorageException, ServiceException { + if (logger.isTraceEnabled()) { + logger.trace("delete files container [{}], path [{}]", + container, path); + } + + // Container name must be lower case. + CloudBlobContainer blob_container = client.getContainerReference(container); + if (blob_container.exists()) { + ListBlobsOptions options = new ListBlobsOptions(); + options.setPrefix(path); + + List blobs = service.listBlobs(container, options).getBlobs(); + for (ListBlobsResult.BlobEntry blob : blobs) { + if (logger.isTraceEnabled()) { + logger.trace("removing in container [{}], path [{}], blob [{}]", + container, path, blob.getName()); + } + service.deleteBlob(container, blob.getName()); + } + } + } + + @Override + public boolean blobExists(String container, String blob) throws URISyntaxException, StorageException { + // Container name must be lower case. + CloudBlobContainer blob_container = client.getContainerReference(container); + if (blob_container.exists()) { + CloudBlockBlob azureBlob = blob_container.getBlockBlobReference(blob); + return azureBlob.exists(); + } + + return false; + } + + @Override + public void deleteBlob(String container, String blob) throws URISyntaxException, StorageException { + if (logger.isTraceEnabled()) { + logger.trace("delete blob for container [{}], blob [{}]", + container, blob); + } + + // Container name must be lower case. + CloudBlobContainer blob_container = client.getContainerReference(container); + if (blob_container.exists()) { + if (logger.isTraceEnabled()) { + logger.trace("blob found. 
removing.", + container, blob); + } + // TODO A REVOIR + CloudBlockBlob azureBlob = blob_container.getBlockBlobReference(blob); + azureBlob.delete(); + } + } + + @Override + public InputStream getInputStream(String container, String blob) throws ServiceException { + GetBlobResult blobResult = service.getBlob(container, blob); + return blobResult.getContentStream(); + } + + @Override + public ImmutableMap listBlobsByPrefix(String container, String prefix) throws URISyntaxException, StorageException, ServiceException { + logger.debug("listBlobsByPrefix container [{}], prefix [{}]", container, prefix); + ImmutableMap.Builder blobsBuilder = ImmutableMap.builder(); + + CloudBlobContainer blob_container = client.getContainerReference(container); + if (blob_container.exists()) { + Iterable blobs = blob_container.listBlobs(prefix); + for (ListBlobItem blob : blobs) { + URI uri = blob.getUri(); + if (logger.isTraceEnabled()) { + logger.trace("blob url [{}]", uri); + } + String blobpath = uri.getPath().substring(container.length() + 1); + BlobProperties properties = service.getBlobProperties(container, blobpath).getProperties(); + String name = uri.getPath().substring(prefix.length()); + if (logger.isTraceEnabled()) { + logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getContentLength()); + } + blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getContentLength())); + } + } + + return blobsBuilder.build(); + } + + @Override + public void putObject(String container, String blobname, InputStream is, long length) throws URISyntaxException, StorageException, IOException { + if (logger.isTraceEnabled()) { + logger.trace("creating blob in container [{}], blob [{}], length [{}]", + container, blobname, length); + } + CloudBlockBlob blob = client.getContainerReference(container).getBlockBlobReference(blobname); + blob.upload(is, length); + } + + @Override + protected void doStart() throws ElasticsearchException { + logger.debug("starting azure 
storage client instance"); + } + + @Override + protected void doStop() throws ElasticsearchException { + logger.debug("stopping azure storage client instance"); + } + + @Override + protected void doClose() throws ElasticsearchException { + } +} diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java new file mode 100644 index 00000000000..2d6bac608b3 --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.blobstore; + +import com.microsoft.windowsazure.services.core.ServiceException; +import com.microsoft.windowsazure.services.core.storage.StorageException; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; +import org.elasticsearch.common.collect.ImmutableMap; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URISyntaxException; + +/** + * + */ +public class AbstractAzureBlobContainer extends AbstractBlobContainer { + + protected final ESLogger logger = ESLoggerFactory.getLogger(AbstractAzureBlobContainer.class.getName()); + protected final AzureBlobStore blobStore; + + protected final String keyPath; + + public AbstractAzureBlobContainer(BlobPath path, AzureBlobStore blobStore) { + super(path); + this.blobStore = blobStore; + String keyPath = path.buildAsString("/"); + if (!keyPath.isEmpty()) { + keyPath = keyPath + "/"; + } + this.keyPath = keyPath; + } + + @Override + public boolean blobExists(String blobName) { + try { + return blobStore.client().blobExists(blobStore.container(), buildKey(blobName)); + } catch (URISyntaxException e) { + logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); + } catch (StorageException e) { + logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); + } + return false; + } + + @Override + public boolean deleteBlob(String blobName) throws IOException { + try { + blobStore.client().deleteBlob(blobStore.container(), buildKey(blobName)); + return true; + } catch (URISyntaxException e) { + logger.warn("can not access [{}] in container {{}}: 
{}", blobName, blobStore.container(), e.getMessage()); + throw new IOException(e); + } catch (StorageException e) { + logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); + throw new IOException(e); + } + } + + @Override + public void readBlob(final String blobName, final ReadBlobListener listener) { + blobStore.executor().execute(new Runnable() { + @Override + public void run() { + InputStream is = null; + try { + is = blobStore.client().getInputStream(blobStore.container(), buildKey(blobName)); + byte[] buffer = new byte[blobStore.bufferSizeInBytes()]; + int bytesRead; + while ((bytesRead = is.read(buffer)) != -1) { + listener.onPartial(buffer, 0, bytesRead); + } + is.close(); + listener.onCompleted(); + } catch (Throwable e) { + IOUtils.closeWhileHandlingException(is); + listener.onFailure(e); + } + } + }); + } + + @Override + public ImmutableMap listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException { + final String prefix; + if (blobNamePrefix != null) { + prefix = buildKey(blobNamePrefix); + } else { + prefix = keyPath; + } + + try { + return blobStore.client().listBlobsByPrefix(blobStore.container(), prefix); + } catch (URISyntaxException e) { + logger.warn("can not access [{}] in container {{}}: {}", blobNamePrefix, blobStore.container(), e.getMessage()); + throw new IOException(e); + } catch (StorageException e) { + logger.warn("can not access [{}] in container {{}}: {}", blobNamePrefix, blobStore.container(), e.getMessage()); + throw new IOException(e); + } catch (ServiceException e) { + logger.warn("can not access [{}] in container {{}}: {}", blobNamePrefix, blobStore.container(), e.getMessage()); + throw new IOException(e); + } + } + + @Override + public ImmutableMap listBlobs() throws IOException { + return listBlobsByPrefix(null); + } + + protected String buildKey(String blobName) { + return keyPath + blobName; + } +} diff --git 
a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java new file mode 100644 index 00000000000..3958e94a956 --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.blobstore; + +import com.microsoft.windowsazure.services.core.ServiceException; +import com.microsoft.windowsazure.services.core.storage.StorageException; +import org.elasticsearch.cloud.azure.AzureStorageService; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.ImmutableBlobContainer; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; + +import java.net.URISyntaxException; +import java.util.concurrent.Executor; + +/** + * + */ +public class AzureBlobStore extends AbstractComponent implements BlobStore { + + private final AzureStorageService client; + + private final String container; + + private final Executor executor; + + private final int bufferSizeInBytes; + + public AzureBlobStore(Settings settings, AzureStorageService client, String container, Executor executor) throws URISyntaxException, StorageException { + super(settings); + this.client = client; + this.container = container; + this.executor = executor; + + this.bufferSizeInBytes = (int) settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes(); + + if (!client.doesContainerExist(container)) { + client.createContainer(container); + } + } + + @Override + public String toString() { + return container; + } + + public AzureStorageService client() { + return client; + } + + public String container() { + return container; + } + + public Executor executor() { + return executor; + } + + public int bufferSizeInBytes() { + return bufferSizeInBytes; + } + + @Override + public ImmutableBlobContainer immutableBlobContainer(BlobPath path) { + return new AzureImmutableBlobContainer(path, this); + } + + @Override + public void delete(BlobPath path) { + String keyPath = 
path.buildAsString("/"); + if (!keyPath.isEmpty()) { + keyPath = keyPath + "/"; + } + + try { + client.deleteFiles(container, keyPath); + } catch (URISyntaxException e) { + logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); + } catch (StorageException e) { + logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); + } catch (ServiceException e) { + logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); + } + } + + @Override + public void close() { + } +} diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureImmutableBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureImmutableBlobContainer.java new file mode 100644 index 00000000000..2b4f80ba88b --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureImmutableBlobContainer.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.blobstore; + +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.ImmutableBlobContainer; +import org.elasticsearch.common.blobstore.support.BlobStores; + +import java.io.IOException; +import java.io.InputStream; + +/** + * + */ +public class AzureImmutableBlobContainer extends AbstractAzureBlobContainer implements ImmutableBlobContainer { + + public AzureImmutableBlobContainer(BlobPath path, AzureBlobStore blobStore) { + super(path, blobStore); + } + + @Override + public void writeBlob(final String blobName, final InputStream is, final long sizeInBytes, final WriterListener listener) { + blobStore.executor().execute(new Runnable() { + @Override + public void run() { + try { + blobStore.client().putObject(blobStore.container(), buildKey(blobName), is, sizeInBytes); + listener.onCompleted(); + } catch (Throwable e) { + listener.onFailure(e); + } + } + }); + } + + @Override + public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException { + BlobStores.syncWriteBlob(this, blobName, is, sizeInBytes); + } +} diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java index 8ebb674f432..490ee61719f 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java @@ -43,8 +43,8 @@ public class AzureDiscoveryModule extends ZenDiscoveryModule { } @Override protected void bindDiscovery() { - if (AzureModule.isDiscoveryReady(settings)) { - bind(Discovery.class).to(AzureDiscovery.class).asEagerSingleton(); + if (AzureModule.isDiscoveryReady(settings, logger)) { + bind(Discovery.class).to(AzureDiscovery.class).asEagerSingleton(); } else { logger.debug("disabling azure discovery features"); } diff --git 
a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java index 0caedbccf54..469ce6a0ddb 100644 --- a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java +++ b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java @@ -22,8 +22,13 @@ package org.elasticsearch.plugin.cloud.azure; import org.elasticsearch.cloud.azure.AzureModule; import org.elasticsearch.common.collect.Lists; import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.AbstractPlugin; +import org.elasticsearch.repositories.RepositoriesModule; +import org.elasticsearch.repositories.azure.AzureRepository; +import org.elasticsearch.repositories.azure.AzureRepositoryModule; import java.util.Collection; @@ -33,6 +38,7 @@ import java.util.Collection; public class CloudAzurePlugin extends AbstractPlugin { private final Settings settings; + private final ESLogger logger = ESLoggerFactory.getLogger(CloudAzurePlugin.class.getName()); public CloudAzurePlugin(Settings settings) { this.settings = settings; @@ -51,9 +57,17 @@ public class CloudAzurePlugin extends AbstractPlugin { @Override public Collection> modules() { Collection> modules = Lists.newArrayList(); - if (settings.getAsBoolean("cloud.enabled", true)) { + if (AzureModule.isCloudReady(settings)) { modules.add(AzureModule.class); } return modules; } + + @Override + public void processModule(Module module) { + if (AzureModule.isSnapshotReady(settings, logger) + && module instanceof RepositoriesModule) { + ((RepositoriesModule)module).registerRepository(AzureRepository.TYPE, AzureRepositoryModule.class); + } + } } diff --git a/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java 
b/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java new file mode 100644 index 00000000000..795ff123982 --- /dev/null +++ b/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -0,0 +1,146 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.azure; + +import com.microsoft.windowsazure.services.core.storage.StorageException; +import org.elasticsearch.cloud.azure.AzureStorageService; +import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.index.snapshots.IndexShardRepository; +import org.elasticsearch.repositories.RepositoryName; +import org.elasticsearch.repositories.RepositorySettings; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * Azure file system implementation of the BlobStoreRepository + *

    + * Azure file system repository supports the following settings: + *

    + *
    {@code container}
    Azure container name. Defaults to elasticsearch-snapshots
    + *
    {@code base_path}
    Specifies the path within bucket to repository data. Defaults to root directory.
    + *
    {@code concurrent_streams}
    Number of concurrent read/write stream (per repository on each node). Defaults to 5.
    + *
    {@code chunk_size}
    Large file can be divided into chunks. This parameter specifies the chunk size. Defaults to 64mb.
    + *
    {@code compress}
    If set to true metadata files will be stored compressed. Defaults to false.
    + *
    + */ +public class AzureRepository extends BlobStoreRepository { + + public final static String TYPE = "azure"; + public final static String CONTAINER_DEFAULT = "elasticsearch-snapshots"; + + private final AzureBlobStore blobStore; + + private final BlobPath basePath; + + private ByteSizeValue chunkSize; + + private boolean compress; + + /** + * Constructs new shared file system repository + * + * @param name repository name + * @param repositorySettings repository settings + * @param indexShardRepository index shard repository + * @param azureStorageService Azure Storage service + * @throws java.io.IOException + */ + @Inject + public AzureRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, AzureStorageService azureStorageService) throws IOException, URISyntaxException, StorageException { + super(name.getName(), repositorySettings, indexShardRepository); + + String container = repositorySettings.settings().get(AzureStorageService.Fields.CONTAINER, + componentSettings.get(AzureStorageService.Fields.CONTAINER, CONTAINER_DEFAULT)); + + int concurrentStreams = repositorySettings.settings().getAsInt(AzureStorageService.Fields.CONCURRENT_STREAMS, + componentSettings.getAsInt(AzureStorageService.Fields.CONCURRENT_STREAMS, 5)); + ExecutorService concurrentStreamPool = EsExecutors.newScaling(1, concurrentStreams, 5, TimeUnit.SECONDS, + EsExecutors.daemonThreadFactory(settings, "[azure_stream]")); + + this.blobStore = new AzureBlobStore(settings, azureStorageService, container, concurrentStreamPool); + this.chunkSize = repositorySettings.settings().getAsBytesSize(AzureStorageService.Fields.CHUNK_SIZE, + componentSettings.getAsBytesSize(AzureStorageService.Fields.CHUNK_SIZE, new ByteSizeValue(64, ByteSizeUnit.MB))); + + if (this.chunkSize.getMb() > 64) { + logger.warn("azure repository does not support yet size > 64mb. 
Fall back to 64mb."); + this.chunkSize = new ByteSizeValue(64, ByteSizeUnit.MB); + } + + this.compress = repositorySettings.settings().getAsBoolean(AzureStorageService.Fields.COMPRESS, + componentSettings.getAsBoolean(AzureStorageService.Fields.COMPRESS, false)); + String basePath = repositorySettings.settings().get(AzureStorageService.Fields.BASE_PATH, null); + + if (Strings.hasLength(basePath)) { + // Remove starting / if any + basePath = Strings.trimLeadingCharacter(basePath, '/'); + BlobPath path = new BlobPath(); + for(String elem : Strings.splitStringToArray(basePath, '/')) { + path = path.add(elem); + } + this.basePath = path; + } else { + this.basePath = BlobPath.cleanPath(); + } + logger.debug("using container [{}], chunk_size [{}], concurrent_streams [{}], compress [{}], base_path [{}]", + container, chunkSize, concurrentStreams, compress, basePath); + } + + /** + * {@inheritDoc} + */ + @Override + protected BlobStore blobStore() { + return blobStore; + } + + @Override + protected BlobPath basePath() { + return basePath; + } + + /** + * {@inheritDoc} + */ + @Override + protected boolean isCompress() { + return compress; + } + + /** + * {@inheritDoc} + */ + @Override + protected ByteSizeValue chunkSize() { + return chunkSize; + } + + +} diff --git a/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryModule.java b/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryModule.java new file mode 100644 index 00000000000..9197d114003 --- /dev/null +++ b/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryModule.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + +import org.elasticsearch.cloud.azure.AzureModule; +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.snapshots.IndexShardRepository; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository; +import org.elasticsearch.repositories.Repository; + +/** + * Azure repository module + */ +public class AzureRepositoryModule extends AbstractModule { + + protected final ESLogger logger; + private Settings settings; + + @Inject + public AzureRepositoryModule(Settings settings) { + super(); + this.logger = Loggers.getLogger(getClass(), settings); + this.settings = settings; + } + + /** + * {@inheritDoc} + */ + @Override + protected void configure() { + if (AzureModule.isSnapshotReady(settings, logger)) { + bind(Repository.class).to(AzureRepository.class).asEagerSingleton(); + bind(IndexShardRepository.class).to(BlobStoreIndexShardRepository.class).asEagerSingleton(); + } else { + logger.debug("disabling azure snapshot and restore features"); + } + } + +} + diff --git a/src/main/resources/es-plugin.properties b/src/main/resources/es-plugin.properties index 21cdee38637..3bcf1e294a7 100644 --- a/src/main/resources/es-plugin.properties +++ b/src/main/resources/es-plugin.properties @@ -1,4 +1,4 @@ -# Licensed to ElasticSearch under one or more 
contributor +# Licensed to Elasticsearch under one or more contributor # license agreements. See the NOTICE file distributed with this work for additional # information regarding copyright ownership. ElasticSearch licenses this file to you # under the Apache License, Version 2.0 (the "License"); you may not use this diff --git a/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java index 8f34054e7f3..3aeb1e2866f 100644 --- a/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java +++ b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java @@ -48,6 +48,12 @@ public abstract class AbstractAzureTest extends ElasticsearchIntegrationTest { discovery: type: azure + + repositories: + azure: + account: "yourstorageaccount" + key: "storage key" + container: "container name" * */ @Documented diff --git a/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java b/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java new file mode 100644 index 00000000000..2cc457ad85e --- /dev/null +++ b/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + +import com.microsoft.windowsazure.services.core.ServiceException; +import com.microsoft.windowsazure.services.core.storage.StorageException; +import org.elasticsearch.cloud.azure.AbstractAzureTest; +import org.elasticsearch.cloud.azure.AzureStorageService; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.test.store.MockDirectoryHelper; +import org.junit.After; +import org.junit.Before; + +import java.net.URISyntaxException; + +public abstract class AbstractAzureRepositoryServiceTest extends AbstractAzureTest { + + protected String basePath; + private Class mock; + + public AbstractAzureRepositoryServiceTest(Class mock, + String basePath) { + // We want to inject the Azure API Mock + this.mock = mock; + this.basePath = basePath; + } + + /** + * Deletes repositories, supports wildcard notation. + */ + public static void wipeRepositories(String... repositories) { + // if nothing is provided, delete all + if (repositories.length == 0) { + repositories = new String[]{"*"}; + } + for (String repository : repositories) { + try { + client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + } catch (RepositoryMissingException ex) { + // ignore + } + } + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() + .put("cloud.azure." + AzureStorageService.Fields.ACCOUNT, "mock_azure_account") + .put("cloud.azure." 
+ AzureStorageService.Fields.KEY, "mock_azure_key") + .put("repositories.azure.api.impl", mock) + .put("repositories.azure.container", "snapshots"); + + return builder.build(); + } + + @Override + public Settings indexSettings() { + // During restore we frequently restore index to exactly the same state it was before, that might cause the same + // checksum file to be written twice during restore operation + return ImmutableSettings.builder().put(super.indexSettings()) + .put(MockDirectoryHelper.RANDOM_PREVENT_DOUBLE_WRITE, false) + .put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + } + + @Before @After + public final void wipe() throws StorageException, ServiceException, URISyntaxException { + wipeRepositories(); + cleanRepositoryFiles(basePath); + } + + /** + * Purge the test container + */ + public void cleanRepositoryFiles(String path) throws StorageException, ServiceException, URISyntaxException { + String container = cluster().getInstance(Settings.class).get("repositories.azure.container"); + logger.info("--> remove blobs in container [{}]", container); + AzureStorageService client = cluster().getInstance(AzureStorageService.class); + client.deleteFiles(container, path); + } +} diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java new file mode 100644 index 00000000000..33220cad250 --- /dev/null +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -0,0 +1,258 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + + +import com.microsoft.windowsazure.services.core.ServiceException; +import com.microsoft.windowsazure.services.core.storage.StorageException; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cloud.azure.AbstractAzureTest; +import org.elasticsearch.cloud.azure.AzureStorageService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.store.MockDirectoryHelper; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.net.URISyntaxException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +/** + * This test needs Azure to run and -Dtests.azure=true to be set + * and -Des.config=/path/to/elasticsearch.yml + * @see org.elasticsearch.cloud.azure.AbstractAzureTest + */ +@AbstractAzureTest.AzureTest +@ElasticsearchIntegrationTest.ClusterScope( + scope = 
ElasticsearchIntegrationTest.Scope.SUITE, + numNodes = 2, + transportClientRatio = 0.0) +public class AzureSnapshotRestoreITest extends AbstractAzureTest { + + private final String basePath; + + public AzureSnapshotRestoreITest() { + basePath = "/snapshot-itest/repo-" + randomInt(); + } + + @Override + public Settings indexSettings() { + // During restore we frequently restore index to exactly the same state it was before, that might cause the same + // checksum file to be written twice during restore operation + return ImmutableSettings.builder().put(super.indexSettings()) + .put(MockDirectoryHelper.RANDOM_PREVENT_DOUBLE_WRITE, false) + .put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false) + .build(); + } + + @Before + public final void wipeBefore() throws StorageException, ServiceException, URISyntaxException { + wipeRepositories(); + cleanRepositoryFiles(basePath); + } + + @After + public final void wipeAfter() throws StorageException, ServiceException, URISyntaxException { + wipeRepositories(); + cleanRepositoryFiles(basePath); + } + + @Test + public void testSimpleWorkflow() { + Client client = client(); + logger.info("--> creating azure repository with path [{}]", basePath); + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, "elasticsearch-integration") + .put(AzureStorageService.Fields.BASE_PATH, basePath) + .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + + createIndex("test-idx-1", "test-idx-2", "test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); + index("test-idx-3", "doc", 
Integer.toString(i), "foo", "baz" + i); + } + refresh(); + assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L)); + assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L)); + assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L)); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); + + logger.info("--> delete some data"); + for (int i = 0; i < 50; i++) { + client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get(); + } + for (int i = 50; i < 100; i++) { + client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get(); + } + for (int i = 0; i < 100; i += 2) { + client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get(); + } + refresh(); + assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L)); + assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L)); + assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L)); + + logger.info("--> close indices"); + client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); + + logger.info("--> restore all indices from the snapshot"); + RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + + 
ensureGreen(); + assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L)); + assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L)); + assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L)); + + // Test restore after index deletion + logger.info("--> delete indices"); + wipeIndices("test-idx-1", "test-idx-2"); + logger.info("--> restore one index after deletion"); + restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + ensureGreen(); + assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L)); + ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); + assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); + } + + @Test + public void testMultipleRepositories() { + Client client = client(); + logger.info("--> creating azure repository with path [{}]", basePath); + PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1") + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, "elasticsearch-integration1") + .put(AzureStorageService.Fields.BASE_PATH, basePath) + .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) + ).get(); + assertThat(putRepositoryResponse1.isAcknowledged(), equalTo(true)); + PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2") + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, "elasticsearch-integration2") + .put(AzureStorageService.Fields.BASE_PATH, basePath) + 
.put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) + ).get(); + assertThat(putRepositoryResponse2.isAcknowledged(), equalTo(true)); + + createIndex("test-idx-1", "test-idx-2"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L)); + assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L)); + + logger.info("--> snapshot 1"); + CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot("test-repo1", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1").get(); + assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse1.getSnapshotInfo().totalShards())); + + logger.info("--> snapshot 2"); + CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot("test-repo2", "test-snap").setWaitForCompletion(true).setIndices("test-idx-2").get(); + assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards())); + + assertThat(client.admin().cluster().prepareGetSnapshots("test-repo1").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(client.admin().cluster().prepareGetSnapshots("test-repo2").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); + + // Test restore after index deletion + logger.info("--> delete indices"); + wipeIndices("test-idx-1", "test-idx-2"); + logger.info("--> 
restore one index after deletion from snapshot 1"); + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1").execute().actionGet(); + assertThat(restoreSnapshotResponse1.getRestoreInfo().totalShards(), greaterThan(0)); + ensureGreen(); + assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L)); + ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); + assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); + + logger.info("--> restore other index after deletion from snapshot 2"); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin().cluster().prepareRestoreSnapshot("test-repo2", "test-snap").setWaitForCompletion(true).setIndices("test-idx-2").execute().actionGet(); + assertThat(restoreSnapshotResponse2.getRestoreInfo().totalShards(), greaterThan(0)); + ensureGreen(); + assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L)); + clusterState = client.admin().cluster().prepareState().get().getState(); + assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); + assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(true)); + } + + /** + * Deletes repositories, supports wildcard notation. + */ + public static void wipeRepositories(String... 
repositories) { + // if nothing is provided, delete all + if (repositories.length == 0) { + repositories = new String[]{"*"}; + } + for (String repository : repositories) { + try { + client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + } catch (RepositoryMissingException ex) { + // ignore + } + } + } + + /** + * Purge the test container + */ + public void cleanRepositoryFiles(String path) throws StorageException, ServiceException, URISyntaxException { + String container = cluster().getInstance(Settings.class).get("repositories.azure.container", + AzureRepository.CONTAINER_DEFAULT); + logger.info("--> remove blobs in container [{}], path [{}]", container, path); + AzureStorageService client = cluster().getInstance(AzureStorageService.class); + + // Remove starting / if any + path = Strings.trimLeadingCharacter(path, '/'); + + client.deleteFiles(container, path); + } +} diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java new file mode 100644 index 00000000000..03b808cd654 --- /dev/null +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + + +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +@ElasticsearchIntegrationTest.ClusterScope( + scope = ElasticsearchIntegrationTest.Scope.SUITE, + numNodes = 1, + transportClientRatio = 0.0) +public class AzureSnapshotRestoreTest extends AbstractAzureRepositoryServiceTest { + + public AzureSnapshotRestoreTest() { + super(AzureStorageServiceMock.class, "/snapshot-test/repo-" + randomInt()); + } + + @Test + public void testSimpleWorkflow() { + Client client = client(); + logger.info("--> creating azure repository with path [{}]", basePath); + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put("base_path", basePath) + .put("chunk_size", randomIntBetween(1000, 10000)) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + + createIndex("test-idx-1", "test-idx-2", "test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); + index("test-idx-3", "doc", Integer.toString(i), "foo", 
"baz" + i); + } + refresh(); + assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L)); + assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L)); + assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L)); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); + + logger.info("--> delete some data"); + for (int i = 0; i < 50; i++) { + client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get(); + } + for (int i = 50; i < 100; i++) { + client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get(); + } + for (int i = 0; i < 100; i += 2) { + client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get(); + } + refresh(); + assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L)); + assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L)); + assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L)); + + logger.info("--> close indices"); + client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); + + logger.info("--> restore all indices from the snapshot"); + RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + + ensureGreen(); + 
assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L)); + assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L)); + assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L)); + + // Test restore after index deletion + logger.info("--> delete indices"); + wipeIndices("test-idx-1", "test-idx-2"); + logger.info("--> restore one index after deletion"); + restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + ensureGreen(); + assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L)); + ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); + assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); + } + +} diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java new file mode 100644 index 00000000000..689bee26b21 --- /dev/null +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.azure.AzureStorageService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; +import org.elasticsearch.common.collect.ImmutableMap; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * In memory storage for unit tests + */ +public class AzureStorageServiceMock extends AbstractLifecycleComponent + implements AzureStorageService { + + protected Map blobs = new ConcurrentHashMap(); + + @Inject + protected AzureStorageServiceMock(Settings settings) { + super(settings); + } + + @Override + public boolean doesContainerExist(String container) { + return true; + } + + @Override + public void removeContainer(String container) { + } + + @Override + public void createContainer(String container) { + } + + @Override + public void deleteFiles(String container, String path) { + } + + @Override + public boolean blobExists(String container, String blob) { + return blobs.containsKey(blob); + } + + @Override + public void deleteBlob(String container, String blob) { + 
blobs.remove(blob); + } + + @Override + public InputStream getInputStream(String container, String blob) { + return new ByteArrayInputStream(blobs.get(blob)); + } + + @Override + public ImmutableMap listBlobsByPrefix(String container, String prefix) { + ImmutableMap.Builder blobsBuilder = ImmutableMap.builder(); + for (String blobName : blobs.keySet()) { + if (Strings.startsWithIgnoreCase(blobName, prefix)) { + blobsBuilder.put(blobName, new PlainBlobMetaData(blobName, blobs.get(blobName).length)); + } + } + ImmutableMap map = blobsBuilder.build(); + return map; + } + + @Override + public void putObject(String container, String blob, InputStream is, long length) { + try { + ByteArrayOutputStream buffer = new ByteArrayOutputStream(); + + int nRead; + byte[] data = new byte[65535]; + + while ((nRead = is.read(data, 0, data.length)) != -1) { + buffer.write(data, 0, nRead); + } + + buffer.flush(); + + blobs.put(blob, buffer.toByteArray()); + } catch (IOException e) { + } + } + + @Override + protected void doStart() throws ElasticsearchException { + } + + @Override + protected void doStop() throws ElasticsearchException { + } + + @Override + protected void doClose() throws ElasticsearchException { + } +} diff --git a/src/test/resources/elasticsearch.yml b/src/test/resources/elasticsearch.yml index 07536b8c77b..d84f690e5a8 100644 --- a/src/test/resources/elasticsearch.yml +++ b/src/test/resources/elasticsearch.yml @@ -1,4 +1,4 @@ -# Licensed to ElasticSearch under one or more contributor +# Licensed to Elasticsearch under one or more contributor # license agreements. See the NOTICE file distributed with this work for additional # information regarding copyright ownership. 
ElasticSearch licenses this file to you # under the Apache License, Version 2.0 (the "License"); you may not use this @@ -25,6 +25,16 @@ # password: YOUR-PASSWORD # subscription_id: YOUR-AZURE-SUBSCRIPTION-ID # service_name: YOUR-AZURE-SERVICE-NAME +# storage_account: "YOUR-AZURE-STORAGE-NAME" +# storage_key: "YOUR-AZURE-STORAGE-KEY" # # discovery: # type: azure +# +# repositories: +# azure: +# container: "NAME-OF-CONTAINER-USED-FOR-SNAPSHOTS" #optional default to "elasticsearch-snapshots" +# base_path: "path/to/snapshots" #optional default to empty +# concurrent_streams: 5 #optional default to 5 +# chunk_size: 64mb #optional default to "64mb" +# compress: false #optional default to false diff --git a/src/test/resources/log4j.xml b/src/test/resources/log4j.xml index 95b3bf51c77..be428fd906e 100644 --- a/src/test/resources/log4j.xml +++ b/src/test/resources/log4j.xml @@ -1,5 +1,5 @@ - + + + - + From a8c20b600240ade26f5f7f24160527d4a336f937 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 17 Mar 2014 19:25:39 +0100 Subject: [PATCH 022/125] Fix typo in tests --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3f5528ae431..2cff4aa0d72 100644 --- a/README.md +++ b/README.md @@ -396,7 +396,7 @@ Integrations tests in this plugin require working Azure configuration and theref To enable tests prepare a config file elasticsearch.yml with the following content: ``` - repositories: + cloud: azure: account: "YOUR-AZURE-STORAGE-NAME" key: "YOUR-AZURE-STORAGE-KEY" From b7240d7b0d2650b90f106ab06688e0e895420423 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 17 Mar 2014 19:36:18 +0100 Subject: [PATCH 023/125] Fix azure plugin description It now also add `azure` repositories --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index cf31508e264..dd6024451bb 100644 --- a/pom.xml +++ b/pom.xml @@ -19,7 +19,7 @@ governing permissions and limitations under the License. 
--> 2.1.0-SNAPSHOT jar Elasticsearch Azure cloud plugin - The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. + The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism and add Azure storage repositories. https://github.com/elasticsearch/elasticsearch-cloud-azure/ 2013 From 39c3cbad9873833d63712dd7289fcbeb69b990bf Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 17 Mar 2014 19:36:52 +0100 Subject: [PATCH 024/125] prepare release elasticsearch-cloud-azure-2.1.0 --- README.md | 4 ++-- pom.xml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2cff4aa0d72..b6611d9e168 100644 --- a/README.md +++ b/README.md @@ -3,14 +3,14 @@ Azure Cloud Plugin for Elasticsearch The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. -In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.0.0`. +In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.1.0`. * For 1.0.x elasticsearch versions, look at [master branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/master). * For 0.90.x elasticsearch versions, look at [1.x branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/1.x). | Azure Cloud Plugin | elasticsearch | Release date | |-----------------------------|---------------------|:------------:| -| 2.1.0-SNAPSHOT | 1.0 -> master | XXXX-XX-XX | +| 2.1.0 | 1.0 -> master | 2014-03-17 | | 2.0.0 | 1.0 -> master | 2014-02-14 | diff --git a/pom.xml b/pom.xml index dd6024451bb..3b62b6bf049 100644 --- a/pom.xml +++ b/pom.xml @@ -16,7 +16,7 @@ governing permissions and limitations under the License. 
--> 4.0.0 org.elasticsearch elasticsearch-cloud-azure - 2.1.0-SNAPSHOT + 2.1.0 jar Elasticsearch Azure cloud plugin The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism and add Azure storage repositories. From 39699910916b7c2c98c6067f5a5b8bc6b47e47da Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 17 Mar 2014 19:42:05 +0100 Subject: [PATCH 025/125] prepare for next development iteration --- README.md | 1 + pom.xml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b6611d9e168..d1666f33fb7 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ In order to install the plugin, simply run: `bin/plugin -install elasticsearch/e | Azure Cloud Plugin | elasticsearch | Release date | |-----------------------------|---------------------|:------------:| +| 2.2.0-SNAPSHOT | 1.0 -> master | XXXX-XX-XX | | 2.1.0 | 1.0 -> master | 2014-03-17 | | 2.0.0 | 1.0 -> master | 2014-02-14 | diff --git a/pom.xml b/pom.xml index 3b62b6bf049..afb2e0a71a9 100644 --- a/pom.xml +++ b/pom.xml @@ -16,7 +16,7 @@ governing permissions and limitations under the License. --> 4.0.0 org.elasticsearch elasticsearch-cloud-azure - 2.1.0 + 2.2.0-SNAPSHOT jar Elasticsearch Azure cloud plugin The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism and add Azure storage repositories. 
From 238278107216a967e841afb5523d8270c58a8586 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 19 Mar 2014 22:25:15 +0100 Subject: [PATCH 026/125] Disable java and maven version checking And fix typo in email html --- dev-tools/build_release.py | 27 ++++++--------------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index 9166b09e7e3..74acd8c5f4e 100755 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -43,7 +43,7 @@ from os.path import dirname, abspath The script takes over almost all steps necessary for a release from a high level point of view it does the following things: - - run prerequisite checks ie. check for Java 1.6 being present or S3 credentials available as env variables + - run prerequisite checks ie. check for S3 credentials available as env variables - detect the version to release from the specified branch (--branch) or the current branch - creates a release branch & updates pom.xml and README.md to point to a release version rather than a snapshot - builds the artifacts @@ -109,18 +109,6 @@ def java_exe(): path = JAVA_HOME return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path) -def verify_java_version(version): - s = os.popen('%s; java -version 2>&1' % java_exe()).read() - if s.find(' version "%s.' % version) == -1: - raise RuntimeError('got wrong version for java %s:\n%s' % (version, s)) - -# Verifies the java version. We guarantee that we run with Java 1.6 -# If 1.6 is not available fail the build! 
-def verify_mvn_java_version(version, mvn): - s = os.popen('%s; %s --version 2>&1' % (java_exe(), mvn)).read() - if s.find('Java version: %s' % version) == -1: - raise RuntimeError('got wrong java version for %s %s:\n%s' % (mvn, version, s)) - # Returns the hash of the current git HEAD revision def get_head_hash(): return os.popen(' git rev-parse --verify HEAD 2>&1').read().strip() @@ -133,9 +121,6 @@ def get_tag_hash(tag): def get_current_branch(): return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip() -verify_java_version('1.6') # we require to build with 1.6 -verify_mvn_java_version('1.6', MVN) - # Utility that returns the name of the release branch for a given version def release_branch(version): return 'release_branch_%s' % version @@ -218,7 +203,7 @@ def add_version_snapshot(readme_file, release, snapshot): # If we find pattern, we copy the line and replace its content if line.find(pattern) >= 0: return line.replace(pattern, replacement).replace('%s' % (datetime.datetime.now().strftime("%Y-%m-%d")), - 'XXXX-XX-XX')+line + 'XXXX-XX-XX')+line else: return line process_file(readme_file, callback) @@ -356,15 +341,15 @@ def format_issues_html(issues, title='Fix'): if len(issues) > 0: response += '

    %s

    \n
      \n' % title for issue in issues: - response += '[%s] - %s\n' % (issue.html_url, issue.number, issue.title) + response += '
    • [%s] - %s\n' % (issue.html_url, issue.number, issue.title) response += '
    \n' return response def get_github_repository(reponame, - login=env.get('GITHUB_LOGIN', None), - password=env.get('GITHUB_PASSWORD', None), - key=env.get('GITHUB_KEY', None)): + login=env.get('GITHUB_LOGIN', None), + password=env.get('GITHUB_PASSWORD', None), + key=env.get('GITHUB_KEY', None)): if login: g = github3.login(login, password) elif key: From 96c280f43ddb96bc0f72849fd47c0347a9a96a6f Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 26 Mar 2014 19:33:12 +0100 Subject: [PATCH 027/125] Update to elasticsearch 1.1.0 Closes #11. --- pom.xml | 4 ++-- .../org/elasticsearch/discovery/azure/AzureDiscovery.java | 7 +++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/pom.xml b/pom.xml index afb2e0a71a9..3b5e36facb7 100644 --- a/pom.xml +++ b/pom.xml @@ -43,8 +43,8 @@ governing permissions and limitations under the License. --> - 1.0.0 - 4.6.1 + 1.1.0 + 4.7.0 onerror 1 true diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 51f8b991d0d..155ce477820 100755 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.collect.ImmutableList; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.discovery.zen.ping.ZenPingService; @@ -46,8 +47,10 @@ public class AzureDiscovery extends ZenDiscovery { @Inject public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, 
- DiscoveryNodeService discoveryNodeService, AzureComputeService azureService, NetworkService networkService) { - super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, discoveryNodeService, pingService, Version.CURRENT); + DiscoveryNodeService discoveryNodeService, AzureComputeService azureService, NetworkService networkService, + DiscoverySettings discoverySettings) { + super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, + discoveryNodeService, pingService, Version.CURRENT, discoverySettings); if (settings.getAsBoolean("cloud.enabled", true)) { ImmutableList zenPings = pingService.zenPings(); UnicastZenPing unicastZenPing = null; From 03895ea7681d204002661036fb2d16f3062df209 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 26 Mar 2014 19:38:31 +0100 Subject: [PATCH 028/125] Create branches according to elasticsearch versions We create branches: * es-0.90 for elasticsearch 0.90 * es-1.0 for elasticsearch 1.0 * es-1.1 for elasticsearch 1.1 * master for elasticsearch master We also check that before releasing we don't have a dependency to an elasticsearch SNAPSHOT version. Add links to each version in documentation (cherry picked from commit 65d4862) --- README.md | 14 +++++++++----- dev-tools/build_release.py | 29 +++++++++++++++++++++++++++++ pom.xml | 2 +- 3 files changed, 39 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index d1666f33fb7..2c5bed9f03a 100644 --- a/README.md +++ b/README.md @@ -5,14 +5,18 @@ The Azure Cloud plugin allows to use Azure API for the unicast discovery mechani In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.1.0`. -* For 1.0.x elasticsearch versions, look at [master branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/master). -* For 0.90.x elasticsearch versions, look at [1.x branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/1.x). 
+* For master elasticsearch versions, look at [master branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/master). +* For 1.1.x elasticsearch versions, look at [es-1.1 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.1). +* For 1.0.x elasticsearch versions, look at [es-1.0 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.0). +* For 0.90.x elasticsearch versions, look at [es-0.90 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-0.90). | Azure Cloud Plugin | elasticsearch | Release date | |-----------------------------|---------------------|:------------:| -| 2.2.0-SNAPSHOT | 1.0 -> master | XXXX-XX-XX | -| 2.1.0 | 1.0 -> master | 2014-03-17 | -| 2.0.0 | 1.0 -> master | 2014-02-14 | +| 3.0.0-SNAPSHOT | master | XXXX-XX-XX | + +Please read documentation relative to the version you are using: + +* [3.0.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/master/README.md) Azure Virtual Machine Discovery diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index 74acd8c5f4e..db8345440c7 100755 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -208,6 +208,29 @@ def add_version_snapshot(readme_file, release, snapshot): return line process_file(readme_file, callback) +# Moves the README.md file from a snapshot to a release (documentation link) +def remove_documentation_snapshot(readme_file, repo_url, release, branch): + pattern = '* [%s-SNAPSHOT](%sblob/%s/README.md)' % (release, repo_url, branch) + replacement = '* [%s](%sblob/v%s/README.md)' % (release, repo_url, release) + def callback(line): + # If we find pattern, we replace its content + if line.find(pattern) >= 0: + return line.replace(pattern, replacement) + else: + return line + process_file(readme_file, callback) + +# Add in README.markdown file the documentation for the next version +def add_documentation_snapshot(readme_file, repo_url, release, snapshot, 
branch): + pattern = '* [%s](%sblob/v%s/README.md)' % (release, repo_url, release) + replacement = '* [%s-SNAPSHOT](%sblob/%s/README.md)' % (snapshot, repo_url, branch) + def callback(line): + # If we find pattern, we copy the line and replace its content + if line.find(pattern) >= 0: + return line.replace(pattern, replacement)+line + else: + return line + process_file(readme_file, callback) # Set release date in README.md file def set_date(readme_file): @@ -603,8 +626,12 @@ if __name__ == '__main__': artifact_name = find_from_pom('name') artifact_description = find_from_pom('description') project_url = find_from_pom('url') + elasticsearch_version = find_from_pom('elasticsearch.version') print(' Artifact Id: [%s]' % artifact_id) print(' Release version: [%s]' % release_version) + print(' Elasticsearch: [%s]' % elasticsearch_version) + if elasticsearch_version.find('-SNAPSHOT') != -1: + raise RuntimeError('Can not release with a SNAPSHOT elasticsearch dependency: %s' % elasticsearch_version) # extract snapshot default_snapshot_version = guess_snapshot(release_version) @@ -626,6 +653,7 @@ if __name__ == '__main__': try: pending_files = [POM_FILE, README_FILE] remove_maven_snapshot(POM_FILE, release_version) + remove_documentation_snapshot(README_FILE, project_url, release_version, src_branch) remove_version_snapshot(README_FILE, release_version) set_date(README_FILE) set_install_instructions(README_FILE, artifact_id, release_version) @@ -657,6 +685,7 @@ if __name__ == '__main__': add_maven_snapshot(POM_FILE, release_version, snapshot_version) add_version_snapshot(README_FILE, release_version, snapshot_version) + add_documentation_snapshot(README_FILE, project_url, release_version, snapshot_version, src_branch) add_pending_files(*pending_files) commit_snapshot() diff --git a/pom.xml b/pom.xml index 3b5e36facb7..613cd6fe103 100644 --- a/pom.xml +++ b/pom.xml @@ -43,7 +43,7 @@ governing permissions and limitations under the License. 
--> - 1.1.0 + 2.0.0-SNAPSHOT 4.7.0 onerror 1 From 6e1ab497574d4345a7a7e29a612619d44eecdcb1 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 18 Apr 2014 11:30:02 +0200 Subject: [PATCH 029/125] Update to elasticsearch 1.1.1 (Impact only on tests as test framework moved a bit) (cherry picked from commit e5a32c0) --- pom.xml | 2 +- .../repositories/azure/AzureSnapshotRestoreITest.java | 4 ++-- .../repositories/azure/AzureSnapshotRestoreTest.java | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pom.xml b/pom.xml index 613cd6fe103..b5128dcdc1f 100644 --- a/pom.xml +++ b/pom.xml @@ -44,7 +44,7 @@ governing permissions and limitations under the License. --> 2.0.0-SNAPSHOT - 4.7.0 + 4.7.1 onerror 1 true diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 33220cad250..bcf66355ae3 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -147,7 +147,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { // Test restore after index deletion logger.info("--> delete indices"); - wipeIndices("test-idx-1", "test-idx-2"); + cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index after deletion"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -204,7 +204,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { // Test restore after index deletion logger.info("--> delete indices"); - wipeIndices("test-idx-1", "test-idx-2"); + cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index 
after deletion from snapshot 1"); RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1").execute().actionGet(); assertThat(restoreSnapshotResponse1.getRestoreInfo().totalShards(), greaterThan(0)); diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java index 03b808cd654..e28a8261c87 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java @@ -104,7 +104,7 @@ public class AzureSnapshotRestoreTest extends AbstractAzureRepositoryServiceTest // Test restore after index deletion logger.info("--> delete indices"); - wipeIndices("test-idx-1", "test-idx-2"); + cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index after deletion"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); From b433c9889bd5e9a4a03b70645b289cca21aa072e Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 29 Apr 2014 21:46:33 +0200 Subject: [PATCH 030/125] Create branch es-1.2 --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 2c5bed9f03a..3d656fb1580 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ The Azure Cloud plugin allows to use Azure API for the unicast discovery mechani In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.1.0`. * For master elasticsearch versions, look at [master branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/master). 
+* For 1.2.x elasticsearch versions, look at [es-1.2 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.2). * For 1.1.x elasticsearch versions, look at [es-1.1 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.1). * For 1.0.x elasticsearch versions, look at [es-1.0 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.0). * For 0.90.x elasticsearch versions, look at [es-0.90 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-0.90). From d020012ef2b221161842520d46e738928a30dde3 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 29 Apr 2014 21:53:00 +0200 Subject: [PATCH 031/125] Update to Lucene 4.8 Closes #14. (cherry picked from commit 3cf0368) --- pom.xml | 4 ++-- .../java/org/elasticsearch/azure/itest/AzureSimpleITest.java | 2 +- .../org/elasticsearch/discovery/azure/AzureSimpleTest.java | 2 +- .../discovery/azure/AzureTwoStartedNodesTest.java | 2 +- .../repositories/azure/AzureSnapshotRestoreITest.java | 2 +- .../repositories/azure/AzureSnapshotRestoreTest.java | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pom.xml b/pom.xml index b5128dcdc1f..4dd9b2f24a1 100644 --- a/pom.xml +++ b/pom.xml @@ -16,7 +16,7 @@ governing permissions and limitations under the License. --> 4.0.0 org.elasticsearch elasticsearch-cloud-azure - 2.2.0-SNAPSHOT + 3.0.0-SNAPSHOT jar Elasticsearch Azure cloud plugin The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism and add Azure storage repositories. @@ -44,7 +44,7 @@ governing permissions and limitations under the License. 
--> 2.0.0-SNAPSHOT - 4.7.1 + 4.8.0 onerror 1 true diff --git a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java index 0591cc1fb88..4c6e2ffd5c8 100644 --- a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java +++ b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java @@ -35,7 +35,7 @@ import org.junit.Test; @AbstractAzureTest.AzureTest @ElasticsearchIntegrationTest.ClusterScope( scope = ElasticsearchIntegrationTest.Scope.TEST, - numNodes = 1, + numDataNodes = 1, transportClientRatio = 0.0) public class AzureSimpleITest extends AbstractAzureTest { diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java index 478a2a9f416..e8379682c13 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java @@ -24,7 +24,7 @@ import org.junit.Test; @ElasticsearchIntegrationTest.ClusterScope( scope = ElasticsearchIntegrationTest.Scope.TEST, - numNodes = 1, + numDataNodes = 1, transportClientRatio = 0.0) public class AzureSimpleTest extends AbstractAzureComputeServiceTest { diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java index 7791b57dd08..7a2c39cb8e5 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java @@ -24,7 +24,7 @@ import org.junit.Test; @ElasticsearchIntegrationTest.ClusterScope( scope = ElasticsearchIntegrationTest.Scope.TEST, - numNodes = 2, + numDataNodes = 2, transportClientRatio = 0.0) public class AzureTwoStartedNodesTest extends AbstractAzureComputeServiceTest { diff --git 
a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index bcf66355ae3..72cb868d102 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -53,7 +53,7 @@ import static org.hamcrest.Matchers.greaterThan; @AbstractAzureTest.AzureTest @ElasticsearchIntegrationTest.ClusterScope( scope = ElasticsearchIntegrationTest.Scope.SUITE, - numNodes = 2, + numDataNodes = 2, transportClientRatio = 0.0) public class AzureSnapshotRestoreITest extends AbstractAzureTest { diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java index e28a8261c87..0e8b49f9709 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java @@ -35,7 +35,7 @@ import static org.hamcrest.Matchers.greaterThan; @ElasticsearchIntegrationTest.ClusterScope( scope = ElasticsearchIntegrationTest.Scope.SUITE, - numNodes = 1, + numDataNodes = 1, transportClientRatio = 0.0) public class AzureSnapshotRestoreTest extends AbstractAzureRepositoryServiceTest { From 943f2552f9c6a7371b4d0ac0ac7d12bb9f59058a Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 19 Jun 2014 18:57:35 +0200 Subject: [PATCH 032/125] Update to elasticsearch 1.3.0 And refactor integration tests Closes #17. 
(cherry picked from commit f5b444e) (cherry picked from commit 1e63f86) --- README.md | 57 ++++++------- pom.xml | 31 ++++--- .../azure/AzureUnicastHostsProvider.java | 7 +- .../azure/itest/AzureSimpleITest.java | 1 + .../AbstractAzureComputeServiceTest.java | 16 +++- .../azure/AzureMinimumMasterNodesTest.java | 83 +++++++++++++++++++ .../discovery/azure/AzureSimpleTest.java | 18 ++-- .../azure/AzureTwoStartedNodesTest.java | 18 +++- .../AbstractAzureRepositoryServiceTest.java | 6 +- .../azure/AzureSnapshotRestoreITest.java | 4 +- .../azure/AzureSnapshotRestoreTest.java | 1 + 11 files changed, 174 insertions(+), 68 deletions(-) create mode 100644 src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java diff --git a/README.md b/README.md index 3d656fb1580..3f60e44d83f 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,10 @@ Azure Cloud Plugin for Elasticsearch The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. -In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.1.0`. +In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.3.0`. * For master elasticsearch versions, look at [master branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/master). +* For 1.3.x elasticsearch versions, look at [es-1.3 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.3). * For 1.2.x elasticsearch versions, look at [es-1.2 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.2). * For 1.1.x elasticsearch versions, look at [es-1.1 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.1). * For 1.0.x elasticsearch versions, look at [es-1.0 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.0). @@ -27,17 +28,17 @@ Azure VM discovery allows to use the azure APIs to perform automatic discovery ( multicast environments). 
Here is a simple sample configuration: ``` - cloud: - azure: - keystore: /path/to/keystore - password: your_password_for_keystore - subscription_id: your_azure_subscription_id - service_name: your_azure_cloud_service_name - discovery: - type: azure +cloud: + azure: + keystore: /path/to/keystore + password: your_password_for_keystore + subscription_id: your_azure_subscription_id + service_name: your_azure_cloud_service_name +discovery: + type: azure - # recommended - # path.data: /mnt/resource/elasticsearch/data +# recommended +# path.data: /mnt/resource/elasticsearch/data ``` How to start (short story) @@ -261,17 +262,17 @@ And add the following lines: ```yaml # If you don't remember your account id, you may get it with `azure account list` - cloud: - azure: - keystore: /home/elasticsearch/azurekeystore.pkcs12 - password: your_password_for_keystore - subscription_id: your_azure_subscription_id - service_name: your_azure_cloud_service_name - discovery: - type: azure +cloud: + azure: + keystore: /home/elasticsearch/azurekeystore.pkcs12 + password: your_password_for_keystore + subscription_id: your_azure_subscription_id + service_name: your_azure_cloud_service_name +discovery: + type: azure # Recommended - path.data: /mnt/resource/elasticsearch/data +path.data: /mnt/resource/elasticsearch/data ``` Restart elasticsearch: @@ -347,10 +348,10 @@ Azure Repository To enable Azure repositories, you have first to set your azure storage settings: ``` - cloud: - azure: - storage_account: your_azure_storage_account - storage_key: your_azure_storage_key +cloud: + azure: + storage_account: your_azure_storage_account + storage_key: your_azure_storage_key ``` The Azure repository supports following settings: @@ -402,10 +403,10 @@ Integrations tests in this plugin require working Azure configuration and theref To enable tests prepare a config file elasticsearch.yml with the following content: ``` - cloud: - azure: - account: "YOUR-AZURE-STORAGE-NAME" - key: "YOUR-AZURE-STORAGE-KEY" 
+cloud: + azure: + account: "YOUR-AZURE-STORAGE-NAME" + key: "YOUR-AZURE-STORAGE-KEY" ``` Replaces `account`, `key` with your settings. Please, note that the test will delete all snapshot/restore related files in the specified bucket. diff --git a/pom.xml b/pom.xml index 4dd9b2f24a1..2104d97fb1a 100644 --- a/pom.xml +++ b/pom.xml @@ -44,9 +44,8 @@ governing permissions and limitations under the License. --> 2.0.0-SNAPSHOT - 4.8.0 + 4.8.1 onerror - 1 true onerror @@ -56,6 +55,18 @@ governing permissions and limitations under the License. --> + + org.hamcrest + hamcrest-core + 1.3.RC2 + test + + + org.hamcrest + hamcrest-library + 1.3.RC2 + test + org.apache.lucene lucene-test-framework @@ -86,18 +97,6 @@ governing permissions and limitations under the License. --> test-jar test - - org.hamcrest - hamcrest-core - 1.3.RC2 - test - - - org.hamcrest - hamcrest-library - 1.3.RC2 - test - @@ -157,7 +156,7 @@ governing permissions and limitations under the License. --> - ${tests.jvms} + 1 @@ -204,8 +203,6 @@ governing permissions and limitations under the License. 
--> ${tests.integration} ${tests.cluster_seed} ${tests.client.ratio} - ${env.ES_TEST_LOCAL} - ${es.node.mode} ${es.config} ${es.logger.level} true diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java index fd5e87d1c41..57e0d99b7e7 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -139,10 +138,8 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic } else { TransportAddress[] addresses = transportService.addressesFromString(networkAddress); // we only limit to 1 addresses, makes no sense to ping 100 ports - for (int i = 0; (i < addresses.length && i < UnicastZenPing.LIMIT_PORTS_COUNT); i++) { - logger.trace("adding {}, transport_address {}", networkAddress, addresses[i]); - cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getName() + "-" + i, addresses[i], Version.CURRENT)); - } + logger.trace("adding {}, transport_address {}", networkAddress, addresses[0]); + cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getName(), addresses[0], Version.CURRENT)); } } diff --git a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java index 4c6e2ffd5c8..62be0c2c0dc 100644 --- a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java +++ 
b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java @@ -36,6 +36,7 @@ import org.junit.Test; @ElasticsearchIntegrationTest.ClusterScope( scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 1, + numClientNodes = 0, transportClientRatio = 0.0) public class AzureSimpleITest extends AbstractAzureTest { diff --git a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java index 7e0cb73463f..0e6834cd658 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java @@ -24,6 +24,7 @@ import org.elasticsearch.cloud.azure.AbstractAzureTest; import org.elasticsearch.cloud.azure.AzureComputeService; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.PluginsService; public abstract class AbstractAzureComputeServiceTest extends AbstractAzureTest { @@ -41,13 +42,22 @@ public abstract class AbstractAzureComputeServiceTest extends AbstractAzureTest assertEquals(expected, nodeInfos.getNodes().length); } - @Override - protected Settings nodeSettings(int nodeOrdinal) { + protected Settings settingsBuilder() { ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() + .put("plugins." 
+ PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true) + .put("discovery.type", "azure") .put("cloud.azure.api.impl", mock) // We add a fake subscription_id to start mock compute service .put("cloud.azure.subscription_id", "fake") - .put("cloud.azure.refresh_interval", "5s"); + .put("cloud.azure.refresh_interval", "5s") + .put("cloud.azure.keystore", "dummy") + .put("cloud.azure.password", "dummy") + .put("cloud.azure.service_name", "dummy") + .put("cloud.azure.refresh_interval", "5s") + // Make the tests run faster + .put("discovery.zen.join.timeout", "100ms") + .put("discovery.zen.ping.timeout", "10ms") + .put("discovery.initial_state_timeout", "300ms"); return builder.build(); } diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java new file mode 100644 index 00000000000..f2d7520dd00 --- /dev/null +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.discovery.azure; + +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.io.IOException; + +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +/** + * Reported issue in #15 + * (https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/15) + */ +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, + numDataNodes = 0, + transportClientRatio = 0.0, + numClientNodes = 0) +public class AzureMinimumMasterNodesTest extends AbstractAzureComputeServiceTest { + + public AzureMinimumMasterNodesTest() { + super(AzureComputeServiceTwoNodesMock.class); + } + + @Override + protected final Settings settingsBuilder() { + ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() + .put("discovery.zen.minimum_master_nodes", 2) + .put(super.settingsBuilder()); + return builder.build(); + } + + @Test + public void simpleOnlyMasterNodeElection() throws IOException { + logger.info("--> start data node / non master node"); + internalCluster().startNode(settingsBuilder()); + try { + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue()); + fail("should not be able to find master"); + } catch (MasterNotDiscoveredException e) { + // all is well, no master elected + } + logger.info("--> start another node"); + internalCluster().startNode(settingsBuilder()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); + + logger.info("--> stop master node"); + internalCluster().stopCurrentMasterNode(); + + try { + 
assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), nullValue()); + fail("should not be able to find master"); + } catch (MasterNotDiscoveredException e) { + // all is well, no master elected + } + + logger.info("--> start another node"); + internalCluster().startNode(settingsBuilder()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); + } +} diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java index e8379682c13..0d9fbd939f8 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java @@ -22,10 +22,12 @@ package org.elasticsearch.discovery.azure; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; -@ElasticsearchIntegrationTest.ClusterScope( - scope = ElasticsearchIntegrationTest.Scope.TEST, - numDataNodes = 1, - transportClientRatio = 0.0) +import static org.hamcrest.Matchers.notNullValue; + +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, + numDataNodes = 0, + transportClientRatio = 0.0, + numClientNodes = 0) public class AzureSimpleTest extends AbstractAzureComputeServiceTest { public AzureSimpleTest() { @@ -34,9 +36,11 @@ public class AzureSimpleTest extends AbstractAzureComputeServiceTest { @Test public void one_node_should_run() { - // We expect having 2 nodes as part of the cluster, let's test that + logger.info("--> start one node"); + internalCluster().startNode(settingsBuilder()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); + + // We expect having 1 node as part of the cluster, let's test 
that checkNumberOfNodes(1); - - } } diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java index 7a2c39cb8e5..d6d807a73b3 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java @@ -22,10 +22,12 @@ package org.elasticsearch.discovery.azure; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; -@ElasticsearchIntegrationTest.ClusterScope( - scope = ElasticsearchIntegrationTest.Scope.TEST, - numDataNodes = 2, - transportClientRatio = 0.0) +import static org.hamcrest.Matchers.notNullValue; + +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, + numDataNodes = 0, + transportClientRatio = 0.0, + numClientNodes = 0) public class AzureTwoStartedNodesTest extends AbstractAzureComputeServiceTest { public AzureTwoStartedNodesTest() { @@ -34,6 +36,14 @@ public class AzureTwoStartedNodesTest extends AbstractAzureComputeServiceTest { @Test public void two_nodes_should_run() { + logger.info("--> start first node"); + internalCluster().startNode(settingsBuilder()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); + + logger.info("--> start another node"); + internalCluster().startNode(settingsBuilder()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); + // We expect having 2 nodes as part of the cluster, let's test that checkNumberOfNodes(2); } diff --git a/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java b/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java index 2cc457ad85e..4c92b910f0b 
100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java @@ -26,6 +26,7 @@ import org.elasticsearch.cloud.azure.AzureStorageService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.test.store.MockDirectoryHelper; import org.junit.After; @@ -65,6 +66,7 @@ public abstract class AbstractAzureRepositoryServiceTest extends AbstractAzureTe @Override protected Settings nodeSettings(int nodeOrdinal) { ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() + .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true) .put("cloud.azure." + AzureStorageService.Fields.ACCOUNT, "mock_azure_account") .put("cloud.azure." 
+ AzureStorageService.Fields.KEY, "mock_azure_key") .put("repositories.azure.api.impl", mock) @@ -94,9 +96,9 @@ public abstract class AbstractAzureRepositoryServiceTest extends AbstractAzureTe * Purge the test container */ public void cleanRepositoryFiles(String path) throws StorageException, ServiceException, URISyntaxException { - String container = cluster().getInstance(Settings.class).get("repositories.azure.container"); + String container = internalCluster().getInstance(Settings.class).get("repositories.azure.container"); logger.info("--> remove blobs in container [{}]", container); - AzureStorageService client = cluster().getInstance(AzureStorageService.class); + AzureStorageService client = internalCluster().getInstance(AzureStorageService.class); client.deleteFiles(container, path); } } diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 72cb868d102..5e683a8a398 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -245,10 +245,10 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { * Purge the test container */ public void cleanRepositoryFiles(String path) throws StorageException, ServiceException, URISyntaxException { - String container = cluster().getInstance(Settings.class).get("repositories.azure.container", + String container = internalCluster().getInstance(Settings.class).get("repositories.azure.container", AzureRepository.CONTAINER_DEFAULT); logger.info("--> remove blobs in container [{}], path [{}]", container, path); - AzureStorageService client = cluster().getInstance(AzureStorageService.class); + AzureStorageService client = internalCluster().getInstance(AzureStorageService.class); // Remove starting / if any path = Strings.trimLeadingCharacter(path, '/'); diff --git 
a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java index 0e8b49f9709..a9d187dad1d 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTest.java @@ -36,6 +36,7 @@ import static org.hamcrest.Matchers.greaterThan; @ElasticsearchIntegrationTest.ClusterScope( scope = ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 1, + numClientNodes = 0, transportClientRatio = 0.0) public class AzureSnapshotRestoreTest extends AbstractAzureRepositoryServiceTest { From 0bd6c72ac6f8d2e3b1b3a650dd630c76d8c164b6 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 30 Jul 2014 18:02:17 +0200 Subject: [PATCH 033/125] Tests: update to Lucene 4.9.0 Closes #24. (cherry picked from commit 13c60e4) --- README.md | 2 +- pom.xml | 4 +- .../cloud/azure/AbstractAzureTest.java | 49 +++++++++++-------- .../AbstractAzureComputeServiceTest.java | 4 +- .../azure/AzureMinimumMasterNodesTest.java | 2 +- .../azure/AzureSnapshotRestoreITest.java | 2 +- 6 files changed, 35 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 3f60e44d83f..cc2410bbbbf 100644 --- a/README.md +++ b/README.md @@ -414,7 +414,7 @@ Replaces `account`, `key` with your settings. Please, note that the test will de To run test: ```sh -mvn -Dtests.azure=true -Des.config=/path/to/config/file/elasticsearch.yml clean test +mvn -Dtests.azure=true -Dtests.config=/path/to/config/file/elasticsearch.yml clean test ``` diff --git a/pom.xml b/pom.xml index 2104d97fb1a..db6cd3d8d16 100644 --- a/pom.xml +++ b/pom.xml @@ -44,7 +44,7 @@ governing permissions and limitations under the License. --> 2.0.0-SNAPSHOT - 4.8.1 + 4.9.0 onerror true onerror @@ -203,7 +203,7 @@ governing permissions and limitations under the License. 
--> ${tests.integration} ${tests.cluster_seed} ${tests.client.ratio} - ${es.config} + ${tests.config} ${es.logger.level} true diff --git a/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java index 3aeb1e2866f..4503c962364 100644 --- a/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java +++ b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java @@ -20,6 +20,12 @@ package org.elasticsearch.cloud.azure; import com.carrotsearch.randomizedtesting.annotations.TestGroup; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.FailedToResolveConfigException; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import java.lang.annotation.Documented; @@ -34,27 +40,7 @@ public abstract class AbstractAzureTest extends ElasticsearchIntegrationTest { /** * Annotation for tests that require Azure to run. Azure tests are disabled by default. - *

    - * To enable test add -Dtests.azure=true -Des.config=/path/to/elasticsearch.yml - *

    - * The elasticsearch.yml file should contain the following keys - *

    -      cloud:
    -          azure:
    -              keystore: FULLPATH-TO-YOUR-KEYSTORE
    -              password: YOUR-PASSWORD
    -              subscription_id: YOUR-AZURE-SUBSCRIPTION-ID
    -              service_name: YOUR-AZURE-SERVICE-NAME
    -
    -      discovery:
    -              type: azure
    -
    -      repositories:
    -          azure:
    -              account: "yourstorageaccount"
    -              key: "storage key"
    -              container: "container name"
    -     * 
    + * See README file for details. */ @Documented @Inherited @@ -67,4 +53,25 @@ public abstract class AbstractAzureTest extends ElasticsearchIntegrationTest { */ public static final String SYSPROP_AZURE = "tests.azure"; + @Override + protected Settings nodeSettings(int nodeOrdinal) { + ImmutableSettings.Builder settings = ImmutableSettings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true); + + Environment environment = new Environment(); + + // if explicit, just load it and don't load from env + try { + if (Strings.hasText(System.getProperty("tests.config"))) { + settings.loadFromUrl(environment.resolveConfig(System.getProperty("tests.config"))); + } else { + fail("to run integration tests, you need to set -Dtest.azure=true and -Dtests.config=/path/to/elasticsearch.yml"); + } + } catch (FailedToResolveConfigException exception) { + fail("your test configuration file is incorrect: " + System.getProperty("tests.config")); + } + return settings.build(); + } + } diff --git a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java index 0e6834cd658..69dc107a603 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java @@ -24,7 +24,6 @@ import org.elasticsearch.cloud.azure.AbstractAzureTest; import org.elasticsearch.cloud.azure.AzureComputeService; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.PluginsService; public abstract class AbstractAzureComputeServiceTest extends AbstractAzureTest { @@ -44,7 +43,6 @@ public abstract class AbstractAzureComputeServiceTest extends AbstractAzureTest protected Settings settingsBuilder() { ImmutableSettings.Builder builder = 
ImmutableSettings.settingsBuilder() - .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true) .put("discovery.type", "azure") .put("cloud.azure.api.impl", mock) // We add a fake subscription_id to start mock compute service @@ -54,6 +52,8 @@ public abstract class AbstractAzureComputeServiceTest extends AbstractAzureTest .put("cloud.azure.password", "dummy") .put("cloud.azure.service_name", "dummy") .put("cloud.azure.refresh_interval", "5s") + // We need the network to make the mock working + .put("node.mode", "network") // Make the tests run faster .put("discovery.zen.join.timeout", "100ms") .put("discovery.zen.ping.timeout", "10ms") diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java index f2d7520dd00..b6614500c10 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java @@ -34,7 +34,7 @@ import static org.hamcrest.Matchers.nullValue; * Reported issue in #15 * (https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/15) */ -@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 0, transportClientRatio = 0.0, numClientNodes = 0) diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 5e683a8a398..007794b5884 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.greaterThan; /** * This test needs Azure to run and -Dtests.azure=true to be set - * and 
-Des.config=/path/to/elasticsearch.yml + * and -Dtests.config=/path/to/elasticsearch.yml * @see org.elasticsearch.cloud.azure.AbstractAzureTest */ @AbstractAzureTest.AzureTest From 0005eab665e3130704efb1a54099ab78d58ec06a Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 30 Jul 2014 18:07:04 +0200 Subject: [PATCH 034/125] Update to Azure Java SDK 0.4.6 Latest Azure Java SDK is 0.4.6 when looking at http://search.maven.org/#browse%7C-589510877 Closes #25. (cherry picked from commit 89be7b4) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index db6cd3d8d16..a752d6531d2 100644 --- a/pom.xml +++ b/pom.xml @@ -82,7 +82,7 @@ governing permissions and limitations under the License. --> com.microsoft.windowsazure microsoft-windowsazure-api - 0.4.5 + 0.4.6 log4j From 976448e3395609276446e2b8dbeddb046823aa2e Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 30 Jul 2014 19:06:52 +0200 Subject: [PATCH 035/125] `_` is a forbidden character for container (repository) According to [Containers naming guide](http://msdn.microsoft.com/en-us/library/dd135715.aspx): > A container name must be a valid DNS name, conforming to the following naming rules: > > * Container names must start with a letter or number, and can contain only letters, numbers, and the dash (-) character. > * Every dash (-) character must be immediately preceded and followed by a letter or number; consecutive dashes are not permitted in container names. > * All letters in a container name must be lowercase. > * Container names must be from 3 through 63 characters long. We need to fix the documentation and control that before calling Azure API. The validation will come with issue #27. Closes #21. 
(cherry picked from commit 6531165) --- README.md | 14 +++++- .../azure/AzureSnapshotRestoreITest.java | 44 +++++++++++++++++++ 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index cc2410bbbbf..975ed8c1ee9 100644 --- a/README.md +++ b/README.md @@ -395,9 +395,21 @@ client.admin().cluster().preparePutRepository("my_backup3") ).get(); ``` +Repository validation rules +--------------------------- + +According to the [containers naming guide](http://msdn.microsoft.com/en-us/library/dd135715.aspx), a container name must +be a valid DNS name, conforming to the following naming rules: + +* Container names must start with a letter or number, and can contain only letters, numbers, and the dash (-) character. +* Every dash (-) character must be immediately preceded and followed by a letter or number; consecutive dashes are not +permitted in container names. +* All letters in a container name must be lowercase. +* Container names must be from 3 through 63 characters long. + Testing -------- +======= Integrations tests in this plugin require working Azure configuration and therefore disabled by default. 
To enable tests prepare a config file elasticsearch.yml with the following content: diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 007794b5884..fe3dae5a2bb 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -22,6 +22,7 @@ package org.elasticsearch.repositories.azure; import com.microsoft.windowsazure.services.core.ServiceException; import com.microsoft.windowsazure.services.core.storage.StorageException; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -32,6 +33,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ElasticsearchIntegrationTest; @@ -44,6 +46,7 @@ import java.net.URISyntaxException; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; /** * This test needs Azure to run and -Dtests.azure=true to be set @@ -224,6 +227,47 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(true)); } + /** + * For issue #21: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/21 + */ + @Test 
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/21") + public void testForbiddenContainerName() { + checkContainerName("", false); + checkContainerName("es", false); + checkContainerName("-elasticsearch", false); + checkContainerName("elasticsearch--integration", false); + checkContainerName("elasticsearch_integration", false); + checkContainerName("ElAsTicsearch_integration", false); + checkContainerName("123456789-123456789-123456789-123456789-123456789-123456789-1234", false); + checkContainerName("123456789-123456789-123456789-123456789-123456789-123456789-123", true); + checkContainerName("elasticsearch-integration", true); + checkContainerName("elasticsearch-integration-007", true); + } + + /** + * Create repository with wrong or correct container name + * @param container Container name we want to create + * @param correct Is this container name correct + */ + private void checkContainerName(String container, boolean correct) { + logger.info("--> creating azure repository with container name [{}]", container); + try { + PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, container) + .put(AzureStorageService.Fields.BASE_PATH, basePath) + .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), is(correct)); + client().admin().cluster().prepareDeleteRepository("test-repo").get(); + } catch (RepositoryException e) { + if (correct) { + // We did not expect any exception here :( + throw e; + } + } + } + /** * Deletes repositories, supports wildcard notation. 
*/ From d613f5f09715938bad8479c8db28ad5fdcab5273 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 30 Jul 2014 19:21:36 +0200 Subject: [PATCH 036/125] Wrong exception thrown when snapshot doesn't exist As for https://github.com/elasticsearch/elasticsearch-cloud-aws/issues/86, we should raise a `SnapshotMissingException`. Closes #23. (cherry picked from commit 61ddb60) --- .../blobstore/AbstractAzureBlobContainer.java | 7 +++++ .../azure/AzureSnapshotRestoreITest.java | 29 +++++++++++++++++-- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java index 2d6bac608b3..cc94510c36b 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.collect.ImmutableMap; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.net.URISyntaxException; @@ -95,6 +96,12 @@ public class AbstractAzureBlobContainer extends AbstractBlobContainer { } is.close(); listener.onCompleted(); + } catch (ServiceException e) { + if (e.getHttpStatusCode() == 404) { + listener.onFailure(new FileNotFoundException(e.getMessage())); + } else { + listener.onFailure(e); + } } catch (Throwable e) { IOUtils.closeWhileHandlingException(is); listener.onFailure(e); diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index fe3dae5a2bb..18173cebf6d 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ 
b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.store.MockDirectoryHelper; @@ -44,9 +45,7 @@ import org.junit.Test; import java.net.URISyntaxException; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.*; /** * This test needs Azure to run and -Dtests.azure=true to be set @@ -268,6 +267,30 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { } } + /** + * Test case for issue #23: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/23 + */ + @Test + public void testNonExistingRepo_23() { + Client client = client(); + logger.info("--> creating azure repository with path [{}]", basePath); + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, "elasticsearch-integration") + .put(AzureStorageService.Fields.BASE_PATH, basePath) + .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + + logger.info("--> restore non existing snapshot"); + try { + client.admin().cluster().prepareRestoreSnapshot("test-repo", "no-existing-snapshot").setWaitForCompletion(true).execute().actionGet(); + fail("Shouldn't be here"); + } catch (SnapshotMissingException ex) { + // Expected + } + } + /** 
* Deletes repositories, supports wildcard notation. */ From 6046f172c89eeb2180ead7784adc4595e6289260 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 31 Jul 2014 00:11:53 +0200 Subject: [PATCH 037/125] listBlobsByPrefix uses a wrong path When listing existing blobs for an azure repository, `path` to look at is incorrectly computed which leads to 404 errors. Closes #26. (cherry picked from commit 656fadc) --- .../cloud/azure/AzureStorageService.java | 2 +- .../cloud/azure/AzureStorageServiceImpl.java | 17 ++- .../blobstore/AbstractAzureBlobContainer.java | 16 +-- .../azure/AzureSnapshotRestoreITest.java | 122 ++++++++++++++---- .../azure/AzureStorageServiceMock.java | 2 +- 5 files changed, 113 insertions(+), 46 deletions(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java index f346348ed88..700fc6ab4ac 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java @@ -57,7 +57,7 @@ public interface AzureStorageService { InputStream getInputStream(String container, String blob) throws ServiceException; - ImmutableMap listBlobsByPrefix(String container, String prefix) throws URISyntaxException, StorageException, ServiceException; + ImmutableMap listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException, ServiceException; void putObject(String container, String blob, InputStream is, long length) throws URISyntaxException, StorageException, IOException; diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java index c396aaae679..5c617798eff 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -113,7 
+113,14 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent listBlobsByPrefix(String container, String prefix) throws URISyntaxException, StorageException, ServiceException { - logger.debug("listBlobsByPrefix container [{}], prefix [{}]", container, prefix); + public ImmutableMap listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException, ServiceException { + logger.debug("listBlobsByPrefix container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); ImmutableMap.Builder blobsBuilder = ImmutableMap.builder(); CloudBlobContainer blob_container = client.getContainerReference(container); if (blob_container.exists()) { - Iterable blobs = blob_container.listBlobs(prefix); + Iterable blobs = blob_container.listBlobs(keyPath + prefix); for (ListBlobItem blob : blobs) { URI uri = blob.getUri(); if (logger.isTraceEnabled()) { @@ -202,7 +209,7 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException { - final String prefix; - if (blobNamePrefix != null) { - prefix = buildKey(blobNamePrefix); - } else { - prefix = keyPath; - } + public ImmutableMap listBlobsByPrefix(@Nullable String prefix) throws IOException { try { - return blobStore.client().listBlobsByPrefix(blobStore.container(), prefix); + return blobStore.client().listBlobsByPrefix(blobStore.container(), keyPath, prefix); } catch (URISyntaxException e) { - logger.warn("can not access [{}] in container {{}}: {}", blobNamePrefix, blobStore.container(), e.getMessage()); + logger.warn("can not access [{}] in container {{}}: {}", prefix, blobStore.container(), e.getMessage()); throw new IOException(e); } catch (StorageException e) { - logger.warn("can not access [{}] in container {{}}: {}", blobNamePrefix, blobStore.container(), e.getMessage()); + logger.warn("can not access [{}] in container {{}}: {}", prefix, blobStore.container(), e.getMessage()); 
throw new IOException(e); } catch (ServiceException e) { - logger.warn("can not access [{}] in container {{}}: {}", blobNamePrefix, blobStore.container(), e.getMessage()); + logger.warn("can not access [{}] in container {{}}: {}", prefix, blobStore.container(), e.getMessage()); throw new IOException(e); } } diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 18173cebf6d..86c1c3ef536 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResp import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.cloud.azure.AbstractAzureTest; import org.elasticsearch.cloud.azure.AzureStorageService; import org.elasticsearch.cluster.ClusterState; @@ -55,14 +56,18 @@ import static org.hamcrest.Matchers.*; @AbstractAzureTest.AzureTest @ElasticsearchIntegrationTest.ClusterScope( scope = ElasticsearchIntegrationTest.Scope.SUITE, - numDataNodes = 2, + numDataNodes = 1, transportClientRatio = 0.0) public class AzureSnapshotRestoreITest extends AbstractAzureTest { - private final String basePath; + private String getRepositoryPath() { + String testName = "/snapshot-itest/repo-".concat("" + randomIntBetween(1, 1000)); + return testName.contains(" ") ? 
Strings.split(testName, " ")[0] : testName; + } - public AzureSnapshotRestoreITest() { - basePath = "/snapshot-itest/repo-" + randomInt(); + private String getContainerName() { + String testName = "it-".concat(Strings.toUnderscoreCase(getTestName()).replaceAll("_", "-")); + return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; } @Override @@ -78,23 +83,29 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { @Before public final void wipeBefore() throws StorageException, ServiceException, URISyntaxException { wipeRepositories(); - cleanRepositoryFiles(basePath); + cleanRepositoryFiles( + getContainerName(), + getContainerName().concat("-1"), + getContainerName().concat("-2")); } @After public final void wipeAfter() throws StorageException, ServiceException, URISyntaxException { wipeRepositories(); - cleanRepositoryFiles(basePath); + cleanRepositoryFiles( + getContainerName(), + getContainerName().concat("-1"), + getContainerName().concat("-2")); } @Test public void testSimpleWorkflow() { Client client = client(); - logger.info("--> creating azure repository with path [{}]", basePath); + logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("azure").setSettings(ImmutableSettings.settingsBuilder() - .put(AzureStorageService.Fields.CONTAINER, "elasticsearch-integration") - .put(AzureStorageService.Fields.BASE_PATH, basePath) + .put(AzureStorageService.Fields.CONTAINER, getContainerName()) + .put(AzureStorageService.Fields.BASE_PATH, getRepositoryPath()) .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -163,18 +174,18 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { @Test public void testMultipleRepositories() { Client client = client(); - logger.info("--> creating 
azure repository with path [{}]", basePath); + logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1") .setType("azure").setSettings(ImmutableSettings.settingsBuilder() - .put(AzureStorageService.Fields.CONTAINER, "elasticsearch-integration1") - .put(AzureStorageService.Fields.BASE_PATH, basePath) + .put(AzureStorageService.Fields.CONTAINER, getContainerName().concat("-1")) + .put(AzureStorageService.Fields.BASE_PATH, getRepositoryPath()) .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) ).get(); assertThat(putRepositoryResponse1.isAcknowledged(), equalTo(true)); PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2") .setType("azure").setSettings(ImmutableSettings.settingsBuilder() - .put(AzureStorageService.Fields.CONTAINER, "elasticsearch-integration2") - .put(AzureStorageService.Fields.BASE_PATH, basePath) + .put(AzureStorageService.Fields.CONTAINER, getContainerName().concat("-2")) + .put(AzureStorageService.Fields.BASE_PATH, getRepositoryPath()) .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) ).get(); assertThat(putRepositoryResponse2.isAcknowledged(), equalTo(true)); @@ -226,6 +237,65 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(true)); } + /** + * For issue #26: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/26 + */ + @Test + public void testListBlobs_26() throws StorageException, ServiceException, URISyntaxException { + createIndex("test-idx-1", "test-idx-2", "test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); + 
index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + + ClusterAdminClient client = client().admin().cluster(); + logger.info("--> creating azure repository without any path"); + PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") + .setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, getContainerName()) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + + // Get all snapshots - should be empty + assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(0)); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*").get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + + // Get all snapshots - should have one + assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(1)); + + // Clean the snapshot + client.prepareDeleteSnapshot("test-repo", "test-snap").get(); + client.prepareDeleteRepository("test-repo").get(); + + logger.info("--> creating azure repository path [{}]", getRepositoryPath()); + putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") + .setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, getContainerName()) + .put(AzureStorageService.Fields.BASE_PATH, getRepositoryPath()) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + + // Get all snapshots - should be empty + assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(0)); + + logger.info("--> snapshot"); + createSnapshotResponse = client.prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*").get(); + 
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + + // Get all snapshots - should have one + assertThat(client.prepareGetSnapshots("test-repo").get().getSnapshots().size(), equalTo(1)); + + + } + /** * For issue #21: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/21 */ @@ -254,7 +324,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") .setType("azure").setSettings(ImmutableSettings.settingsBuilder() .put(AzureStorageService.Fields.CONTAINER, container) - .put(AzureStorageService.Fields.BASE_PATH, basePath) + .put(AzureStorageService.Fields.BASE_PATH, getRepositoryPath()) .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), is(correct)); @@ -273,11 +343,11 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { @Test public void testNonExistingRepo_23() { Client client = client(); - logger.info("--> creating azure repository with path [{}]", basePath); + logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("azure").setSettings(ImmutableSettings.settingsBuilder() - .put(AzureStorageService.Fields.CONTAINER, "elasticsearch-integration") - .put(AzureStorageService.Fields.BASE_PATH, basePath) + .put(AzureStorageService.Fields.CONTAINER, getContainerName()) + .put(AzureStorageService.Fields.BASE_PATH, getRepositoryPath()) .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -309,17 +379,13 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { } /** - * Purge the test container + * Purge the test containers */ - public void 
cleanRepositoryFiles(String path) throws StorageException, ServiceException, URISyntaxException { - String container = internalCluster().getInstance(Settings.class).get("repositories.azure.container", - AzureRepository.CONTAINER_DEFAULT); - logger.info("--> remove blobs in container [{}], path [{}]", container, path); + public void cleanRepositoryFiles(String... containers) throws StorageException, ServiceException, URISyntaxException { AzureStorageService client = internalCluster().getInstance(AzureStorageService.class); - - // Remove starting / if any - path = Strings.trimLeadingCharacter(path, '/'); - - client.deleteFiles(container, path); + for (String container : containers) { + logger.info("--> remove container [{}]", container); + client.removeContainer(container); + } } } diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 689bee26b21..63b1d12bd35 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -82,7 +82,7 @@ public class AzureStorageServiceMock extends AbstractLifecycleComponent listBlobsByPrefix(String container, String prefix) { + public ImmutableMap listBlobsByPrefix(String container, String keyPath, String prefix) { ImmutableMap.Builder blobsBuilder = ImmutableMap.builder(); for (String blobName : blobs.keySet()) { if (Strings.startsWithIgnoreCase(blobName, prefix)) { From ecd95dab9b957fb3ffaae8df0a13119a017d7e4a Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 31 Jul 2014 00:11:53 +0200 Subject: [PATCH 038/125] Docs: use Oracle Java by default Installing Oracle JVM is as easy as: ``` echo debconf shared/accepted-oracle-license-v1-1 select true | \ sudo debconf-set-selections && echo debconf shared/accepted-oracle-license-v1-1 seen true | \ sudo debconf-set-selections && sudo 
add-apt-repository ppa:webupd8team/java && sudo apt-get update && sudo apt-get install oracle-java7-installer ``` See http://www.webupd8.org/2012/01/install-oracle-java-jdk-7-in-ubuntu-via.html Closes #19. --- README.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 975ed8c1ee9..a2439fa3af5 100644 --- a/README.md +++ b/README.md @@ -209,11 +209,15 @@ ssh azure-elasticsearch-cluster.cloudapp.net Once connected, install Elasticsearch: ```sh -# Install Latest OpenJDK -# If you would like to use Oracle JDK instead, read the following: -# http://www.webupd8.org/2012/01/install-oracle-java-jdk-7-in-ubuntu-via.html +# Install Latest Java version +# Read http://www.webupd8.org/2012/01/install-oracle-java-jdk-7-in-ubuntu-via.html for details +sudo add-apt-repository ppa:webupd8team/java sudo apt-get update -sudo apt-get install openjdk-7-jre-headless +sudo apt-get install oracle-java7-installer + +# If you want to install OpenJDK instead +# sudo apt-get update +# sudo apt-get install openjdk-7-jre-headless # Download Elasticsearch curl -s https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.0.0.deb -o elasticsearch-1.0.0.deb From 248845638a102a3294c5733e8cc2f1a68cfec2ba Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 31 Jul 2014 00:11:53 +0200 Subject: [PATCH 039/125] Docs: Generating ssh keys There's a much better documentation (including using existing OpenSSH keys like most of us already have for git) on the Azure docs, e.g.: http://azure.microsoft.com/en-us/documentation/articles/linux-use-ssh-key/ Closes #18. --- README.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a2439fa3af5..b28d358d1f3 100644 --- a/README.md +++ b/README.md @@ -71,7 +71,10 @@ Before starting, you need to have: 2014` doesn't seem to create a valid keypair for ssh. FWIW, `OpenSSL 1.0.1c 10 May 2012` on Ubuntu 12.04 LTS is known to work. 
-Here is a description on how to generate this using `openssl`: +You should follow [this guide](http://azure.microsoft.com/en-us/documentation/articles/linux-use-ssh-key/) to learn +how to create or use existing SSH keys. If you have already did it, you can skip the following. + +Here is a description on how to generate SSH keys using `openssl`: ```sh # You may want to use another dir than /tmp @@ -79,7 +82,12 @@ cd /tmp openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure-private.key -out azure-certificate.pem chmod 600 azure-private.key azure-certificate.pem openssl x509 -outform der -in azure-certificate.pem -out azure-certificate.cer +``` +Generate a keystore which will be used by the plugin to authenticate with a certificate +all Azure API calls. + +```sh # Generate a keystore (azurekeystore.pkcs12) # Transform private key to PEM format openssl pkcs8 -topk8 -nocrypt -in azure-private.key -inform PEM -out azure-pk.pem -outform PEM From 8ea9951e1aa649efd5d9d01dfc68a1f3bcf8804f Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 31 Jul 2014 00:40:47 +0200 Subject: [PATCH 040/125] Replace 404 with HttpURLConnection.HTTP_NOT_FOUND --- .../cloud/azure/blobstore/AbstractAzureBlobContainer.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java index 38e9bc95180..bf09f1e4845 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AbstractAzureBlobContainer.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.logging.ESLoggerFactory; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; +import java.net.HttpURLConnection; import java.net.URISyntaxException; /** @@ -97,7 +98,7 @@ public class AbstractAzureBlobContainer 
extends AbstractBlobContainer { is.close(); listener.onCompleted(); } catch (ServiceException e) { - if (e.getHttpStatusCode() == 404) { + if (e.getHttpStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { listener.onFailure(new FileNotFoundException(e.getMessage())); } else { listener.onFailure(e); From 42706653e32289d009ff9d8149928c6b4b353ba2 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 31 Jul 2014 00:40:47 +0200 Subject: [PATCH 041/125] Tests: test GET/DELETE on non existing repo `GET` and `DELETE` should return `404` error. Closes #28. --- .../azure/AzureSnapshotRestoreITest.java | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 86c1c3ef536..9f50a81378a 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -296,6 +296,34 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { } + /** + * For issue #28: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/28 + */ + @Test + public void testGetDeleteNonExistingSnapshot_28() throws StorageException, ServiceException, URISyntaxException { + ClusterAdminClient client = client().admin().cluster(); + logger.info("--> creating azure repository without any path"); + PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") + .setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, getContainerName()) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + + try { + client.prepareGetSnapshots("test-repo").addSnapshots("nonexistingsnapshotname").get(); + fail("Shouldn't be here"); + } catch (SnapshotMissingException ex) { + // Expected + } + + try { + 
client.prepareDeleteSnapshot("test-repo", "nonexistingsnapshotname").get(); + fail("Shouldn't be here"); + } catch (SnapshotMissingException ex) { + // Expected + } + } + /** * For issue #21: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/21 */ From 3cd0e8929be853d9a917f3a120816db830053235 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 31 Jul 2014 00:40:47 +0200 Subject: [PATCH 042/125] Add a missing trace to `removeContainer` --- .../org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java index 5c617798eff..e968ec7a4f5 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -120,6 +120,9 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent Date: Thu, 31 Jul 2014 00:40:47 +0200 Subject: [PATCH 043/125] Docs: make the welcome page more obvious One of the concern we have with our documentation is that it's hard for users to understand which plugin version they should use with a given elasticsearch version. The change will consist of: * have a clean and simple compatibility matrix on master branch * remove list of versions in other branches Closes #29. --- README.md | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index b28d358d1f3..e7f7f4fb28a 100644 --- a/README.md +++ b/README.md @@ -3,23 +3,31 @@ Azure Cloud Plugin for Elasticsearch The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism. -In order to install the plugin, simply run: `bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.3.0`. 
+In order to install the plugin, run: -* For master elasticsearch versions, look at [master branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/master). -* For 1.3.x elasticsearch versions, look at [es-1.3 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.3). -* For 1.2.x elasticsearch versions, look at [es-1.2 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.2). -* For 1.1.x elasticsearch versions, look at [es-1.1 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.1). -* For 1.0.x elasticsearch versions, look at [es-1.0 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.0). -* For 0.90.x elasticsearch versions, look at [es-0.90 branch](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-0.90). +```sh +bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.3.0 +``` -| Azure Cloud Plugin | elasticsearch | Release date | -|-----------------------------|---------------------|:------------:| -| 3.0.0-SNAPSHOT | master | XXXX-XX-XX | +You need to install a version matching your Elasticsearch version: -Please read documentation relative to the version you are using: +| Elasticsearch | AWS Cloud Plugin | Docs | +|------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------| +| master | Build from source | See below | +| es-1.x | Build from source | [2.5.0-SNAPSHOT](https://github.com/dadoonet/elasticsearch-cloud-azure/tree/docs-es-1.x#azure-cloud-plugin-for-elasticsearch) | +| es-1.3 | Build from source | [2.4.0-SNAPSHOT](https://github.com/dadoonet/elasticsearch-cloud-azure/tree/docs-es-1.3#azure-cloud-plugin-for-elasticsearch) | +| es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/v2.3.0/#azure-cloud-plugin-for-elasticsearch) | +| es-1.1 | 2.2.0 | 
[2.2.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/v2.2.0/#azure-cloud-plugin-for-elasticsearch) | +| es-1.0 | 2.1.0 | [2.1.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/v2.1.0/#azure-cloud-plugin-for-elasticsearch) | +| es-0.90 | 1.0.0.alpha1 | [1.0.0.alpha1](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/v1.0.0.alpha1/#azure-cloud-plugin-for-elasticsearch)| -* [3.0.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/master/README.md) +To build a `SNAPSHOT` version, you need to build it with Maven: +```bash +mvn clean install +plugin --install cloud-azure \ + --url file:target/releases/elasticsearch-cloud-azure-X.X.X-SNAPSHOT.zip +``` Azure Virtual Machine Discovery =============================== From 3421168164f6b75eb15e6d15085b00cc8b9890fc Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 1 Aug 2014 18:23:57 +0200 Subject: [PATCH 044/125] Docs: make the welcome page more obvious One of the concern we have with our documentation is that it's hard for users to understand which plugin version they should use with a given elasticsearch version. The change will consist of: * have a clean and simple compatibility matrix on master branch * remove list of versions in other branches Closes #29. 
--- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index e7f7f4fb28a..9aa2aaea236 100644 --- a/README.md +++ b/README.md @@ -14,12 +14,12 @@ You need to install a version matching your Elasticsearch version: | Elasticsearch | AWS Cloud Plugin | Docs | |------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------| | master | Build from source | See below | -| es-1.x | Build from source | [2.5.0-SNAPSHOT](https://github.com/dadoonet/elasticsearch-cloud-azure/tree/docs-es-1.x#azure-cloud-plugin-for-elasticsearch) | -| es-1.3 | Build from source | [2.4.0-SNAPSHOT](https://github.com/dadoonet/elasticsearch-cloud-azure/tree/docs-es-1.3#azure-cloud-plugin-for-elasticsearch) | -| es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/v2.3.0/#azure-cloud-plugin-for-elasticsearch) | -| es-1.1 | 2.2.0 | [2.2.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/v2.2.0/#azure-cloud-plugin-for-elasticsearch) | -| es-1.0 | 2.1.0 | [2.1.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/v2.1.0/#azure-cloud-plugin-for-elasticsearch) | -| es-0.90 | 1.0.0.alpha1 | [1.0.0.alpha1](https://github.com/elasticsearch/elasticsearch-cloud-azure/blob/v1.0.0.alpha1/#azure-cloud-plugin-for-elasticsearch)| +| es-1.x | Build from source | [2.5.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#azure-cloud-plugin-for-elasticsearch) | +| es-1.3 | Build from source | [2.4.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.3/#azure-cloud-plugin-for-elasticsearch) | +| es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.3.0/#azure-cloud-plugin-for-elasticsearch) | +| es-1.1 | 2.2.0 | 
[2.2.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.2.0/#azure-cloud-plugin-for-elasticsearch) | +| es-1.0 | 2.1.0 | [2.1.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.1.0/#azure-cloud-plugin-for-elasticsearch) | +| es-0.90 | 1.0.0.alpha1 | [1.0.0.alpha1](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v1.0.0.alpha1/#azure-cloud-plugin-for-elasticsearch)| To build a `SNAPSHOT` version, you need to build it with Maven: From 45492c009c574673c722746951005789e66171ad Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 1 Aug 2014 18:23:57 +0200 Subject: [PATCH 045/125] Docs: make the welcome page more obvious One of the concern we have with our documentation is that it's hard for users to understand which plugin version they should use with a given elasticsearch version. The change will consist of: * have a clean and simple compatibility matrix on master branch * remove list of versions in other branches Closes #29. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9aa2aaea236..c842ea536fb 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.3.0 You need to install a version matching your Elasticsearch version: -| Elasticsearch | AWS Cloud Plugin | Docs | +| Elasticsearch | Azure Cloud Plugin| Docs | |------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------| | master | Build from source | See below | | es-1.x | Build from source | [2.5.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#azure-cloud-plugin-for-elasticsearch) | From 88ddd31d0b9cb8d05097e7ed28c3df044f5bcf07 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Sat, 2 Aug 2014 15:46:56 +0200 Subject: [PATCH 046/125] Fix non integration tests With change #24, non integration 
can not run anymore. (cherry picked from commit 76eecc8) --- .../azure/AbstractAzureComputeServiceTest.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java index 69dc107a603..96dd47314f6 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java @@ -20,12 +20,13 @@ package org.elasticsearch.discovery.azure; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.cloud.azure.AbstractAzureTest; import org.elasticsearch.cloud.azure.AzureComputeService; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.test.ElasticsearchIntegrationTest; -public abstract class AbstractAzureComputeServiceTest extends AbstractAzureTest { +public abstract class AbstractAzureComputeServiceTest extends ElasticsearchIntegrationTest { private Class mock; @@ -34,6 +35,14 @@ public abstract class AbstractAzureComputeServiceTest extends AbstractAzureTest this.mock = mock; } + @Override + protected Settings nodeSettings(int nodeOrdinal) { + ImmutableSettings.Builder settings = ImmutableSettings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("plugins." 
+ PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true); + return settings.build(); + } + protected void checkNumberOfNodes(int expected) { NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().execute().actionGet(); assertNotNull(nodeInfos); From 19dd764ae55580847482f83bce429c64f285bc83 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Sun, 3 Aug 2014 13:57:37 +0200 Subject: [PATCH 047/125] Update release tool (cherry picked from commit 288755a) --- dev-tools/build_release.py | 515 ++++++++++++++++++++-------------- dev-tools/email_template.html | 24 ++ dev-tools/email_template.txt | 23 ++ 3 files changed, 344 insertions(+), 218 deletions(-) create mode 100644 dev-tools/email_template.html create mode 100644 dev-tools/email_template.txt diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index db8345440c7..cba075449a9 100755 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -6,7 +6,7 @@ # not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on @@ -22,6 +22,7 @@ import datetime import argparse import github3 import smtplib +import sys from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText @@ -68,18 +69,34 @@ Once it's done it will print all the remaining steps. 
env = os.environ LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log') -ROOT_DIR = os.path.join(abspath(dirname(__file__)), '../') -README_FILE = ROOT_DIR + 'README.md' -POM_FILE = ROOT_DIR + 'pom.xml' +ROOT_DIR = abspath(os.path.join(abspath(dirname(__file__)), '../')) +README_FILE = ROOT_DIR + '/README.md' +POM_FILE = ROOT_DIR + '/pom.xml' +DEV_TOOLS_DIR = ROOT_DIR + '/dev-tools' +########################################################## +# +# Utility methods (log and run) +# +########################################################## +# Log a message def log(msg): log_plain('\n%s' % msg) + +# Purge the log file +def purge_log(): + os.remove(LOG) + + +# Log a message to the LOG file def log_plain(msg): f = open(LOG, mode='ab') f.write(msg.encode('utf-8')) f.close() + +# Run a command and log it def run(command, quiet=False): log('%s: RUN: %s\n' % (datetime.datetime.now(), command)) if os.system('%s >> %s 2>&1' % (command, LOG)): @@ -88,19 +105,25 @@ def run(command, quiet=False): print(msg) raise RuntimeError(msg) +########################################################## +# +# Clean logs and check JAVA and Maven +# +########################################################## try: + purge_log() JAVA_HOME = env['JAVA_HOME'] except KeyError: raise RuntimeError(""" Please set JAVA_HOME in the env before running release tool - On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.6*'`""") + On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""") try: - MVN='mvn' + MVN = 'mvn' # make sure mvn3 is used if mvn3 is available # some systems use maven 2 as default run('mvn3 --version', quiet=True) - MVN='mvn3' + MVN = 'mvn3' except RuntimeError: pass @@ -109,34 +132,15 @@ def java_exe(): path = JAVA_HOME return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path) -# Returns the hash of the current git HEAD revision -def get_head_hash(): - return os.popen(' git rev-parse --verify HEAD 2>&1').read().strip() - -# 
Returns the hash of the given tag revision -def get_tag_hash(tag): - return os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read().strip() - -# Returns the name of the current branch -def get_current_branch(): - return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip() +########################################################## +# +# String and file manipulation utils +# +########################################################## # Utility that returns the name of the release branch for a given version -def release_branch(version): - return 'release_branch_%s' % version - -# runs get fetch on the given remote -def fetch(remote): - run('git fetch %s' % remote) - -# Creates a new release branch from the given source branch -# and rebases the source branch from the remote before creating -# the release branch. Note: This fails if the source branch -# doesn't exist on the provided remote. -def create_release_branch(remote, src_branch, release): - run('git checkout %s' % src_branch) - run('git pull --rebase %s %s' % (remote, src_branch)) - run('git checkout -b %s' % (release_branch(release))) +def release_branch(branchsource, version): + return 'release_branch_%s_%s' % (branchsource, version) # Reads the given file and applies the @@ -146,7 +150,7 @@ def create_release_branch(remote, src_branch, release): def process_file(file_path, line_callback): fh, abs_path = tempfile.mkstemp() modified = False - with open(abs_path,'w', encoding='utf-8') as new_file: + with open(abs_path, 'w', encoding='utf-8') as new_file: with open(file_path, encoding='utf-8') as old_file: for line in old_file: new_line = line_callback(line) @@ -164,124 +168,82 @@ def process_file(file_path, line_callback): os.remove(abs_path) return False -# Guess the next snapshot version number (increment second digit) + +# Guess the next snapshot version number (increment last digit) def guess_snapshot(version): - digits=list(map(int, re.findall(r'\d+', version))) - source='%s.%s' % 
(digits[0], digits[1]) - destination='%s.%s' % (digits[0], digits[1]+1) + digits = list(map(int, re.findall(r'\d+', version))) + source = '%s.%s.%s' % (digits[0], digits[1], digits[2]) + destination = '%s.%s.%s' % (digits[0], digits[1], digits[2] + 1) return version.replace(source, destination) + +# Guess the anchor in generated documentation +# Looks like this "#version-230-for-elasticsearch-13" +def get_doc_anchor(release, esversion): + plugin_digits = list(map(int, re.findall(r'\d+', release))) + es_digits = list(map(int, re.findall(r'\d+', esversion))) + return '#version-%s%s%s-for-elasticsearch-%s%s' % ( + plugin_digits[0], plugin_digits[1], plugin_digits[2], es_digits[0], es_digits[1]) + + # Moves the pom.xml file from a snapshot to a release def remove_maven_snapshot(pom, release): pattern = '%s-SNAPSHOT' % release replacement = '%s' % release + def callback(line): return line.replace(pattern, replacement) + process_file(pom, callback) -# Moves the README.md file from a snapshot to a release -def remove_version_snapshot(readme_file, release): - pattern = '%s-SNAPSHOT' % release - replacement = '%s ' % release - def callback(line): - return line.replace(pattern, replacement) - process_file(readme_file, callback) # Moves the pom.xml file to the next snapshot def add_maven_snapshot(pom, release, snapshot): pattern = '%s' % release replacement = '%s-SNAPSHOT' % snapshot + def callback(line): return line.replace(pattern, replacement) + process_file(pom, callback) -# Add in README.md file the next snapshot -def add_version_snapshot(readme_file, release, snapshot): - pattern = '| %s ' % release - replacement = '| %s-SNAPSHOT' % snapshot - def callback(line): - # If we find pattern, we copy the line and replace its content - if line.find(pattern) >= 0: - return line.replace(pattern, replacement).replace('%s' % (datetime.datetime.now().strftime("%Y-%m-%d")), - 'XXXX-XX-XX')+line - else: - return line - process_file(readme_file, callback) # Moves the README.md file 
from a snapshot to a release (documentation link) -def remove_documentation_snapshot(readme_file, repo_url, release, branch): - pattern = '* [%s-SNAPSHOT](%sblob/%s/README.md)' % (release, repo_url, branch) - replacement = '* [%s](%sblob/v%s/README.md)' % (release, repo_url, release) +# We need to find the right branch we are on and update the line +# | es-1.3 | Build from source | [2.4.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.3/#version-240-snapshot-for-elasticsearch-13) | +# | es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.3.0/#version-230-snapshot-for-elasticsearch-13) | +def update_documentation_to_released_version(readme_file, repo_url, release, branch, esversion): + pattern = '%s' % branch + replacement = '| %s | %s | [%s](%stree/v%s/%s) |\n' % ( + branch, release, release, repo_url, release, get_doc_anchor(release, esversion)) + def callback(line): # If we find pattern, we replace its content if line.find(pattern) >= 0: - return line.replace(pattern, replacement) + return replacement else: return line + process_file(readme_file, callback) -# Add in README.markdown file the documentation for the next version -def add_documentation_snapshot(readme_file, repo_url, release, snapshot, branch): - pattern = '* [%s](%sblob/v%s/README.md)' % (release, repo_url, release) - replacement = '* [%s-SNAPSHOT](%sblob/%s/README.md)' % (snapshot, repo_url, branch) - def callback(line): - # If we find pattern, we copy the line and replace its content - if line.find(pattern) >= 0: - return line.replace(pattern, replacement)+line - else: - return line - process_file(readme_file, callback) - -# Set release date in README.md file -def set_date(readme_file): - pattern = 'XXXX-XX-XX' - replacement = '%s' % (datetime.datetime.now().strftime("%Y-%m-%d")) - def callback(line): - return line.replace(pattern, replacement) - process_file(readme_file, callback) # Update installation instructions in 
README.md file def set_install_instructions(readme_file, artifact_name, release): pattern = '`bin/plugin -install elasticsearch/%s/.+`' % artifact_name replacement = '`bin/plugin -install elasticsearch/%s/%s`' % (artifact_name, release) + def callback(line): return re.sub(pattern, replacement, line) + process_file(readme_file, callback) -# Stages the given files for the next git commit -def add_pending_files(*files): - for file in files: - run('git add %s' % file) - -# Executes a git commit with 'release [version]' as the commit message -def commit_release(artifact_id, release): - run('git commit -m "prepare release %s-%s"' % (artifact_id, release)) - -def commit_snapshot(): - run('git commit -m "prepare for next development iteration"') - -def tag_release(release): - run('git tag -a v%s -m "Tag release version %s"' % (release, release)) - -def run_mvn(*cmd): - for c in cmd: - run('%s; %s -f %s %s' % (java_exe(), MVN, POM_FILE, c)) - -def build_release(run_tests=False, dry_run=True): - target = 'deploy' - if dry_run: - target = 'package' - if run_tests: - run_mvn('clean test') - run_mvn('clean %s -DskipTests' %(target)) - # Checks the pom.xml for the release version. 2.0.0-SNAPSHOT # This method fails if the pom file has no SNAPSHOT version set ie. # if the version is already on a release version we fail. # Returns the next version string ie. 
0.90.7 def find_release_version(src_branch): - run('git checkout %s' % src_branch) + git_checkout(src_branch) with open(POM_FILE, encoding='utf-8') as file: for line in file: match = re.search(r'(.+)-SNAPSHOT', line) @@ -289,6 +251,7 @@ def find_release_version(src_branch): return match.group(1) raise RuntimeError('Could not find release version in branch %s' % src_branch) + # extract a value from pom.xml def find_from_pom(tag): with open(POM_FILE, encoding='utf-8') as file: @@ -298,13 +261,16 @@ def find_from_pom(tag): return match.group(1) raise RuntimeError('Could not find <%s> in pom.xml file' % (tag)) + +# Get artifacts which have been generated in target/releases def get_artifacts(artifact_id, release): - artifact_path = ROOT_DIR + 'target/releases/%s-%s.zip' % (artifact_id, release) - print(' Path %s' % (artifact_path)) + artifact_path = ROOT_DIR + '/target/releases/%s-%s.zip' % (artifact_id, release) + print(' Path %s' % artifact_path) if not os.path.isfile(artifact_path): - raise RuntimeError('Could not find required artifact at %s' % (artifact_path)) + raise RuntimeError('Could not find required artifact at %s' % artifact_path) return artifact_path + # Generates sha1 for a file # and returns the checksum files as well # as the given files in a list @@ -316,38 +282,11 @@ def generate_checksums(release_file): if os.system('cd %s; shasum %s > %s' % (directory, file, checksum_file)): raise RuntimeError('Failed to generate checksum for file %s' % release_file) - res = res + [os.path.join(directory, checksum_file), release_file] + res += [os.path.join(directory, checksum_file), release_file] return res -def git_merge(src_branch, release_version): - run('git checkout %s' % src_branch) - run('git merge %s' % release_branch(release_version)) -def git_push(remote, src_branch, release_version, dry_run): - if not dry_run: - run('git push %s %s' % (remote, src_branch)) # push the commit - run('git push %s v%s' % (remote, release_version)) # push the tag - else: - 
print(' dryrun [True] -- skipping push to remote %s' % remote) - -def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=True): - location = os.path.dirname(os.path.realpath(__file__)) - for artifact in artifacts: - if dry_run: - print('Skip Uploading %s to Amazon S3 in %s' % (artifact, base)) - else: - print('Uploading %s to Amazon S3' % artifact) - # requires boto to be installed but it is not available on python3k yet so we use a dedicated tool - run('python %s/upload-s3.py --file %s --path %s' % (location, os.path.abspath(artifact), base)) - - -################# -## -## -## Email and Github Management -## -## -################# +# Format a GitHub issue as plain text def format_issues_plain(issues, title='Fix'): response = "" @@ -358,6 +297,8 @@ def format_issues_plain(issues, title='Fix'): return response + +# Format a GitHub issue as html text def format_issues_html(issues, title='Fix'): response = "" @@ -369,6 +310,130 @@ def format_issues_html(issues, title='Fix'): return response + +########################################################## +# +# GIT commands +# +########################################################## +# Returns the hash of the current git HEAD revision +def get_head_hash(): + return os.popen('git rev-parse --verify HEAD 2>&1').read().strip() + + +# Returns the name of the current branch +def get_current_branch(): + return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip() + + +# runs get fetch on the given remote +def fetch(remote): + run('git fetch %s' % remote) + + +# Creates a new release branch from the given source branch +# and rebases the source branch from the remote before creating +# the release branch. Note: This fails if the source branch +# doesn't exist on the provided remote. 
+def create_release_branch(remote, src_branch, release): + git_checkout(src_branch) + run('git pull --rebase %s %s' % (remote, src_branch)) + run('git checkout -b %s' % (release_branch(src_branch, release))) + + +# Stages the given files for the next git commit +def add_pending_files(*files): + for file in files: + run('git add %s' % file) + + +# Executes a git commit with 'release [version]' as the commit message +def commit_release(artifact_id, release): + run('git commit -m "prepare release %s-%s"' % (artifact_id, release)) + + +# Commit documentation changes on the master branch +def commit_master(release): + run('git commit -m "update documentation with release %s"' % release) + + +# Commit next snapshot files +def commit_snapshot(): + run('git commit -m "prepare for next development iteration"') + + +# Put the version tag on on the current commit +def tag_release(release): + run('git tag -a v%s -m "Tag release version %s"' % (release, release)) + + +# Checkout a given branch +def git_checkout(branch): + run('git checkout %s' % branch) + + +# Merge the release branch with the actual branch +def git_merge(src_branch, release_version): + git_checkout(src_branch) + run('git merge %s' % release_branch(src_branch, release_version)) + + +# Push the actual branch and master branch +def git_push(remote, src_branch, release_version, dry_run): + if not dry_run: + run('git push %s %s master' % (remote, src_branch)) # push the commit and the master + run('git push %s v%s' % (remote, release_version)) # push the tag + else: + print(' dryrun [True] -- skipping push to remote %s %s master' % (remote, src_branch)) + + +########################################################## +# +# Maven commands +# +########################################################## +# Run a given maven command +def run_mvn(*cmd): + for c in cmd: + run('%s; %s -f %s %s' % (java_exe(), MVN, POM_FILE, c)) + + +# Run deploy or package depending on dry_run +# Default to run mvn package +# When 
run_tests=True a first mvn clean test is run +def build_release(run_tests=False, dry_run=True): + target = 'deploy' + tests = '-DskipTests' + if run_tests: + tests = '' + if dry_run: + target = 'package' + run_mvn('clean %s %s' % (target, tests)) + + +########################################################## +# +# Amazon S3 publish commands +# +########################################################## +# Upload files to S3 +def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=True): + location = os.path.dirname(os.path.realpath(__file__)) + for artifact in artifacts: + if dry_run: + print('Skip Uploading %s to Amazon S3 in %s' % (artifact, base)) + else: + print('Uploading %s to Amazon S3' % artifact) + # requires boto to be installed but it is not available on python3k yet so we use a dedicated tool + run('python %s/upload-s3.py --file %s --path %s' % (location, os.path.abspath(artifact), base)) + + +########################################################## +# +# Email and Github Management +# +########################################################## +# Create a Github repository instance to access issues def get_github_repository(reponame, login=env.get('GITHUB_LOGIN', None), password=env.get('GITHUB_PASSWORD', None), @@ -382,31 +447,45 @@ def get_github_repository(reponame, return g.repository("elasticsearch", reponame) + # Check if there are some remaining open issues and fails def check_opened_issues(version, repository, reponame): opened_issues = [i for i in repository.iter_issues(state='open', labels='%s' % version)] - if len(opened_issues)>0: - raise NameError('Some issues [%s] are still opened. Check https://github.com/elasticsearch/%s/issues?labels=%s&state=open' - % (len(opened_issues), reponame, version)) + if len(opened_issues) > 0: + raise NameError( + 'Some issues [%s] are still opened. 
Check https://github.com/elasticsearch/%s/issues?labels=%s&state=open' + % (len(opened_issues), reponame, version)) + # List issues from github: can be done anonymously if you don't # exceed a given number of github API calls per day -# Check if there are some remaining open issues and fails def list_issues(version, repository, severity='bug'): issues = [i for i in repository.iter_issues(state='closed', labels='%s,%s' % (severity, version))] return issues + +def read_email_template(format='html'): + file_name = '%s/email_template.%s' % (DEV_TOOLS_DIR, format) + log('open email template %s' % file_name) + with open(file_name, encoding='utf-8') as template_file: + data = template_file.read() + return data + + +# Read template messages +template_email_html = read_email_template('html') +template_email_txt = read_email_template('txt') + + # Get issues from github and generates a Plain/HTML Multipart email -# And send it if dry_run=False def prepare_email(artifact_id, release_version, repository, artifact_name, artifact_description, project_url, severity_labels_bug='bug', severity_labels_update='update', severity_labels_new='new', severity_labels_doc='doc'): - ## Get bugs from github issues_bug = list_issues(release_version, repository, severity=severity_labels_bug) issues_update = list_issues(release_version, repository, severity=severity_labels_update) @@ -425,7 +504,7 @@ def prepare_email(artifact_id, release_version, repository, html_issues_new = format_issues_html(issues_new, 'New') html_issues_doc = format_issues_html(issues_doc, 'Doc') - if len(issues_bug)+len(issues_update)+len(issues_new)+len(issues_doc) > 0: + if len(issues_bug) + len(issues_update) + len(issues_new) + len(issues_doc) > 0: plain_empty_message = "" html_empty_message = "" @@ -435,31 +514,7 @@ def prepare_email(artifact_id, release_version, repository, msg = MIMEMultipart('alternative') msg['Subject'] = '[ANN] %s %s released' % (artifact_name, release_version) - text = """ -Heya, - - -We are 
pleased to announce the release of the %(artifact_name)s, version %(release_version)s. - -%(artifact_description)s. - -%(project_url)s - -Release Notes - %(artifact_id)s - Version %(release_version)s - -%(empty_message)s -%(issues_bug)s -%(issues_update)s -%(issues_new)s -%(issues_doc)s - -Issues, Pull requests, Feature requests are warmly welcome on %(artifact_id)s project repository: %(project_url)s -For questions or comments around this plugin, feel free to use elasticsearch mailing list: https://groups.google.com/forum/#!forum/elasticsearch - -Enjoy, - --The Elasticsearch team -""" % {'release_version': release_version, + text = template_email_txt % {'release_version': release_version, 'artifact_id': artifact_id, 'artifact_name': artifact_name, 'artifact_description': artifact_description, @@ -470,32 +525,7 @@ Enjoy, 'issues_new': plain_issues_new, 'issues_doc': plain_issues_doc} - html = """ - - -

    Heya,

    - -

    We are pleased to announce the release of the %(artifact_name)s, version %(release_version)s

    - -
    %(artifact_description)s.
    - -

    Release Notes - Version %(release_version)s

    -%(empty_message)s -%(issues_bug)s -%(issues_update)s -%(issues_new)s -%(issues_doc)s - -

    Issues, Pull requests, Feature requests are warmly welcome on -%(artifact_id)s project repository!

    -

    For questions or comments around this plugin, feel free to use elasticsearch -mailing list!

    - -

    Enjoy,

    - -

    - The Elasticsearch team

    - -""" % {'release_version': release_version, + html = template_email_html % {'release_version': release_version, 'artifact_id': artifact_id, 'artifact_name': artifact_name, 'artifact_description': artifact_description, @@ -518,6 +548,7 @@ Enjoy, return msg + def send_email(msg, dry_run=True, mail=True, @@ -527,14 +558,16 @@ def send_email(msg, msg['From'] = 'Elasticsearch Team <%s>' % sender msg['To'] = 'Elasticsearch Mailing List <%s>' % to # save mail on disk - with open(ROOT_DIR+'target/email.txt', 'w') as email_file: + with open(ROOT_DIR + '/target/email.txt', 'w') as email_file: email_file.write(msg.as_string()) if mail and not dry_run: s = smtplib.SMTP(smtp_server, 25) s.sendmail(sender, to, msg.as_string()) s.quit() else: - print('generated email: open %starget/email.txt' % ROOT_DIR) + print('generated email: open %s/target/email.txt' % ROOT_DIR) + print(msg.as_string()) + def print_sonatype_notice(): settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml') @@ -566,13 +599,18 @@ def print_sonatype_notice(): """) + def check_s3_credentials(): if not env.get('AWS_ACCESS_KEY_ID', None) or not env.get('AWS_SECRET_ACCESS_KEY', None): - raise RuntimeError('Could not find "AWS_ACCESS_KEY_ID" / "AWS_SECRET_ACCESS_KEY" in the env variables please export in order to upload to S3') + raise RuntimeError( + 'Could not find "AWS_ACCESS_KEY_ID" / "AWS_SECRET_ACCESS_KEY" in the env variables please export in order to upload to S3') + def check_github_credentials(): if not env.get('GITHUB_KEY', None) and not env.get('GITHUB_LOGIN', None): - log('WARN: Could not find "GITHUB_LOGIN" / "GITHUB_PASSWORD" or "GITHUB_KEY" in the env variables. You could need it.') + log( + 'WARN: Could not find "GITHUB_LOGIN" / "GITHUB_PASSWORD" or "GITHUB_KEY" in the env variables. 
You could need it.') + def check_email_settings(): if not env.get('MAIL_SENDER', None): @@ -605,6 +643,9 @@ if __name__ == '__main__': dry_run = args.dryrun mail = args.mail + if src_branch == 'master': + raise RuntimeError('Can not release the master branch. You need to create another branch before a release') + if not dry_run: check_s3_credentials() print('WARNING: dryrun is set to "false" - this will push and publish the release') @@ -645,20 +686,32 @@ if __name__ == '__main__': if not dry_run: smoke_test_version = release_version - head_hash = get_head_hash() - run_mvn('clean') # clean the env! - create_release_branch(remote, src_branch, release_version) - print(' Created release branch [%s]' % (release_branch(release_version))) + + try: + git_checkout('master') + master_hash = get_head_hash() + git_checkout(src_branch) + version_hash = get_head_hash() + run_mvn('clean') # clean the env! + create_release_branch(remote, 'master', release_version) + print(' Created release branch [%s]' % (release_branch('master', release_version))) + create_release_branch(remote, src_branch, release_version) + print(' Created release branch [%s]' % (release_branch(src_branch, release_version))) + except RuntimeError: + print('Logs:') + with open(LOG, 'r') as log_file: + print(log_file.read()) + sys.exit(-1) + success = False try: + ######################################## + # Start update process in version branch + ######################################## pending_files = [POM_FILE, README_FILE] remove_maven_snapshot(POM_FILE, release_version) - remove_documentation_snapshot(README_FILE, project_url, release_version, src_branch) - remove_version_snapshot(README_FILE, release_version) - set_date(README_FILE) - set_install_instructions(README_FILE, artifact_id, release_version) print(' Done removing snapshot version') - add_pending_files(*pending_files) # expects var args use * to expand + add_pending_files(*pending_files) # expects var args use * to expand 
commit_release(artifact_id, release_version) print(' Committed release version [%s]' % release_version) print(''.join(['-' for _ in range(80)])) @@ -676,25 +729,38 @@ if __name__ == '__main__': artifact_and_checksums = generate_checksums(artifact) print(''.join(['-' for _ in range(80)])) + ######################################## + # Start update process in master branch + ######################################## + git_checkout(release_branch('master', release_version)) + update_documentation_to_released_version(README_FILE, project_url, release_version, src_branch, + elasticsearch_version) + set_install_instructions(README_FILE, artifact_id, release_version) + add_pending_files(*pending_files) # expects var args use * to expand + commit_master(release_version) + print('Finish Release -- dry_run: %s' % dry_run) input('Press Enter to continue...') + print(' merge release branch') git_merge(src_branch, release_version) print(' tag') tag_release(release_version) add_maven_snapshot(POM_FILE, release_version, snapshot_version) - add_version_snapshot(README_FILE, release_version, snapshot_version) - add_documentation_snapshot(README_FILE, project_url, release_version, snapshot_version, src_branch) add_pending_files(*pending_files) commit_snapshot() + print(' merge master branch') + git_merge('master', release_version) + print(' push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run)) git_push(remote, src_branch, release_version, dry_run) print(' publish artifacts to S3 -- dry_run: %s' % dry_run) publish_artifacts(artifact_and_checksums, base='elasticsearch/%s' % (artifact_id) , dry_run=dry_run) print(' preparing email (from github issues)') msg = prepare_email(artifact_id, release_version, repository, artifact_name, artifact_description, project_url) + input('Press Enter to send email...') print(' sending email -- dry_run: %s, mail: %s' % (dry_run, mail)) send_email(msg, dry_run=dry_run, mail=mail) @@ -710,13 +776,26 @@ Release successful pending steps: success = 
True finally: if not success: - run('git reset --hard HEAD') - run('git checkout %s' % src_branch) + print('Logs:') + with open(LOG, 'r') as log_file: + print(log_file.read()) + git_checkout('master') + run('git reset --hard %s' % master_hash) + git_checkout(src_branch) + run('git reset --hard %s' % version_hash) + try: + run('git tag -d v%s' % release_version) + except RuntimeError: + pass elif dry_run: print('End of dry_run') input('Press Enter to reset changes...') - - run('git reset --hard %s' % head_hash) + git_checkout('master') + run('git reset --hard %s' % master_hash) + git_checkout(src_branch) + run('git reset --hard %s' % version_hash) run('git tag -d v%s' % release_version) + # we delete this one anyways - run('git branch -D %s' % (release_branch(release_version))) + run('git branch -D %s' % (release_branch('master', release_version))) + run('git branch -D %s' % (release_branch(src_branch, release_version))) diff --git a/dev-tools/email_template.html b/dev-tools/email_template.html new file mode 100644 index 00000000000..96d0b4b8d19 --- /dev/null +++ b/dev-tools/email_template.html @@ -0,0 +1,24 @@ + + +

    Heya,

    + +

    We are pleased to announce the release of the %(artifact_name)s, version %(release_version)s

    + +
    %(artifact_description)s.
    + +

    Release Notes - Version %(release_version)s

    +%(empty_message)s +%(issues_bug)s +%(issues_update)s +%(issues_new)s +%(issues_doc)s + +

    Issues, Pull requests, Feature requests are warmly welcome on + %(artifact_id)s project repository!

    +

    For questions or comments around this plugin, feel free to use elasticsearch + mailing list!

    + +

    Enjoy,

    +

    - The Elasticsearch team

    + + diff --git a/dev-tools/email_template.txt b/dev-tools/email_template.txt new file mode 100644 index 00000000000..1550849c2ac --- /dev/null +++ b/dev-tools/email_template.txt @@ -0,0 +1,23 @@ +Heya, + + +We are pleased to announce the release of the %(artifact_name)s, version %(release_version)s. + +%(artifact_description)s. + +%(project_url)s + +Release Notes - %(artifact_id)s - Version %(release_version)s + +%(empty_message)s +%(issues_bug)s +%(issues_update)s +%(issues_new)s +%(issues_doc)s + +Issues, Pull requests, Feature requests are warmly welcome on %(artifact_id)s project repository: %(project_url)s +For questions or comments around this plugin, feel free to use elasticsearch mailing list: https://groups.google.com/forum/#!forum/elasticsearch + +Enjoy, + +-The Elasticsearch team From 5240576320bda69613f49efe581a25d4b2bf4ea6 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 4 Aug 2014 16:11:37 +0200 Subject: [PATCH 048/125] Update release tool (cherry picked from commit b3dd4a7) --- dev-tools/build_release.py | 47 ++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index cba075449a9..873ef9c1a38 100755 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -46,10 +46,13 @@ from os.path import dirname, abspath - run prerequisite checks ie. 
check for S3 credentials available as env variables - detect the version to release from the specified branch (--branch) or the current branch - - creates a release branch & updates pom.xml and README.md to point to a release version rather than a snapshot + - check that github issues related to the version are closed + - creates a version release branch & updates pom.xml to point to a release version rather than a snapshot + - creates a master release branch & updates README.md to point to the latest release version for the given elasticsearch branch - builds the artifacts - - commits the new version and merges the release branch into the source branch - - creates a tag and pushes the commit to the specified origin (--remote) + - commits the new version and merges the version release branch into the source branch + - merges the master release branch into the master branch + - creates a tag and pushes branch and master to the specified origin (--remote) - publishes the releases to sonatype and S3 - send a mail based on github issues fixed by this version @@ -72,7 +75,7 @@ LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log') ROOT_DIR = abspath(os.path.join(abspath(dirname(__file__)), '../')) README_FILE = ROOT_DIR + '/README.md' POM_FILE = ROOT_DIR + '/pom.xml' -DEV_TOOLS_DIR = ROOT_DIR + '/dev-tools' +DEV_TOOLS_DIR = ROOT_DIR + '/plugin_tools' ########################################################## # @@ -515,26 +518,26 @@ def prepare_email(artifact_id, release_version, repository, msg = MIMEMultipart('alternative') msg['Subject'] = '[ANN] %s %s released' % (artifact_name, release_version) text = template_email_txt % {'release_version': release_version, - 'artifact_id': artifact_id, - 'artifact_name': artifact_name, - 'artifact_description': artifact_description, - 'project_url': project_url, - 'empty_message': plain_empty_message, - 'issues_bug': plain_issues_bug, - 'issues_update': plain_issues_update, - 'issues_new': plain_issues_new, - 
'issues_doc': plain_issues_doc} + 'artifact_id': artifact_id, + 'artifact_name': artifact_name, + 'artifact_description': artifact_description, + 'project_url': project_url, + 'empty_message': plain_empty_message, + 'issues_bug': plain_issues_bug, + 'issues_update': plain_issues_update, + 'issues_new': plain_issues_new, + 'issues_doc': plain_issues_doc} html = template_email_html % {'release_version': release_version, - 'artifact_id': artifact_id, - 'artifact_name': artifact_name, - 'artifact_description': artifact_description, - 'project_url': project_url, - 'empty_message': html_empty_message, - 'issues_bug': html_issues_bug, - 'issues_update': html_issues_update, - 'issues_new': html_issues_new, - 'issues_doc': html_issues_doc} + 'artifact_id': artifact_id, + 'artifact_name': artifact_name, + 'artifact_description': artifact_description, + 'project_url': project_url, + 'empty_message': html_empty_message, + 'issues_bug': html_issues_bug, + 'issues_update': html_issues_update, + 'issues_new': html_issues_new, + 'issues_doc': html_issues_doc} # Record the MIME types of both parts - text/plain and text/html. 
part1 = MIMEText(text, 'plain') From 2f52610522ac69561a54200a9f331b81ae44067b Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 4 Aug 2014 16:19:37 +0200 Subject: [PATCH 049/125] Update release tool (cherry picked from commit d56d5fb) --- dev-tools/build_release.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index 873ef9c1a38..16bbb0aac08 100755 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -75,7 +75,7 @@ LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log') ROOT_DIR = abspath(os.path.join(abspath(dirname(__file__)), '../')) README_FILE = ROOT_DIR + '/README.md' POM_FILE = ROOT_DIR + '/pom.xml' -DEV_TOOLS_DIR = ROOT_DIR + '/plugin_tools' +DEV_TOOLS_DIR = ROOT_DIR + '/dev-tools' ########################################################## # From e08dc71c2f76522ff441026c6be10400b066031b Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 4 Aug 2014 16:24:26 +0200 Subject: [PATCH 050/125] update documentation with release 2.4.0 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c842ea536fb..a17e7932156 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ You need to install a version matching your Elasticsearch version: |------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------| | master | Build from source | See below | | es-1.x | Build from source | [2.5.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#azure-cloud-plugin-for-elasticsearch) | -| es-1.3 | Build from source | [2.4.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.3/#azure-cloud-plugin-for-elasticsearch) | +| es-1.3 | 2.4.0 | [2.4.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.4.0/#version-240-for-elasticsearch-13) | | 
es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.3.0/#azure-cloud-plugin-for-elasticsearch) | | es-1.1 | 2.2.0 | [2.2.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.2.0/#azure-cloud-plugin-for-elasticsearch) | | es-1.0 | 2.1.0 | [2.1.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.1.0/#azure-cloud-plugin-for-elasticsearch) | From 6582d8b42c1f55f91e04d806b313ffe989e52624 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 4 Aug 2014 16:19:37 +0200 Subject: [PATCH 051/125] fix install instruction with version 2.4.0 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a17e7932156..e9720a94ca6 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ The Azure Cloud plugin allows to use Azure API for the unicast discovery mechani In order to install the plugin, run: ```sh -bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.3.0 +bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.4.0 ``` You need to install a version matching your Elasticsearch version: From 985e3002b4a5722e0e9060f56ae4f9c50475a7d0 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 4 Aug 2014 16:19:37 +0200 Subject: [PATCH 052/125] Fix installation instructions update --- dev-tools/build_release.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index 16bbb0aac08..94ec7204326 100755 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -232,8 +232,8 @@ def update_documentation_to_released_version(readme_file, repo_url, release, bra # Update installation instructions in README.md file def set_install_instructions(readme_file, artifact_name, release): - pattern = '`bin/plugin -install elasticsearch/%s/.+`' % artifact_name - replacement = '`bin/plugin -install elasticsearch/%s/%s`' % (artifact_name, release) + pattern = 'bin/plugin -install 
elasticsearch/%s/.+' % artifact_name + replacement = 'bin/plugin -install elasticsearch/%s/%s' % (artifact_name, release) def callback(line): return re.sub(pattern, replacement, line) From 8e71337d041be0129647e0c92075891ef2db01d3 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 4 Aug 2014 22:56:02 +0200 Subject: [PATCH 053/125] Update release script We need also to update README in current release branch. (cherry picked from commit 42db2fa) (cherry picked from commit 91d7989) --- dev-tools/build_release.py | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index 94ec7204326..c6d4db295d9 100755 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -172,9 +172,14 @@ def process_file(file_path, line_callback): return False +# Split a version x.y.z as an array of digits [x,y,z] +def split_version_to_digits(version): + return list(map(int, re.findall(r'\d+', version))) + + # Guess the next snapshot version number (increment last digit) def guess_snapshot(version): - digits = list(map(int, re.findall(r'\d+', version))) + digits = split_version_to_digits(version) source = '%s.%s.%s' % (digits[0], digits[1], digits[2]) destination = '%s.%s.%s' % (digits[0], digits[1], digits[2] + 1) return version.replace(source, destination) @@ -183,8 +188,8 @@ def guess_snapshot(version): # Guess the anchor in generated documentation # Looks like this "#version-230-for-elasticsearch-13" def get_doc_anchor(release, esversion): - plugin_digits = list(map(int, re.findall(r'\d+', release))) - es_digits = list(map(int, re.findall(r'\d+', esversion))) + plugin_digits = split_version_to_digits(release) + es_digits = split_version_to_digits(esversion) return '#version-%s%s%s-for-elasticsearch-%s%s' % ( plugin_digits[0], plugin_digits[1], plugin_digits[2], es_digits[0], es_digits[1]) @@ -211,6 +216,26 @@ def add_maven_snapshot(pom, release, snapshot): process_file(pom, 
callback) +# Moves the README.md file from a snapshot to a release version. Doc looks like: +# ## Version 2.5.0-SNAPSHOT for Elasticsearch: 1.x +# It needs to be updated to +# ## Version 2.5.0 for Elasticsearch: 1.x +def update_documentation_in_released_branch(readme_file, release, esversion): + pattern = '## Version (.)+ for Elasticsearch: (.)+' + es_digits = split_version_to_digits(esversion) + replacement = '## Version %s for Elasticsearch: %s.%s\n' % ( + release, es_digits[0], es_digits[1]) + + def callback(line): + # If we find pattern, we replace its content + if re.search(pattern, line) is not None: + return replacement + else: + return line + + process_file(readme_file, callback) + + # Moves the README.md file from a snapshot to a release (documentation link) # We need to find the right branch we are on and update the line # | es-1.3 | Build from source | [2.4.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.3/#version-240-snapshot-for-elasticsearch-13) | @@ -713,6 +738,7 @@ if __name__ == '__main__': ######################################## pending_files = [POM_FILE, README_FILE] remove_maven_snapshot(POM_FILE, release_version) + update_documentation_in_released_branch(README_FILE, release_version, elasticsearch_version) print(' Done removing snapshot version') add_pending_files(*pending_files) # expects var args use * to expand commit_release(artifact_id, release_version) @@ -751,6 +777,7 @@ if __name__ == '__main__': tag_release(release_version) add_maven_snapshot(POM_FILE, release_version, snapshot_version) + update_documentation_in_released_branch(README_FILE, '%s-SNAPSHOT' % snapshot_version, elasticsearch_version) add_pending_files(*pending_files) commit_snapshot() From 74bd4f78818b3fb60c69dc3c985961d7b861605c Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 4 Aug 2014 22:56:02 +0200 Subject: [PATCH 054/125] Remove unused property --- pom.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/pom.xml b/pom.xml 
index a752d6531d2..e0baedb018c 100644 --- a/pom.xml +++ b/pom.xml @@ -51,7 +51,6 @@ governing permissions and limitations under the License. --> INFO - ${project.build.testOutputDirectory}/elasticsearch.yml
    From b65373b7a17bad6521447b6d8938d1cc0300d87e Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 5 Aug 2014 11:08:26 +0200 Subject: [PATCH 055/125] Use new release tool for elasticsearch plugins See https://github.com/elasticsearch/elasticsearch-plugins-script --- .gitignore | 1 + dev-tools/build_release.py | 831 ---------------------------------- dev-tools/email_template.html | 24 - dev-tools/email_template.txt | 23 - dev-tools/release.py | 134 ++++++ dev-tools/upload-s3.py | 67 --- 6 files changed, 135 insertions(+), 945 deletions(-) delete mode 100755 dev-tools/build_release.py delete mode 100644 dev-tools/email_template.html delete mode 100644 dev-tools/email_template.txt create mode 100644 dev-tools/release.py delete mode 100644 dev-tools/upload-s3.py diff --git a/.gitignore b/.gitignore index 87879ffa87d..38d7701b94d 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ /.classpath *.vmoptions .local-execution-hints.log +plugin_tools diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py deleted file mode 100755 index c6d4db295d9..00000000000 --- a/dev-tools/build_release.py +++ /dev/null @@ -1,831 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on -# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the License. 
- -import re -import tempfile -import shutil -import os -import datetime -import argparse -import github3 -import smtplib -import sys - -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText - -from os.path import dirname, abspath - -""" - This tool builds a release from the a given elasticsearch plugin branch. - In order to execute it go in the top level directory and run: - $ python3 dev_tools/build_release.py --branch master --publish --remote origin - - By default this script runs in 'dry' mode which essentially simulates a release. If the - '--publish' option is set the actual release is done. - If not in 'dry' mode, a mail will be automatically sent to the mailing list. - You can disable it with the option '--disable_mail' - - $ python3 dev_tools/build_release.py --publish --remote origin --disable_mail - - The script takes over almost all - steps necessary for a release from a high level point of view it does the following things: - - - run prerequisite checks ie. check for S3 credentials available as env variables - - detect the version to release from the specified branch (--branch) or the current branch - - check that github issues related to the version are closed - - creates a version release branch & updates pom.xml to point to a release version rather than a snapshot - - creates a master release branch & updates README.md to point to the latest release version for the given elasticsearch branch - - builds the artifacts - - commits the new version and merges the version release branch into the source branch - - merges the master release branch into the master branch - - creates a tag and pushes branch and master to the specified origin (--remote) - - publishes the releases to sonatype and S3 - - send a mail based on github issues fixed by this version - -Once it's done it will print all the remaining steps. 
- - Prerequisites: - - Python 3k for script execution - - Boto for S3 Upload ($ apt-get install python-boto or pip-3.3 install boto) - - github3 module (pip-3.3 install github3.py) - - S3 keys exported via ENV Variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) - - GITHUB (login/password) or key exported via ENV Variables (GITHUB_LOGIN, GITHUB_PASSWORD or GITHUB_KEY) - (see https://github.com/settings/applications#personal-access-tokens) - Optional: default to no authentication - - SMTP_HOST - Optional: default to localhost - - MAIL_SENDER - Optional: default to 'david@pilato.fr': must be authorized to send emails to elasticsearch mailing list - - MAIL_TO - Optional: default to 'elasticsearch@googlegroups.com' -""" -env = os.environ - -LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log') -ROOT_DIR = abspath(os.path.join(abspath(dirname(__file__)), '../')) -README_FILE = ROOT_DIR + '/README.md' -POM_FILE = ROOT_DIR + '/pom.xml' -DEV_TOOLS_DIR = ROOT_DIR + '/dev-tools' - -########################################################## -# -# Utility methods (log and run) -# -########################################################## -# Log a message -def log(msg): - log_plain('\n%s' % msg) - - -# Purge the log file -def purge_log(): - os.remove(LOG) - - -# Log a message to the LOG file -def log_plain(msg): - f = open(LOG, mode='ab') - f.write(msg.encode('utf-8')) - f.close() - - -# Run a command and log it -def run(command, quiet=False): - log('%s: RUN: %s\n' % (datetime.datetime.now(), command)) - if os.system('%s >> %s 2>&1' % (command, LOG)): - msg = ' FAILED: %s [see log %s]' % (command, LOG) - if not quiet: - print(msg) - raise RuntimeError(msg) - -########################################################## -# -# Clean logs and check JAVA and Maven -# -########################################################## -try: - purge_log() - JAVA_HOME = env['JAVA_HOME'] -except KeyError: - raise RuntimeError(""" - Please set JAVA_HOME in the env before running 
release tool - On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""") - -try: - MVN = 'mvn' - # make sure mvn3 is used if mvn3 is available - # some systems use maven 2 as default - run('mvn3 --version', quiet=True) - MVN = 'mvn3' -except RuntimeError: - pass - - -def java_exe(): - path = JAVA_HOME - return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path) - - -########################################################## -# -# String and file manipulation utils -# -########################################################## -# Utility that returns the name of the release branch for a given version -def release_branch(branchsource, version): - return 'release_branch_%s_%s' % (branchsource, version) - - -# Reads the given file and applies the -# callback to it. If the callback changed -# a line the given file is replaced with -# the modified input. -def process_file(file_path, line_callback): - fh, abs_path = tempfile.mkstemp() - modified = False - with open(abs_path, 'w', encoding='utf-8') as new_file: - with open(file_path, encoding='utf-8') as old_file: - for line in old_file: - new_line = line_callback(line) - modified = modified or (new_line != line) - new_file.write(new_line) - os.close(fh) - if modified: - #Remove original file - os.remove(file_path) - #Move new file - shutil.move(abs_path, file_path) - return True - else: - # nothing to do - just remove the tmp file - os.remove(abs_path) - return False - - -# Split a version x.y.z as an array of digits [x,y,z] -def split_version_to_digits(version): - return list(map(int, re.findall(r'\d+', version))) - - -# Guess the next snapshot version number (increment last digit) -def guess_snapshot(version): - digits = split_version_to_digits(version) - source = '%s.%s.%s' % (digits[0], digits[1], digits[2]) - destination = '%s.%s.%s' % (digits[0], digits[1], digits[2] + 1) - return version.replace(source, destination) - - -# Guess the anchor in generated documentation -# 
Looks like this "#version-230-for-elasticsearch-13" -def get_doc_anchor(release, esversion): - plugin_digits = split_version_to_digits(release) - es_digits = split_version_to_digits(esversion) - return '#version-%s%s%s-for-elasticsearch-%s%s' % ( - plugin_digits[0], plugin_digits[1], plugin_digits[2], es_digits[0], es_digits[1]) - - -# Moves the pom.xml file from a snapshot to a release -def remove_maven_snapshot(pom, release): - pattern = '%s-SNAPSHOT' % release - replacement = '%s' % release - - def callback(line): - return line.replace(pattern, replacement) - - process_file(pom, callback) - - -# Moves the pom.xml file to the next snapshot -def add_maven_snapshot(pom, release, snapshot): - pattern = '%s' % release - replacement = '%s-SNAPSHOT' % snapshot - - def callback(line): - return line.replace(pattern, replacement) - - process_file(pom, callback) - - -# Moves the README.md file from a snapshot to a release version. Doc looks like: -# ## Version 2.5.0-SNAPSHOT for Elasticsearch: 1.x -# It needs to be updated to -# ## Version 2.5.0 for Elasticsearch: 1.x -def update_documentation_in_released_branch(readme_file, release, esversion): - pattern = '## Version (.)+ for Elasticsearch: (.)+' - es_digits = split_version_to_digits(esversion) - replacement = '## Version %s for Elasticsearch: %s.%s\n' % ( - release, es_digits[0], es_digits[1]) - - def callback(line): - # If we find pattern, we replace its content - if re.search(pattern, line) is not None: - return replacement - else: - return line - - process_file(readme_file, callback) - - -# Moves the README.md file from a snapshot to a release (documentation link) -# We need to find the right branch we are on and update the line -# | es-1.3 | Build from source | [2.4.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.3/#version-240-snapshot-for-elasticsearch-13) | -# | es-1.2 | 2.3.0 | 
[2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.3.0/#version-230-snapshot-for-elasticsearch-13) | -def update_documentation_to_released_version(readme_file, repo_url, release, branch, esversion): - pattern = '%s' % branch - replacement = '| %s | %s | [%s](%stree/v%s/%s) |\n' % ( - branch, release, release, repo_url, release, get_doc_anchor(release, esversion)) - - def callback(line): - # If we find pattern, we replace its content - if line.find(pattern) >= 0: - return replacement - else: - return line - - process_file(readme_file, callback) - - -# Update installation instructions in README.md file -def set_install_instructions(readme_file, artifact_name, release): - pattern = 'bin/plugin -install elasticsearch/%s/.+' % artifact_name - replacement = 'bin/plugin -install elasticsearch/%s/%s' % (artifact_name, release) - - def callback(line): - return re.sub(pattern, replacement, line) - - process_file(readme_file, callback) - - -# Checks the pom.xml for the release version. 2.0.0-SNAPSHOT -# This method fails if the pom file has no SNAPSHOT version set ie. -# if the version is already on a release version we fail. -# Returns the next version string ie. 
0.90.7 -def find_release_version(src_branch): - git_checkout(src_branch) - with open(POM_FILE, encoding='utf-8') as file: - for line in file: - match = re.search(r'(.+)-SNAPSHOT', line) - if match: - return match.group(1) - raise RuntimeError('Could not find release version in branch %s' % src_branch) - - -# extract a value from pom.xml -def find_from_pom(tag): - with open(POM_FILE, encoding='utf-8') as file: - for line in file: - match = re.search(r'<%s>(.+)' % (tag, tag), line) - if match: - return match.group(1) - raise RuntimeError('Could not find <%s> in pom.xml file' % (tag)) - - -# Get artifacts which have been generated in target/releases -def get_artifacts(artifact_id, release): - artifact_path = ROOT_DIR + '/target/releases/%s-%s.zip' % (artifact_id, release) - print(' Path %s' % artifact_path) - if not os.path.isfile(artifact_path): - raise RuntimeError('Could not find required artifact at %s' % artifact_path) - return artifact_path - - -# Generates sha1 for a file -# and returns the checksum files as well -# as the given files in a list -def generate_checksums(release_file): - res = [] - directory = os.path.dirname(release_file) - file = os.path.basename(release_file) - checksum_file = '%s.sha1.txt' % file - - if os.system('cd %s; shasum %s > %s' % (directory, file, checksum_file)): - raise RuntimeError('Failed to generate checksum for file %s' % release_file) - res += [os.path.join(directory, checksum_file), release_file] - return res - - -# Format a GitHub issue as plain text -def format_issues_plain(issues, title='Fix'): - response = "" - - if len(issues) > 0: - response += '%s:\n' % title - for issue in issues: - response += ' * [%s] - %s (%s)\n' % (issue.number, issue.title, issue.html_url) - - return response - - -# Format a GitHub issue as html text -def format_issues_html(issues, title='Fix'): - response = "" - - if len(issues) > 0: - response += '

    %s

    \n
      \n' % title - for issue in issues: - response += '
    • [%s] - %s\n' % (issue.html_url, issue.number, issue.title) - response += '
    \n' - - return response - - -########################################################## -# -# GIT commands -# -########################################################## -# Returns the hash of the current git HEAD revision -def get_head_hash(): - return os.popen('git rev-parse --verify HEAD 2>&1').read().strip() - - -# Returns the name of the current branch -def get_current_branch(): - return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip() - - -# runs get fetch on the given remote -def fetch(remote): - run('git fetch %s' % remote) - - -# Creates a new release branch from the given source branch -# and rebases the source branch from the remote before creating -# the release branch. Note: This fails if the source branch -# doesn't exist on the provided remote. -def create_release_branch(remote, src_branch, release): - git_checkout(src_branch) - run('git pull --rebase %s %s' % (remote, src_branch)) - run('git checkout -b %s' % (release_branch(src_branch, release))) - - -# Stages the given files for the next git commit -def add_pending_files(*files): - for file in files: - run('git add %s' % file) - - -# Executes a git commit with 'release [version]' as the commit message -def commit_release(artifact_id, release): - run('git commit -m "prepare release %s-%s"' % (artifact_id, release)) - - -# Commit documentation changes on the master branch -def commit_master(release): - run('git commit -m "update documentation with release %s"' % release) - - -# Commit next snapshot files -def commit_snapshot(): - run('git commit -m "prepare for next development iteration"') - - -# Put the version tag on on the current commit -def tag_release(release): - run('git tag -a v%s -m "Tag release version %s"' % (release, release)) - - -# Checkout a given branch -def git_checkout(branch): - run('git checkout %s' % branch) - - -# Merge the release branch with the actual branch -def git_merge(src_branch, release_version): - git_checkout(src_branch) - run('git merge %s' % 
release_branch(src_branch, release_version)) - - -# Push the actual branch and master branch -def git_push(remote, src_branch, release_version, dry_run): - if not dry_run: - run('git push %s %s master' % (remote, src_branch)) # push the commit and the master - run('git push %s v%s' % (remote, release_version)) # push the tag - else: - print(' dryrun [True] -- skipping push to remote %s %s master' % (remote, src_branch)) - - -########################################################## -# -# Maven commands -# -########################################################## -# Run a given maven command -def run_mvn(*cmd): - for c in cmd: - run('%s; %s -f %s %s' % (java_exe(), MVN, POM_FILE, c)) - - -# Run deploy or package depending on dry_run -# Default to run mvn package -# When run_tests=True a first mvn clean test is run -def build_release(run_tests=False, dry_run=True): - target = 'deploy' - tests = '-DskipTests' - if run_tests: - tests = '' - if dry_run: - target = 'package' - run_mvn('clean %s %s' % (target, tests)) - - -########################################################## -# -# Amazon S3 publish commands -# -########################################################## -# Upload files to S3 -def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=True): - location = os.path.dirname(os.path.realpath(__file__)) - for artifact in artifacts: - if dry_run: - print('Skip Uploading %s to Amazon S3 in %s' % (artifact, base)) - else: - print('Uploading %s to Amazon S3' % artifact) - # requires boto to be installed but it is not available on python3k yet so we use a dedicated tool - run('python %s/upload-s3.py --file %s --path %s' % (location, os.path.abspath(artifact), base)) - - -########################################################## -# -# Email and Github Management -# -########################################################## -# Create a Github repository instance to access issues -def get_github_repository(reponame, - 
login=env.get('GITHUB_LOGIN', None), - password=env.get('GITHUB_PASSWORD', None), - key=env.get('GITHUB_KEY', None)): - if login: - g = github3.login(login, password) - elif key: - g = github3.login(token=key) - else: - g = github3.GitHub() - - return g.repository("elasticsearch", reponame) - - -# Check if there are some remaining open issues and fails -def check_opened_issues(version, repository, reponame): - opened_issues = [i for i in repository.iter_issues(state='open', labels='%s' % version)] - if len(opened_issues) > 0: - raise NameError( - 'Some issues [%s] are still opened. Check https://github.com/elasticsearch/%s/issues?labels=%s&state=open' - % (len(opened_issues), reponame, version)) - - -# List issues from github: can be done anonymously if you don't -# exceed a given number of github API calls per day -def list_issues(version, - repository, - severity='bug'): - issues = [i for i in repository.iter_issues(state='closed', labels='%s,%s' % (severity, version))] - return issues - - -def read_email_template(format='html'): - file_name = '%s/email_template.%s' % (DEV_TOOLS_DIR, format) - log('open email template %s' % file_name) - with open(file_name, encoding='utf-8') as template_file: - data = template_file.read() - return data - - -# Read template messages -template_email_html = read_email_template('html') -template_email_txt = read_email_template('txt') - - -# Get issues from github and generates a Plain/HTML Multipart email -def prepare_email(artifact_id, release_version, repository, - artifact_name, artifact_description, project_url, - severity_labels_bug='bug', - severity_labels_update='update', - severity_labels_new='new', - severity_labels_doc='doc'): - ## Get bugs from github - issues_bug = list_issues(release_version, repository, severity=severity_labels_bug) - issues_update = list_issues(release_version, repository, severity=severity_labels_update) - issues_new = list_issues(release_version, repository, severity=severity_labels_new) - issues_doc 
= list_issues(release_version, repository, severity=severity_labels_doc) - - ## Format content to plain text - plain_issues_bug = format_issues_plain(issues_bug, 'Fix') - plain_issues_update = format_issues_plain(issues_update, 'Update') - plain_issues_new = format_issues_plain(issues_new, 'New') - plain_issues_doc = format_issues_plain(issues_doc, 'Doc') - - ## Format content to html - html_issues_bug = format_issues_html(issues_bug, 'Fix') - html_issues_update = format_issues_html(issues_update, 'Update') - html_issues_new = format_issues_html(issues_new, 'New') - html_issues_doc = format_issues_html(issues_doc, 'Doc') - - if len(issues_bug) + len(issues_update) + len(issues_new) + len(issues_doc) > 0: - plain_empty_message = "" - html_empty_message = "" - - else: - plain_empty_message = "No issue listed for this release" - html_empty_message = "

    No issue listed for this release

    " - - msg = MIMEMultipart('alternative') - msg['Subject'] = '[ANN] %s %s released' % (artifact_name, release_version) - text = template_email_txt % {'release_version': release_version, - 'artifact_id': artifact_id, - 'artifact_name': artifact_name, - 'artifact_description': artifact_description, - 'project_url': project_url, - 'empty_message': plain_empty_message, - 'issues_bug': plain_issues_bug, - 'issues_update': plain_issues_update, - 'issues_new': plain_issues_new, - 'issues_doc': plain_issues_doc} - - html = template_email_html % {'release_version': release_version, - 'artifact_id': artifact_id, - 'artifact_name': artifact_name, - 'artifact_description': artifact_description, - 'project_url': project_url, - 'empty_message': html_empty_message, - 'issues_bug': html_issues_bug, - 'issues_update': html_issues_update, - 'issues_new': html_issues_new, - 'issues_doc': html_issues_doc} - - # Record the MIME types of both parts - text/plain and text/html. - part1 = MIMEText(text, 'plain') - part2 = MIMEText(html, 'html') - - # Attach parts into message container. - # According to RFC 2046, the last part of a multipart message, in this case - # the HTML message, is best and preferred. 
- msg.attach(part1) - msg.attach(part2) - - return msg - - -def send_email(msg, - dry_run=True, - mail=True, - sender=env.get('MAIL_SENDER'), - to=env.get('MAIL_TO', 'elasticsearch@googlegroups.com'), - smtp_server=env.get('SMTP_SERVER', 'localhost')): - msg['From'] = 'Elasticsearch Team <%s>' % sender - msg['To'] = 'Elasticsearch Mailing List <%s>' % to - # save mail on disk - with open(ROOT_DIR + '/target/email.txt', 'w') as email_file: - email_file.write(msg.as_string()) - if mail and not dry_run: - s = smtplib.SMTP(smtp_server, 25) - s.sendmail(sender, to, msg.as_string()) - s.quit() - else: - print('generated email: open %s/target/email.txt' % ROOT_DIR) - print(msg.as_string()) - - -def print_sonatype_notice(): - settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml') - if os.path.isfile(settings): - with open(settings, encoding='utf-8') as settings_file: - for line in settings_file: - if line.strip() == 'sonatype-nexus-snapshots': - # moving out - we found the indicator no need to print the warning - return - print(""" - NOTE: No sonatype settings detected, make sure you have configured - your sonatype credentials in '~/.m2/settings.xml': - - - ... - - - sonatype-nexus-snapshots - your-jira-id - your-jira-pwd - - - sonatype-nexus-staging - your-jira-id - your-jira-pwd - - - ... - - """) - - -def check_s3_credentials(): - if not env.get('AWS_ACCESS_KEY_ID', None) or not env.get('AWS_SECRET_ACCESS_KEY', None): - raise RuntimeError( - 'Could not find "AWS_ACCESS_KEY_ID" / "AWS_SECRET_ACCESS_KEY" in the env variables please export in order to upload to S3') - - -def check_github_credentials(): - if not env.get('GITHUB_KEY', None) and not env.get('GITHUB_LOGIN', None): - log( - 'WARN: Could not find "GITHUB_LOGIN" / "GITHUB_PASSWORD" or "GITHUB_KEY" in the env variables. 
You could need it.') - - -def check_email_settings(): - if not env.get('MAIL_SENDER', None): - raise RuntimeError('Could not find "MAIL_SENDER"') - -# we print a notice if we can not find the relevant infos in the ~/.m2/settings.xml -print_sonatype_notice() - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Plugin Release') - parser.add_argument('--branch', '-b', metavar='master', default=get_current_branch(), - help='The branch to release from. Defaults to the current branch.') - parser.add_argument('--skiptests', '-t', dest='tests', action='store_false', - help='Skips tests before release. Tests are run by default.') - parser.set_defaults(tests=True) - parser.add_argument('--remote', '-r', metavar='origin', default='origin', - help='The remote to push the release commit and tag to. Default is [origin]') - parser.add_argument('--publish', '-p', dest='dryrun', action='store_false', - help='Publishes the release. Disable by default.') - parser.add_argument('--disable_mail', '-dm', dest='mail', action='store_false', - help='Do not send a release email. Email is sent by default.') - - parser.set_defaults(dryrun=True) - parser.set_defaults(mail=True) - args = parser.parse_args() - - src_branch = args.branch - remote = args.remote - run_tests = args.tests - dry_run = args.dryrun - mail = args.mail - - if src_branch == 'master': - raise RuntimeError('Can not release the master branch. 
You need to create another branch before a release') - - if not dry_run: - check_s3_credentials() - print('WARNING: dryrun is set to "false" - this will push and publish the release') - if mail: - check_email_settings() - print('An email to %s will be sent after the release' - % env.get('MAIL_TO', 'elasticsearch@googlegroups.com')) - input('Press Enter to continue...') - - check_github_credentials() - - print(''.join(['-' for _ in range(80)])) - print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run)) - print(' JAVA_HOME is [%s]' % JAVA_HOME) - print(' Running with maven command: [%s] ' % (MVN)) - - release_version = find_release_version(src_branch) - artifact_id = find_from_pom('artifactId') - artifact_name = find_from_pom('name') - artifact_description = find_from_pom('description') - project_url = find_from_pom('url') - elasticsearch_version = find_from_pom('elasticsearch.version') - print(' Artifact Id: [%s]' % artifact_id) - print(' Release version: [%s]' % release_version) - print(' Elasticsearch: [%s]' % elasticsearch_version) - if elasticsearch_version.find('-SNAPSHOT') != -1: - raise RuntimeError('Can not release with a SNAPSHOT elasticsearch dependency: %s' % elasticsearch_version) - - # extract snapshot - default_snapshot_version = guess_snapshot(release_version) - snapshot_version = input('Enter next snapshot version [%s]:' % default_snapshot_version) - snapshot_version = snapshot_version or default_snapshot_version - - print(' Next version: [%s-SNAPSHOT]' % snapshot_version) - print(' Artifact Name: [%s]' % artifact_name) - print(' Artifact Description: [%s]' % artifact_description) - print(' Project URL: [%s]' % project_url) - - if not dry_run: - smoke_test_version = release_version - - try: - git_checkout('master') - master_hash = get_head_hash() - git_checkout(src_branch) - version_hash = get_head_hash() - run_mvn('clean') # clean the env! 
- create_release_branch(remote, 'master', release_version) - print(' Created release branch [%s]' % (release_branch('master', release_version))) - create_release_branch(remote, src_branch, release_version) - print(' Created release branch [%s]' % (release_branch(src_branch, release_version))) - except RuntimeError: - print('Logs:') - with open(LOG, 'r') as log_file: - print(log_file.read()) - sys.exit(-1) - - success = False - try: - ######################################## - # Start update process in version branch - ######################################## - pending_files = [POM_FILE, README_FILE] - remove_maven_snapshot(POM_FILE, release_version) - update_documentation_in_released_branch(README_FILE, release_version, elasticsearch_version) - print(' Done removing snapshot version') - add_pending_files(*pending_files) # expects var args use * to expand - commit_release(artifact_id, release_version) - print(' Committed release version [%s]' % release_version) - print(''.join(['-' for _ in range(80)])) - print('Building Release candidate') - input('Press Enter to continue...') - print(' Checking github issues') - repository = get_github_repository(artifact_id) - check_opened_issues(release_version, repository, artifact_id) - if not dry_run: - print(' Running maven builds now and publish to sonatype - run-tests [%s]' % run_tests) - else: - print(' Running maven builds now run-tests [%s]' % run_tests) - build_release(run_tests=run_tests, dry_run=dry_run) - artifact = get_artifacts(artifact_id, release_version) - artifact_and_checksums = generate_checksums(artifact) - print(''.join(['-' for _ in range(80)])) - - ######################################## - # Start update process in master branch - ######################################## - git_checkout(release_branch('master', release_version)) - update_documentation_to_released_version(README_FILE, project_url, release_version, src_branch, - elasticsearch_version) - set_install_instructions(README_FILE, artifact_id, 
release_version) - add_pending_files(*pending_files) # expects var args use * to expand - commit_master(release_version) - - print('Finish Release -- dry_run: %s' % dry_run) - input('Press Enter to continue...') - - print(' merge release branch') - git_merge(src_branch, release_version) - print(' tag') - tag_release(release_version) - - add_maven_snapshot(POM_FILE, release_version, snapshot_version) - update_documentation_in_released_branch(README_FILE, '%s-SNAPSHOT' % snapshot_version, elasticsearch_version) - add_pending_files(*pending_files) - commit_snapshot() - - print(' merge master branch') - git_merge('master', release_version) - - print(' push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run)) - git_push(remote, src_branch, release_version, dry_run) - print(' publish artifacts to S3 -- dry_run: %s' % dry_run) - publish_artifacts(artifact_and_checksums, base='elasticsearch/%s' % (artifact_id) , dry_run=dry_run) - print(' preparing email (from github issues)') - msg = prepare_email(artifact_id, release_version, repository, artifact_name, artifact_description, project_url) - input('Press Enter to send email...') - print(' sending email -- dry_run: %s, mail: %s' % (dry_run, mail)) - send_email(msg, dry_run=dry_run, mail=mail) - - pending_msg = """ -Release successful pending steps: - * close and release sonatype repo: https://oss.sonatype.org/ - * check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/%(artifact_id)s/%(version)s - * tweet about the release -""" - print(pending_msg % {'version': release_version, - 'artifact_id': artifact_id, - 'project_url': project_url}) - success = True - finally: - if not success: - print('Logs:') - with open(LOG, 'r') as log_file: - print(log_file.read()) - git_checkout('master') - run('git reset --hard %s' % master_hash) - git_checkout(src_branch) - run('git reset --hard %s' % version_hash) - try: - run('git tag -d v%s' % release_version) - except RuntimeError: - pass 
- elif dry_run: - print('End of dry_run') - input('Press Enter to reset changes...') - git_checkout('master') - run('git reset --hard %s' % master_hash) - git_checkout(src_branch) - run('git reset --hard %s' % version_hash) - run('git tag -d v%s' % release_version) - - # we delete this one anyways - run('git branch -D %s' % (release_branch('master', release_version))) - run('git branch -D %s' % (release_branch(src_branch, release_version))) diff --git a/dev-tools/email_template.html b/dev-tools/email_template.html deleted file mode 100644 index 96d0b4b8d19..00000000000 --- a/dev-tools/email_template.html +++ /dev/null @@ -1,24 +0,0 @@ - - -

    Heya,

    - -

    We are pleased to announce the release of the %(artifact_name)s, version %(release_version)s

    - -
    %(artifact_description)s.
    - -

    Release Notes - Version %(release_version)s

    -%(empty_message)s -%(issues_bug)s -%(issues_update)s -%(issues_new)s -%(issues_doc)s - -

    Issues, Pull requests, Feature requests are warmly welcome on - %(artifact_id)s project repository!

    -

    For questions or comments around this plugin, feel free to use elasticsearch - mailing list!

    - -

    Enjoy,

    -

    - The Elasticsearch team

    - - diff --git a/dev-tools/email_template.txt b/dev-tools/email_template.txt deleted file mode 100644 index 1550849c2ac..00000000000 --- a/dev-tools/email_template.txt +++ /dev/null @@ -1,23 +0,0 @@ -Heya, - - -We are pleased to announce the release of the %(artifact_name)s, version %(release_version)s. - -%(artifact_description)s. - -%(project_url)s - -Release Notes - %(artifact_id)s - Version %(release_version)s - -%(empty_message)s -%(issues_bug)s -%(issues_update)s -%(issues_new)s -%(issues_doc)s - -Issues, Pull requests, Feature requests are warmly welcome on %(artifact_id)s project repository: %(project_url)s -For questions or comments around this plugin, feel free to use elasticsearch mailing list: https://groups.google.com/forum/#!forum/elasticsearch - -Enjoy, - --The Elasticsearch team diff --git a/dev-tools/release.py b/dev-tools/release.py new file mode 100644 index 00000000000..edcc637d068 --- /dev/null +++ b/dev-tools/release.py @@ -0,0 +1,134 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +import datetime +import os +import shutil +import sys +import time +import urllib +import urllib.request +import zipfile + +from os.path import dirname, abspath + +""" + This tool builds a release from the a given elasticsearch plugin branch. 
+ + It is basically a wrapper on top of launch_release.py which: + + - tries to get a more recent version of launch_release.py in ... + - download it if needed + - launch it passing all arguments to it, like: + + $ python3 dev_tools/release.py --branch master --publish --remote origin + + Important options: + + # Dry run + $ python3 dev_tools/release.py + + # Dry run without tests + python3 dev_tools/release.py --skiptests + + # Release, publish artifacts and announce + $ python3 dev_tools/release.py --publish + + See full documentation in launch_release.py +""" +env = os.environ + +# Change this if the source repository for your scripts is at a different location +SOURCE_REPO = 'elasticsearch/elasticsearch-plugins-script' +# We define that we should download again the script after 1 days +SCRIPT_OBSOLETE_DAYS = 1 +# We ignore in master.zip file the following files +IGNORED_FILES = ['.gitignore', 'README.md'] + + +ROOT_DIR = abspath(os.path.join(abspath(dirname(__file__)), '../')) +TARGET_TOOLS_DIR = ROOT_DIR + '/plugin_tools' +DEV_TOOLS_DIR = ROOT_DIR + '/dev-tools' +BUILD_RELEASE_FILENAME = 'release.zip' +BUILD_RELEASE_FILE = TARGET_TOOLS_DIR + '/' + BUILD_RELEASE_FILENAME +SOURCE_URL = 'https://github.com/%s/archive/master.zip' % SOURCE_REPO + +# Download a recent version of the release plugin tool +try: + os.mkdir(TARGET_TOOLS_DIR) + print('directory %s created' % TARGET_TOOLS_DIR) +except FileExistsError: + pass + + +try: + # we check latest update. 
If we ran an update recently, we + # are not going to check it again + download = True + + try: + last_download_time = datetime.datetime.fromtimestamp(os.path.getmtime(BUILD_RELEASE_FILE)) + if (datetime.datetime.now()-last_download_time).days < SCRIPT_OBSOLETE_DAYS: + download = False + except FileNotFoundError: + pass + + if download: + urllib.request.urlretrieve(SOURCE_URL, BUILD_RELEASE_FILE) + with zipfile.ZipFile(BUILD_RELEASE_FILE) as myzip: + for member in myzip.infolist(): + filename = os.path.basename(member.filename) + # skip directories + if not filename: + continue + if filename in IGNORED_FILES: + continue + + # copy file (taken from zipfile's extract) + source = myzip.open(member.filename) + target = open(os.path.join(TARGET_TOOLS_DIR, filename), "wb") + with source, target: + shutil.copyfileobj(source, target) + # We keep the original date + date_time = time.mktime(member.date_time + (0, 0, -1)) + os.utime(os.path.join(TARGET_TOOLS_DIR, filename), (date_time, date_time)) + print('plugin-tools updated from %s' % SOURCE_URL) +except urllib.error.HTTPError: + pass + + +# Let see if we need to update the release.py script itself +source_time = os.path.getmtime(TARGET_TOOLS_DIR + '/release.py') +repo_time = os.path.getmtime(DEV_TOOLS_DIR + '/release.py') +if source_time > repo_time: + input('release.py needs an update. 
Press a key to update it...') + shutil.copyfile(TARGET_TOOLS_DIR + '/release.py', DEV_TOOLS_DIR + '/release.py') + +# We can launch the build process +try: + PYTHON = 'python' + # make sure python3 is used if python3 is available + # some systems use python 2 as default + os.system('python3 --version > /dev/null 2>&1') + PYTHON = 'python3' +except RuntimeError: + pass + +release_args = '' +for x in range(1, len(sys.argv)): + release_args += ' ' + sys.argv[x] + +os.system('%s %s/build_release.py %s' % (PYTHON, TARGET_TOOLS_DIR, release_args)) diff --git a/dev-tools/upload-s3.py b/dev-tools/upload-s3.py deleted file mode 100644 index 95ea576e65c..00000000000 --- a/dev-tools/upload-s3.py +++ /dev/null @@ -1,67 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on -# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the License. 
- -import os -import sys -import argparse -try: - import boto.s3 -except: - raise RuntimeError(""" - S3 upload requires boto to be installed - Use one of: - 'pip install -U boto' - 'apt-get install python-boto' - 'easy_install boto' - """) - -import boto.s3 - - -def list_buckets(conn): - return conn.get_all_buckets() - - -def upload_s3(conn, path, key, file, bucket): - print 'Uploading %s to Amazon S3 bucket %s/%s' % \ - (file, bucket, os.path.join(path, key)) - def percent_cb(complete, total): - sys.stdout.write('.') - sys.stdout.flush() - bucket = conn.create_bucket(bucket) - k = bucket.new_key(os.path.join(path, key)) - k.set_contents_from_filename(file, cb=percent_cb, num_cb=100) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Uploads files to Amazon S3') - parser.add_argument('--file', '-f', metavar='path to file', - help='the branch to release from', required=True) - parser.add_argument('--bucket', '-b', metavar='B42', default='download.elasticsearch.org', - help='The S3 Bucket to upload to') - parser.add_argument('--path', '-p', metavar='elasticsearch/elasticsearch', default='elasticsearch/elasticsearch', - help='The key path to use') - parser.add_argument('--key', '-k', metavar='key', default=None, - help='The key - uses the file name as default key') - args = parser.parse_args() - if args.key: - key = args.key - else: - key = os.path.basename(args.file) - - connection = boto.connect_s3() - upload_s3(connection, args.path, key, args.file, args.bucket); - From f5d35004a0e8965a9293cf0f6f6bea2fda3dcaca Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 5 Sep 2014 16:26:42 +0200 Subject: [PATCH 056/125] ZenDiscovery constructor needs ElectMasterService instance Introduced in https://github.com/elasticsearch/elasticsearch/pull/7336 (elasticsearch 1.4 and 2.0), we need to change AzureDiscovery constructor. Closes #34. 
--- .../org/elasticsearch/discovery/azure/AzureDiscovery.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 155ce477820..b890b327e56 100755 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; @@ -48,9 +49,9 @@ public class AzureDiscovery extends ZenDiscovery { public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, DiscoveryNodeService discoveryNodeService, AzureComputeService azureService, NetworkService networkService, - DiscoverySettings discoverySettings) { + DiscoverySettings discoverySettings, ElectMasterService electMasterService) { super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, - discoveryNodeService, pingService, Version.CURRENT, discoverySettings); + discoveryNodeService, pingService, electMasterService, Version.CURRENT, discoverySettings); if (settings.getAsBoolean("cloud.enabled", true)) { ImmutableList zenPings = pingService.zenPings(); UnicastZenPing unicastZenPing = null; From 29aae071c4958652d4c5d4b82cbcdea80b33bbc8 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 5 Sep 2014 16:45:31 +0200 Subject: 
[PATCH 057/125] Replace Strings.startsWithIgnoreCase() `Strings.startsWithIgnoreCase()` has been removed in elasticsearch 1.4 ans master. See https://github.com/elasticsearch/elasticsearch/pull/7428 Closes #35. --- .../azure/AzureStorageServiceMock.java | 27 +++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 63b1d12bd35..064823ee530 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -21,7 +21,6 @@ package org.elasticsearch.repositories.azure; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.azure.AzureStorageService; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.ImmutableMap; @@ -33,6 +32,7 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; +import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -85,7 +85,7 @@ public class AzureStorageServiceMock extends AbstractLifecycleComponent listBlobsByPrefix(String container, String keyPath, String prefix) { ImmutableMap.Builder blobsBuilder = ImmutableMap.builder(); for (String blobName : blobs.keySet()) { - if (Strings.startsWithIgnoreCase(blobName, prefix)) { + if (startsWithIgnoreCase(blobName, prefix)) { blobsBuilder.put(blobName, new PlainBlobMetaData(blobName, blobs.get(blobName).length)); } } @@ -123,4 +123,27 @@ public class AzureStorageServiceMock extends AbstractLifecycleComponent Date: Wed, 22 Oct 2014 15:29:21 +0200 Subject: [PATCH 058/125] BlobContainer interface changed in 
elasticsearch 1.4.0 AWS plugin needs an update because of this change https://github.com/elasticsearch/elasticsearch/pull/7551 Closes #37. --- README.md | 2 - pom.xml | 2 +- .../cloud/azure/AzureStorageService.java | 9 +-- .../cloud/azure/AzureStorageServiceImpl.java | 17 ++--- ...Container.java => AzureBlobContainer.java} | 63 +++++++++---------- .../cloud/azure/blobstore/AzureBlobStore.java | 26 ++------ .../AzureImmutableBlobContainer.java | 57 ----------------- .../azure/blobstore/AzureOutputStream.java | 46 ++++++++++++++ .../discovery/azure/AzureDiscovery.java | 3 +- .../repositories/azure/AzureRepository.java | 15 +---- .../azure/AzureStorageServiceMock.java | 39 ++++-------- 11 files changed, 107 insertions(+), 172 deletions(-) rename src/main/java/org/elasticsearch/cloud/azure/blobstore/{AbstractAzureBlobContainer.java => AzureBlobContainer.java} (75%) delete mode 100644 src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureImmutableBlobContainer.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java diff --git a/README.md b/README.md index e9720a94ca6..f5970e9b236 100644 --- a/README.md +++ b/README.md @@ -378,7 +378,6 @@ The Azure repository supports following settings: * `container`: Container name. Defaults to `elasticsearch-snapshots` * `base_path`: Specifies the path within container to repository data. Defaults to empty (root directory). -* `concurrent_streams`: Throttles the number of streams (per node) preforming snapshot operation. Defaults to `5`. * `chunk_size`: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by using size value notation, i.e. `1g`, `10m`, `5k`. Defaults to `64m` (64m max) * `compress`: When set to `true` metadata files are stored in compressed format. 
This setting doesn't affect index @@ -398,7 +397,6 @@ $ curl -XPUT 'http://localhost:9200/_snapshot/my_backup2' -d '{ "settings": { "container": "backup_container", "base_path": "backups", - "concurrent_streams": 2, "chunk_size": "32m", "compress": true } diff --git a/pom.xml b/pom.xml index e0baedb018c..1e0fa3aa07d 100644 --- a/pom.xml +++ b/pom.xml @@ -44,7 +44,7 @@ governing permissions and limitations under the License. --> 2.0.0-SNAPSHOT - 4.9.0 + 4.10.1 onerror true onerror diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java index 700fc6ab4ac..27a0ec04bf5 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java @@ -24,8 +24,8 @@ import com.microsoft.windowsazure.services.core.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.collect.ImmutableMap; -import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.URISyntaxException; /** @@ -38,7 +38,6 @@ public interface AzureStorageService { public static final String KEY = "storage_key"; public static final String CONTAINER = "container"; public static final String BASE_PATH = "base_path"; - public static final String CONCURRENT_STREAMS = "concurrent_streams"; public static final String CHUNK_SIZE = "chunk_size"; public static final String COMPRESS = "compress"; } @@ -57,9 +56,7 @@ public interface AzureStorageService { InputStream getInputStream(String container, String blob) throws ServiceException; + OutputStream getOutputStream(String container, String blob) throws URISyntaxException, StorageException; + ImmutableMap listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException, ServiceException; - - void putObject(String container, String blob, InputStream is, 
long length) throws URISyntaxException, StorageException, IOException; - - } diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java index e968ec7a4f5..df0b8f20aaf 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -43,8 +43,8 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; -import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.URI; import java.net.URISyntaxException; import java.util.List; @@ -197,6 +197,11 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException, ServiceException { logger.debug("listBlobsByPrefix container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); @@ -223,16 +228,6 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent listBlobsByPrefix(@Nullable String prefix) throws IOException { diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index 3958e94a956..4c4d21f757c 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -22,16 +22,13 @@ package org.elasticsearch.cloud.azure.blobstore; import com.microsoft.windowsazure.services.core.ServiceException; import com.microsoft.windowsazure.services.core.storage.StorageException; import org.elasticsearch.cloud.azure.AzureStorageService; +import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; 
import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.blobstore.ImmutableBlobContainer; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; import java.net.URISyntaxException; -import java.util.concurrent.Executor; /** * @@ -42,17 +39,10 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { private final String container; - private final Executor executor; - - private final int bufferSizeInBytes; - - public AzureBlobStore(Settings settings, AzureStorageService client, String container, Executor executor) throws URISyntaxException, StorageException { + public AzureBlobStore(Settings settings, AzureStorageService client, String container) throws URISyntaxException, StorageException { super(settings); this.client = client; this.container = container; - this.executor = executor; - - this.bufferSizeInBytes = (int) settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes(); if (!client.doesContainerExist(container)) { client.createContainer(container); @@ -72,17 +62,9 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { return container; } - public Executor executor() { - return executor; - } - - public int bufferSizeInBytes() { - return bufferSizeInBytes; - } - @Override - public ImmutableBlobContainer immutableBlobContainer(BlobPath path) { - return new AzureImmutableBlobContainer(path, this); + public BlobContainer blobContainer(BlobPath path) { + return new AzureBlobContainer(path, this); } @Override diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureImmutableBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureImmutableBlobContainer.java deleted file mode 100644 index 2b4f80ba88b..00000000000 --- 
a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureImmutableBlobContainer.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cloud.azure.blobstore; - -import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.ImmutableBlobContainer; -import org.elasticsearch.common.blobstore.support.BlobStores; - -import java.io.IOException; -import java.io.InputStream; - -/** - * - */ -public class AzureImmutableBlobContainer extends AbstractAzureBlobContainer implements ImmutableBlobContainer { - - public AzureImmutableBlobContainer(BlobPath path, AzureBlobStore blobStore) { - super(path, blobStore); - } - - @Override - public void writeBlob(final String blobName, final InputStream is, final long sizeInBytes, final WriterListener listener) { - blobStore.executor().execute(new Runnable() { - @Override - public void run() { - try { - blobStore.client().putObject(blobStore.container(), buildKey(blobName), is, sizeInBytes); - listener.onCompleted(); - } catch (Throwable e) { - listener.onFailure(e); - } - } - }); - } - - @Override - public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException { - 
BlobStores.syncWriteBlob(this, blobName, is, sizeInBytes); - } -} diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java new file mode 100644 index 00000000000..6a95eeba778 --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureOutputStream.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cloud.azure.blobstore; + +import java.io.IOException; +import java.io.OutputStream; + +public class AzureOutputStream extends OutputStream { + + private final OutputStream blobOutputStream; + + public AzureOutputStream(OutputStream blobOutputStream) { + this.blobOutputStream = blobOutputStream; + } + + @Override + public void write(int b) throws IOException { + blobOutputStream.write(b); + } + + @Override + public void close() throws IOException { + try { + blobOutputStream.close(); + } catch (IOException e) { + // Azure is sending a "java.io.IOException: Stream is already closed." 
+ } + } +} diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index b890b327e56..7843880c719 100755 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.azure; -import org.elasticsearch.Version; import org.elasticsearch.cloud.azure.AzureComputeService; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; @@ -51,7 +50,7 @@ public class AzureDiscovery extends ZenDiscovery { DiscoveryNodeService discoveryNodeService, AzureComputeService azureService, NetworkService networkService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, - discoveryNodeService, pingService, electMasterService, Version.CURRENT, discoverySettings); + discoveryNodeService, pingService, electMasterService, discoverySettings); if (settings.getAsBoolean("cloud.enabled", true)) { ImmutableList zenPings = pingService.zenPings(); UnicastZenPing unicastZenPing = null; diff --git a/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 795ff123982..f8eb8442e32 100644 --- a/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.snapshots.IndexShardRepository; import 
org.elasticsearch.repositories.RepositoryName; import org.elasticsearch.repositories.RepositorySettings; @@ -36,8 +35,6 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import java.io.IOException; import java.net.URISyntaxException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; /** * Azure file system implementation of the BlobStoreRepository @@ -46,7 +43,6 @@ import java.util.concurrent.TimeUnit; *
    *
    {@code container}
    Azure container name. Defaults to elasticsearch-snapshots
    *
    {@code base_path}
    Specifies the path within bucket to repository data. Defaults to root directory.
    - *
    {@code concurrent_streams}
    Number of concurrent read/write stream (per repository on each node). Defaults to 5.
    *
    {@code chunk_size}
    Large file can be divided into chunks. This parameter specifies the chunk size. Defaults to 64mb.
    *
    {@code compress}
    If set to true metadata files will be stored compressed. Defaults to false.
    *
    @@ -80,12 +76,7 @@ public class AzureRepository extends BlobStoreRepository { String container = repositorySettings.settings().get(AzureStorageService.Fields.CONTAINER, componentSettings.get(AzureStorageService.Fields.CONTAINER, CONTAINER_DEFAULT)); - int concurrentStreams = repositorySettings.settings().getAsInt(AzureStorageService.Fields.CONCURRENT_STREAMS, - componentSettings.getAsInt(AzureStorageService.Fields.CONCURRENT_STREAMS, 5)); - ExecutorService concurrentStreamPool = EsExecutors.newScaling(1, concurrentStreams, 5, TimeUnit.SECONDS, - EsExecutors.daemonThreadFactory(settings, "[azure_stream]")); - - this.blobStore = new AzureBlobStore(settings, azureStorageService, container, concurrentStreamPool); + this.blobStore = new AzureBlobStore(settings, azureStorageService, container); this.chunkSize = repositorySettings.settings().getAsBytesSize(AzureStorageService.Fields.CHUNK_SIZE, componentSettings.getAsBytesSize(AzureStorageService.Fields.CHUNK_SIZE, new ByteSizeValue(64, ByteSizeUnit.MB))); @@ -109,8 +100,8 @@ public class AzureRepository extends BlobStoreRepository { } else { this.basePath = BlobPath.cleanPath(); } - logger.debug("using container [{}], chunk_size [{}], concurrent_streams [{}], compress [{}], base_path [{}]", - container, chunkSize, concurrentStreams, compress, basePath); + logger.debug("using container [{}], chunk_size [{}], compress [{}], base_path [{}]", + container, chunkSize, compress, basePath); } /** diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 064823ee530..2c1aa616894 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.azure; +import com.microsoft.windowsazure.services.core.storage.StorageException; import 
org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.azure.AzureStorageService; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -28,10 +29,8 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; +import java.io.*; +import java.net.URISyntaxException; import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -42,7 +41,7 @@ import java.util.concurrent.ConcurrentHashMap; public class AzureStorageServiceMock extends AbstractLifecycleComponent implements AzureStorageService { - protected Map blobs = new ConcurrentHashMap(); + protected Map blobs = new ConcurrentHashMap(); @Inject protected AzureStorageServiceMock(Settings settings) { @@ -78,7 +77,14 @@ public class AzureStorageServiceMock extends AbstractLifecycleComponent blobsBuilder = ImmutableMap.builder(); for (String blobName : blobs.keySet()) { if (startsWithIgnoreCase(blobName, prefix)) { - blobsBuilder.put(blobName, new PlainBlobMetaData(blobName, blobs.get(blobName).length)); + blobsBuilder.put(blobName, new PlainBlobMetaData(blobName, blobs.get(blobName).size())); } } ImmutableMap map = blobsBuilder.build(); return map; } - @Override - public void putObject(String container, String blob, InputStream is, long length) { - try { - ByteArrayOutputStream buffer = new ByteArrayOutputStream(); - - int nRead; - byte[] data = new byte[65535]; - - while ((nRead = is.read(data, 0, data.length)) != -1) { - buffer.write(data, 0, nRead); - } - - buffer.flush(); - - blobs.put(blob, buffer.toByteArray()); - } catch (IOException e) { - } - } - @Override protected void doStart() throws ElasticsearchException { } From ca7540f5028deebc0650465096aa0192e696d90a Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 
30 Oct 2014 15:42:30 +0100 Subject: [PATCH 059/125] Update to Lucene 4.10.2 Closes #42. --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 1e0fa3aa07d..131d4559c0a 100644 --- a/pom.xml +++ b/pom.xml @@ -44,7 +44,7 @@ governing permissions and limitations under the License. --> 2.0.0-SNAPSHOT - 4.10.1 + 4.10.2 onerror true onerror From 3ae755ed9aaa5adfe7ff99bdb85951daebf03276 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Oct 2014 16:07:54 +0100 Subject: [PATCH 060/125] Tests: upgrade randomizedtesting-runner to 2.1.10 Closes #43. (cherry picked from commit 6e5efe6) (cherry picked from commit 675a860) --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index 131d4559c0a..d2549d7d134 100644 --- a/pom.xml +++ b/pom.xml @@ -66,6 +66,12 @@ governing permissions and limitations under the License. --> 1.3.RC2 test
    + + com.carrotsearch.randomizedtesting + randomizedtesting-runner + 2.1.10 + test + org.apache.lucene lucene-test-framework From 9e44ceb9045520084e6b9cacdf773052de4ec908 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Oct 2014 16:08:34 +0100 Subject: [PATCH 061/125] Update to elasticsearch 1.4.0 Related to #30 (cherry picked from commit 442d06e) (cherry picked from commit aa27eb9) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f5970e9b236..bd591c9ce53 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,8 @@ You need to install a version matching your Elasticsearch version: | Elasticsearch | Azure Cloud Plugin| Docs | |------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------| | master | Build from source | See below | -| es-1.x | Build from source | [2.5.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#azure-cloud-plugin-for-elasticsearch) | +| es-1.x | Build from source | [2.6.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#version-260-snapshot-for-elasticsearch-1x)| +| es-1.4 | Build from source | [2.5.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.4/#version-250-snapshot-for-elasticsearch-14)| | es-1.3 | 2.4.0 | [2.4.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.4.0/#version-240-for-elasticsearch-13) | | es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.3.0/#azure-cloud-plugin-for-elasticsearch) | | es-1.1 | 2.2.0 | [2.2.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.2.0/#azure-cloud-plugin-for-elasticsearch) | From ab87790ed7bd8bb80d2b5079dac88c01680588f6 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Thu, 30 Oct 2014 16:08:34 +0100 Subject: [PATCH 062/125] Docs: more 
information about certificate management Closes #33. --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bd591c9ce53..a1f1a9090bd 100644 --- a/README.md +++ b/README.md @@ -107,8 +107,10 @@ cat azure-cert.pem azure-pk.pem > azure.pem.txt openssl pkcs12 -export -in azure.pem.txt -out azurekeystore.pkcs12 -name azure -noiter -nomaciter ``` -Upload the generated key to Azure platform. **Important**: when prompted for a password, -you need to enter a non empty one. +Upload the `azure-certificate.cer` file both in the elasticsearch Cloud Service (under `Manage Certificates`), +and under `Settings -> Manage Certificates`. + +**Important**: when prompted for a password, you need to enter a non empty one. See this [guide](http://www.windowsazure.com/en-us/manage/linux/how-to-guides/ssh-into-linux/) to have more details on how to create keys for Azure. From 8e31e1edf14781ff33c1cac9c1c3e00caa3ecedc Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Wed, 5 Nov 2014 18:01:11 -0500 Subject: [PATCH 063/125] upgrade to lucene 5 snapshot (the compile is still broken as it was before, so untested) --- pom.xml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index d2549d7d134..c604054b1b3 100644 --- a/pom.xml +++ b/pom.xml @@ -44,7 +44,8 @@ governing permissions and limitations under the License. --> 2.0.0-SNAPSHOT - 4.10.2 + 5.0.0 + 5.0.0-snapshot-1636426 onerror true onerror @@ -53,6 +54,17 @@ governing permissions and limitations under the License. --> INFO + + + sonatype + http://oss.sonatype.org/content/repositories/releases/ + + + Lucene snapshots + https://download.elasticsearch.org/lucenesnapshots/maven/ + + + org.hamcrest @@ -75,7 +87,7 @@ governing permissions and limitations under the License. 
--> org.apache.lucene lucene-test-framework - ${lucene.version} + ${lucene.maven.version} test From 957df9c4e9dc3cf2e6948bb7c03c1845ffeff44d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 5 Nov 2014 15:33:30 -0800 Subject: [PATCH 064/125] Fix constructor for ZenDiscovery's subclass --- .../org/elasticsearch/discovery/azure/AzureDiscovery.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 7843880c719..7704885cf97 100755 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -23,6 +23,7 @@ import org.elasticsearch.cloud.azure.AzureComputeService; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNodeService; +import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.common.collect.ImmutableList; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; @@ -48,9 +49,9 @@ public class AzureDiscovery extends ZenDiscovery { public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, DiscoveryNodeService discoveryNodeService, AzureComputeService azureService, NetworkService networkService, - DiscoverySettings discoverySettings, ElectMasterService electMasterService) { + DiscoverySettings discoverySettings, ElectMasterService electMasterService, DynamicSettings dynamicSettings) { super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, - discoveryNodeService, pingService, electMasterService, discoverySettings); + discoveryNodeService, pingService, 
electMasterService, discoverySettings, dynamicSettings); if (settings.getAsBoolean("cloud.enabled", true)) { ImmutableList zenPings = pingService.zenPings(); UnicastZenPing unicastZenPing = null; From 099f9bba219e20b767f7b35c9fbbc999bfc8ccac Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Mon, 10 Nov 2014 16:46:26 -0500 Subject: [PATCH 065/125] Upgrade to Lucene 5.0.0-snapshot-1637347 --- pom.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pom.xml b/pom.xml index c604054b1b3..c81ee4e8498 100644 --- a/pom.xml +++ b/pom.xml @@ -45,7 +45,7 @@ governing permissions and limitations under the License. --> 2.0.0-SNAPSHOT 5.0.0 - 5.0.0-snapshot-1636426 + 5.0.0-snapshot-1637347 onerror true onerror @@ -56,12 +56,12 @@ governing permissions and limitations under the License. --> - sonatype - http://oss.sonatype.org/content/repositories/releases/ + Lucene snapshots + https://download.elasticsearch.org/lucenesnapshots/1637347/ - Lucene snapshots - https://download.elasticsearch.org/lucenesnapshots/maven/ + sonatype + http://oss.sonatype.org/content/repositories/releases/ From 849f64d79879e873778100a793500f9467c3a2a9 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 12 Nov 2014 10:29:22 +0100 Subject: [PATCH 066/125] Change in BlobContainer deleteBlob does not return boolean Due to this change: https://github.com/elasticsearch/elasticsearch/pull/8366 Closes #44. 
--- .../cloud/azure/blobstore/AzureBlobContainer.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index 2b3e6fc9f91..6d8c9abb69a 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -95,10 +95,9 @@ public class AzureBlobContainer extends AbstractBlobContainer { } @Override - public boolean deleteBlob(String blobName) throws IOException { + public void deleteBlob(String blobName) throws IOException { try { blobStore.client().deleteBlob(blobStore.container(), buildKey(blobName)); - return true; } catch (URISyntaxException e) { logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); throw new IOException(e); From 6aec0a05450718e0c5d6beb4b890b04ed1672d5e Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 12 Nov 2014 10:34:33 +0100 Subject: [PATCH 067/125] Activate tests for forbidden names See #21. 
--- .../repositories/azure/AzureSnapshotRestoreITest.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 9f50a81378a..5a9866ae70a 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -22,7 +22,6 @@ package org.elasticsearch.repositories.azure; import com.microsoft.windowsazure.services.core.ServiceException; import com.microsoft.windowsazure.services.core.storage.StorageException; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -327,7 +326,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { /** * For issue #21: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/21 */ - @Test @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/21") + @Test public void testForbiddenContainerName() { checkContainerName("", false); checkContainerName("es", false); From f82b7e90604e20bb1e876182a8f5a059eef36f27 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 12 Nov 2014 11:02:06 +0100 Subject: [PATCH 068/125] update documentation with release 2.5.0 --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a1f1a9090bd..b41d4a14232 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ The Azure Cloud plugin allows to use Azure API for the unicast discovery mechani In order to install the plugin, run: ```sh -bin/plugin -install 
elasticsearch/elasticsearch-cloud-azure/2.4.0 +bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.5.0 ``` You need to install a version matching your Elasticsearch version: @@ -15,7 +15,7 @@ You need to install a version matching your Elasticsearch version: |------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------| | master | Build from source | See below | | es-1.x | Build from source | [2.6.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#version-260-snapshot-for-elasticsearch-1x)| -| es-1.4 | Build from source | [2.5.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.4/#version-250-snapshot-for-elasticsearch-14)| +| es-1.4 | 2.5.0 | [2.5.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.5.0/#version-250-for-elasticsearch-14) | | es-1.3 | 2.4.0 | [2.4.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.4.0/#version-240-for-elasticsearch-13) | | es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.3.0/#azure-cloud-plugin-for-elasticsearch) | | es-1.1 | 2.2.0 | [2.2.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.2.0/#azure-cloud-plugin-for-elasticsearch) | @@ -275,7 +275,7 @@ This command should give you a JSON result: sudo service elasticsearch stop # Install the plugin -sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.0.0 +sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.5.0 # Configure it sudo vi /etc/elasticsearch/elasticsearch.yml From 300320f862244d2fe39d3fd71ea751fbe9ec034a Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Mon, 24 Nov 2014 05:52:11 -0500 Subject: [PATCH 069/125] Upgrade to Lucene 5.0.0-snapshot-1641343 --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/pom.xml b/pom.xml index c81ee4e8498..80bb5634c04 100644 --- a/pom.xml +++ b/pom.xml @@ -45,7 +45,7 @@ governing permissions and limitations under the License. --> 2.0.0-SNAPSHOT 5.0.0 - 5.0.0-snapshot-1637347 + 5.0.0-snapshot-1641343 onerror true onerror @@ -57,7 +57,7 @@ governing permissions and limitations under the License. --> Lucene snapshots - https://download.elasticsearch.org/lucenesnapshots/1637347/ + https://download.elasticsearch.org/lucenesnapshots/1641343/ sonatype From 1e71a7e72de5c9073bd1e4ae0045fd4b8827db26 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 24 Nov 2014 17:49:38 +0100 Subject: [PATCH 070/125] UnicastHostsProvider should use version.minimumCompatibilityVersion() The UnicastHostsProvider implementation creates DiscoveryNodes that are used as an initial seed for unicast based discovery. At the moment it uses Version.CURRENT for those DiscoveryNode object, which confuses the backwards compatibility layer to think this nodes are of the latest version. This causes new nodes to fail to join old nodes as the ping serialization goes wrong. Instead we should use version.minimumCompatibilityVersion(). Closes #47. 
(cherry picked from commit 188179f) --- .../org/elasticsearch/discovery/azure/AzureDiscovery.java | 6 ++++-- .../discovery/azure/AzureUnicastHostsProvider.java | 7 +++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 7704885cf97..f7383d830c3 100755 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.azure; +import org.elasticsearch.Version; import org.elasticsearch.cloud.azure.AzureComputeService; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; @@ -49,7 +50,8 @@ public class AzureDiscovery extends ZenDiscovery { public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, DiscoveryNodeService discoveryNodeService, AzureComputeService azureService, NetworkService networkService, - DiscoverySettings discoverySettings, ElectMasterService electMasterService, DynamicSettings dynamicSettings) { + DiscoverySettings discoverySettings, ElectMasterService electMasterService, DynamicSettings dynamicSettings, + Version version) { super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, discoveryNodeService, pingService, electMasterService, discoverySettings, dynamicSettings); if (settings.getAsBoolean("cloud.enabled", true)) { @@ -65,7 +67,7 @@ public class AzureDiscovery extends ZenDiscovery { if (unicastZenPing != null) { // update the unicast zen ping to add cloud hosts provider // and, while we are at it, use only it and not the multicast for example - unicastZenPing.addHostsProvider(new AzureUnicastHostsProvider(settings, 
azureService, transportService, networkService)); + unicastZenPing.addHostsProvider(new AzureUnicastHostsProvider(settings, azureService, transportService, networkService, version)); pingService.zenPings(ImmutableList.of(unicastZenPing)); } else { logger.warn("failed to apply azure unicast discovery, no unicast ping found"); diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java index 57e0d99b7e7..338a2a7607f 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java @@ -53,6 +53,7 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic private final AzureComputeService azureComputeService; private TransportService transportService; private NetworkService networkService; + private final Version version; private final TimeValue refreshInterval; private long lastRefresh; @@ -62,11 +63,13 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic @Inject public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureComputeService, TransportService transportService, - NetworkService networkService) { + NetworkService networkService, + Version version) { super(settings); this.azureComputeService = azureComputeService; this.transportService = transportService; this.networkService = networkService; + this.version = version; this.refreshInterval = componentSettings.getAsTime(AzureComputeService.Fields.REFRESH, settings.getAsTime("cloud.azure." 
+ AzureComputeService.Fields.REFRESH, TimeValue.timeValueSeconds(0))); @@ -139,7 +142,7 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic TransportAddress[] addresses = transportService.addressesFromString(networkAddress); // we only limit to 1 addresses, makes no sense to ping 100 ports logger.trace("adding {}, transport_address {}", networkAddress, addresses[0]); - cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getName(), addresses[0], Version.CURRENT)); + cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getName(), addresses[0], version.minimumCompatibilityVersion())); } } From 27a12065eb3d9701ea5381b84e80ca7c9c820384 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 24 Nov 2014 18:05:21 +0100 Subject: [PATCH 071/125] The document recommends a non-durable location for data I noticed the documentation recommends a non-durable local resource for the Elasticsearch data path. Although this is acceptable for some deployments, it might be worth warning people that the path is not durable and there is a potential for data loss; even with replicas, data loss is theoretically possible. ``` # recommended path.data: /mnt/resource/elasticsearch/data ``` Alternatively the user could attach and use data disks which do come with a significant performance tradeoff, but premium storage options with higher IOPS have been announced and are right around the corner. Closes #46. 
--- README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index b41d4a14232..6d74c80491f 100644 --- a/README.md +++ b/README.md @@ -45,9 +45,6 @@ cloud: service_name: your_azure_cloud_service_name discovery: type: azure - -# recommended -# path.data: /mnt/resource/elasticsearch/data ``` How to start (short story) @@ -294,8 +291,8 @@ cloud: discovery: type: azure -# Recommended -path.data: /mnt/resource/elasticsearch/data +# Recommended (warning: non durable disk) +# path.data: /mnt/resource/elasticsearch/data ``` Restart elasticsearch: From 69107b5ae63fa309a5fb8f5be1ade3e6d8b93a41 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 24 Nov 2014 18:17:00 +0100 Subject: [PATCH 072/125] update documentation with release 2.5.1 --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 6d74c80491f..9a6adf84a96 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ The Azure Cloud plugin allows to use Azure API for the unicast discovery mechani In order to install the plugin, run: ```sh -bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.5.0 +bin/plugin install elasticsearch/elasticsearch-cloud-azure/2.5.1 ``` You need to install a version matching your Elasticsearch version: @@ -15,7 +15,7 @@ You need to install a version matching your Elasticsearch version: |------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------| | master | Build from source | See below | | es-1.x | Build from source | [2.6.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#version-260-snapshot-for-elasticsearch-1x)| -| es-1.4 | 2.5.0 | [2.5.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.5.0/#version-250-for-elasticsearch-14) | +| es-1.4 | 2.5.1 | 
[2.5.1](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.5.1/#version-251-for-elasticsearch-14) | | es-1.3 | 2.4.0 | [2.4.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.4.0/#version-240-for-elasticsearch-13) | | es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.3.0/#azure-cloud-plugin-for-elasticsearch) | | es-1.1 | 2.2.0 | [2.2.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.2.0/#azure-cloud-plugin-for-elasticsearch) | @@ -272,7 +272,7 @@ This command should give you a JSON result: sudo service elasticsearch stop # Install the plugin -sudo /usr/share/elasticsearch/bin/plugin -install elasticsearch/elasticsearch-cloud-azure/2.5.0 +sudo /usr/share/elasticsearch/bin/plugin install elasticsearch/elasticsearch-cloud-azure/2.5.1 # Configure it sudo vi /etc/elasticsearch/elasticsearch.yml From 3e612ed3e22b259d6aafdc54e93ade3f34809c7c Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 2 Dec 2014 18:19:45 +0100 Subject: [PATCH 073/125] Upgrade to Lucene 5.0.0-snapshot-1642891 --- pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 80bb5634c04..7056b4209b0 100644 --- a/pom.xml +++ b/pom.xml @@ -45,7 +45,7 @@ governing permissions and limitations under the License. --> 2.0.0-SNAPSHOT 5.0.0 - 5.0.0-snapshot-1641343 + 5.0.0-snapshot-1642891 onerror true onerror @@ -57,7 +57,7 @@ governing permissions and limitations under the License. --> Lucene snapshots - https://download.elasticsearch.org/lucenesnapshots/1641343/ + https://download.elasticsearch.org/lucenesnapshots/1642891/ sonatype From dee924bb058ad088a27c3fdb142697e5157b95bf Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 2 Jan 2015 21:44:13 +0100 Subject: [PATCH 074/125] Depend on elasticsearch-parent To simplify plugins maintenance and provide more value in the future, we are starting to build an `elasticsearch-parent` project. 
This commit is the first step for this plugin to depend on this new `pom` maven project. --- .gitignore | 7 ++-- dev-tools/tests.policy | 54 +++++++++++++++++++++++++ pom.xml | 91 +++++++++++------------------------------- 3 files changed, 82 insertions(+), 70 deletions(-) create mode 100644 dev-tools/tests.policy diff --git a/.gitignore b/.gitignore index 38d7701b94d..fd9004dd92d 100644 --- a/.gitignore +++ b/.gitignore @@ -5,9 +5,10 @@ /target .DS_Store *.iml -/.settings /.project +/.settings /.classpath +/plugin_tools +/.local-execution-hints.log +/.local-*-execution-hints.log *.vmoptions -.local-execution-hints.log -plugin_tools diff --git a/dev-tools/tests.policy b/dev-tools/tests.policy new file mode 100644 index 00000000000..6afb5025840 --- /dev/null +++ b/dev-tools/tests.policy @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// Policy file to prevent tests from writing outside the test sandbox directory +// PLEASE NOTE: You may need to enable other permissions when new tests are added, +// everything not allowed here is forbidden! 
+ +grant { + // permissions for file access, write access only to sandbox: + permission java.io.FilePermission "<>", "read,execute"; + permission java.io.FilePermission "${junit4.childvm.cwd}", "read,execute,write"; + permission java.io.FilePermission "${junit4.childvm.cwd}${/}-", "read,execute,write,delete"; + permission java.io.FilePermission "${junit4.tempDir}${/}*", "read,execute,write,delete"; + permission groovy.security.GroovyCodeSourcePermission "/groovy/script"; + + // Allow connecting to the internet anywhere + permission java.net.SocketPermission "*", "accept,listen,connect,resolve"; + + // Basic permissions needed for Lucene / Elasticsearch to work: + permission java.util.PropertyPermission "*", "read,write"; + permission java.lang.reflect.ReflectPermission "*"; + permission java.lang.RuntimePermission "*"; + + // These two *have* to be spelled out a separate + permission java.lang.management.ManagementPermission "control"; + permission java.lang.management.ManagementPermission "monitor"; + + permission java.net.NetPermission "*"; + permission java.util.logging.LoggingPermission "control"; + permission javax.management.MBeanPermission "*", "*"; + permission javax.management.MBeanServerPermission "*"; + permission javax.management.MBeanTrustPermission "*"; + + // Needed for some things in DNS caching in the JVM + permission java.security.SecurityPermission "getProperty.networkaddress.cache.ttl"; + permission java.security.SecurityPermission "getProperty.networkaddress.cache.negative.ttl"; + +}; diff --git a/pom.xml b/pom.xml index 7056b4209b0..988d8da8da1 100644 --- a/pom.xml +++ b/pom.xml @@ -14,6 +14,13 @@ governing permissions and limitations under the License. 
--> xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 + + + org.elasticsearch + elasticsearch-parent + 2.0.0-SNAPSHOT + + org.elasticsearch elasticsearch-cloud-azure 3.0.0-SNAPSHOT @@ -31,88 +38,49 @@ governing permissions and limitations under the License. --> scm:git:git@github.com:elasticsearch/elasticsearch-cloud-azure.git - scm:git:git@github.com:elasticsearch/elasticsearch-cloud-azure.git - + scm:git:git@github.com:elasticsearch/elasticsearch-cloud-azure.git http://github.com/elasticsearch/elasticsearch-cloud-azure - - org.sonatype.oss - oss-parent - 7 - - - 2.0.0-SNAPSHOT - 5.0.0 - 5.0.0-snapshot-1642891 - onerror - true - onerror - - - INFO + - - - Lucene snapshots - https://download.elasticsearch.org/lucenesnapshots/1642891/ - - - sonatype - http://oss.sonatype.org/content/repositories/releases/ - - - org.hamcrest - hamcrest-core - 1.3.RC2 - test - - - org.hamcrest - hamcrest-library - 1.3.RC2 - test + hamcrest-all com.carrotsearch.randomizedtesting randomizedtesting-runner - 2.1.10 - test org.apache.lucene lucene-test-framework - ${lucene.maven.version} - test org.elasticsearch elasticsearch - ${elasticsearch.version} + provided + + com.microsoft.windowsazure microsoft-windowsazure-api 0.4.6 + log4j log4j - 1.2.17 - test org.elasticsearch elasticsearch - ${elasticsearch.version} test-jar - test @@ -134,16 +102,10 @@ governing permissions and limitations under the License. --> org.apache.maven.plugins maven-compiler-plugin - 3.0 - - 1.6 - 1.6 - com.carrotsearch.randomizedtesting junit4-maven-plugin - 2.0.14 tests @@ -213,6 +175,7 @@ governing permissions and limitations under the License. --> ${tests.weekly} ${tests.slow} ${tests.azure} + ${tests.config} ${tests.awaitsfix} ${tests.slow} ${tests.timeoutSuite} @@ -220,7 +183,6 @@ governing permissions and limitations under the License. 
--> ${tests.integration} ${tests.cluster_seed} ${tests.client.ratio} - ${tests.config} ${es.logger.level} true @@ -231,27 +193,14 @@ governing permissions and limitations under the License. --> org.apache.maven.plugins maven-surefire-plugin - 2.13 - - true - org.apache.maven.plugins maven-source-plugin - 2.2.1 - - - attach-sources - - jar - - - + org.apache.maven.plugins maven-assembly-plugin - 2.4 false ${project.build.directory}/releases/ @@ -270,4 +219,12 @@ governing permissions and limitations under the License. --> + + + + oss-snapshots + Sonatype OSS Snapshots + https://oss.sonatype.org/content/repositories/snapshots/ + + From 3afe59ff69828306868eb9931c3fb74b3b1269e7 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 14 Jan 2015 16:59:42 +0100 Subject: [PATCH 075/125] Move AzureUnicastHostsProvider to AzureDiscoveryModule Related to elasticsearch/elasticsearch#9099 Closes #48 --- .../discovery/azure/AzureDiscovery.java | 30 ++----------------- .../discovery/azure/AzureDiscoveryModule.java | 4 +++ 2 files changed, 6 insertions(+), 28 deletions(-) diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index f7383d830c3..9d3391b0874 100755 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -19,22 +19,16 @@ package org.elasticsearch.discovery.azure; -import org.elasticsearch.Version; -import org.elasticsearch.cloud.azure.AzureComputeService; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.settings.DynamicSettings; -import org.elasticsearch.common.collect.ImmutableList; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; -import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -49,29 +43,9 @@ public class AzureDiscovery extends ZenDiscovery { @Inject public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, - DiscoveryNodeService discoveryNodeService, AzureComputeService azureService, NetworkService networkService, - DiscoverySettings discoverySettings, ElectMasterService electMasterService, DynamicSettings dynamicSettings, - Version version) { + DiscoveryNodeService discoveryNodeService, + DiscoverySettings discoverySettings, ElectMasterService electMasterService, DynamicSettings dynamicSettings) { super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, discoveryNodeService, pingService, electMasterService, discoverySettings, dynamicSettings); - if (settings.getAsBoolean("cloud.enabled", true)) { - ImmutableList zenPings = pingService.zenPings(); - UnicastZenPing unicastZenPing = null; - for (ZenPing zenPing : zenPings) { - if (zenPing instanceof UnicastZenPing) { - unicastZenPing = (UnicastZenPing) zenPing; - break; - } - } - - if (unicastZenPing != null) { - // update the unicast zen ping to add cloud hosts provider - // and, while we are at it, use only it and not the multicast for example - unicastZenPing.addHostsProvider(new AzureUnicastHostsProvider(settings, azureService, transportService, networkService, version)); - 
pingService.zenPings(ImmutableList.of(unicastZenPing)); - } else { - logger.warn("failed to apply azure unicast discovery, no unicast ping found"); - } - } } } diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java index 490ee61719f..d66fa0a1138 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureDiscoveryModule.java @@ -40,7 +40,11 @@ public class AzureDiscoveryModule extends ZenDiscoveryModule { super(); this.logger = Loggers.getLogger(getClass(), settings); this.settings = settings; + if (AzureModule.isDiscoveryReady(settings, logger)) { + addUnicastHostProvider(AzureUnicastHostsProvider.class); + } } + @Override protected void bindDiscovery() { if (AzureModule.isDiscoveryReady(settings, logger)) { From 68fdedf91db19b9ce575b7a46a52d401e22de63a Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 14:14:59 +0100 Subject: [PATCH 076/125] [cleanup] Change Logger factory Related to #52 (cherry picked from commit a6435ab) (cherry picked from commit f82ee58) --- .../cloud/azure/blobstore/AzureBlobContainer.java | 4 ++-- .../elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index 6d8c9abb69a..854552d7c7e 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.collect.ImmutableMap; import org.elasticsearch.common.logging.ESLogger; -import 
org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.Loggers; import java.io.FileNotFoundException; import java.io.IOException; @@ -41,7 +41,7 @@ import java.net.URISyntaxException; */ public class AzureBlobContainer extends AbstractBlobContainer { - protected final ESLogger logger = ESLoggerFactory.getLogger(AzureBlobContainer.class.getName()); + protected final ESLogger logger = Loggers.getLogger(AzureBlobContainer.class); protected final AzureBlobStore blobStore; protected final String keyPath; diff --git a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java index 469ce6a0ddb..e4122e4ef7f 100644 --- a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java +++ b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java @@ -23,7 +23,7 @@ import org.elasticsearch.cloud.azure.AzureModule; import org.elasticsearch.common.collect.Lists; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.AbstractPlugin; import org.elasticsearch.repositories.RepositoriesModule; @@ -38,7 +38,7 @@ import java.util.Collection; public class CloudAzurePlugin extends AbstractPlugin { private final Settings settings; - private final ESLogger logger = ESLoggerFactory.getLogger(CloudAzurePlugin.class.getName()); + protected final ESLogger logger = Loggers.getLogger(CloudAzurePlugin.class); public CloudAzurePlugin(Settings settings) { this.settings = settings; From eaf3fbc6d773faa8b4556fca9a0e771d75649762 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 14:23:40 +0100 Subject: [PATCH 077/125] [cleanup] simplify logging debug/trace code [logging] don't use anymore `if 
(logger.isTraceEnabled())` or `if (logger.isDebugEnabled())` Related to #52. (cherry picked from commit 95381d4) (cherry picked from commit 6d5ce44) --- .../cloud/azure/AzureComputeServiceImpl.java | 11 +++--- .../cloud/azure/AzureStorageServiceImpl.java | 39 +++++-------------- .../azure/AzureUnicastHostsProvider.java | 9 ++--- 3 files changed, 18 insertions(+), 41 deletions(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java index cb74062723e..52c6702a437 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java @@ -83,8 +83,7 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent instances() { if (socketFactory == null) { // Azure plugin is disabled - if (logger.isTraceEnabled()) logger.trace("azure plugin is disabled. Returning an empty list of nodes."); + logger.trace("azure plugin is disabled. 
Returning an empty list of nodes."); return new HashSet(); } else { try { InputStream stream = getXML("/services/hostedservices/" + service_name + "?embed-detail=true"); Set instances = buildInstancesFromXml(stream, port_name); - if (logger.isTraceEnabled()) logger.trace("get instances from azure: {}", instances); + logger.trace("get instances from azure: {}", instances); return instances; diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java index df0b8f20aaf..a2a15c14c2b 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -76,8 +76,7 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent blobs = service.listBlobs(container, options).getBlobs(); for (ListBlobsResult.BlobEntry blob : blobs) { - if (logger.isTraceEnabled()) { - logger.trace("removing in container [{}], path [{}], blob [{}]", - container, path, blob.getName()); - } + logger.trace("removing in container [{}], path [{}], blob [{}]", container, path, blob.getName()); service.deleteBlob(container, blob.getName()); } } @@ -173,18 +162,12 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent blobs = blob_container.listBlobs(keyPath + prefix); for (ListBlobItem blob : blobs) { URI uri = blob.getUri(); - if (logger.isTraceEnabled()) { - logger.trace("blob url [{}]", uri); - } + logger.trace("blob url [{}]", uri); String blobpath = uri.getPath().substring(container.length() + 1); BlobProperties properties = service.getBlobProperties(container, blobpath).getProperties(); String name = blobpath.substring(keyPath.length() + 1); - if (logger.isTraceEnabled()) { - logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getContentLength()); - } + logger.trace("blob url [{}], name [{}], size [{}]", uri, name, 
properties.getContentLength()); blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getContentLength())); } } diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java index 338a2a7607f..242812a8d3f 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java @@ -89,7 +89,7 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic if (refreshInterval.millis() != 0) { if (cachedDiscoNodes != null && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) { - if (logger.isTraceEnabled()) logger.trace("using cache to retrieve node list"); + logger.trace("using cache to retrieve node list"); return cachedDiscoNodes; } lastRefresh = System.currentTimeMillis(); @@ -106,10 +106,10 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic if (inetAddress != null) { ipAddress = inetAddress.getHostAddress(); } - if (logger.isTraceEnabled()) logger.trace("ipAddress found: [{}]", ipAddress); + logger.trace("ipAddress found: [{}]", ipAddress); } catch (IOException e) { // We can't find the publish host address... Hmmm. 
Too bad :-( - if (logger.isTraceEnabled()) logger.trace("exception while finding ipAddress", e); + logger.trace("exception while finding ipAddress", e); } try { @@ -118,10 +118,9 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic // Let's detect if we want to use public or private IP if (host_type == HostType.PRIVATE_IP) { if (instance.getPrivateIp() != null) { - if (logger.isTraceEnabled() && instance.getPrivateIp().equals(ipAddress)) { + if (instance.getPrivateIp().equals(ipAddress)) { logger.trace("adding ourselves {}", ipAddress); } - networkAddress = instance.getPrivateIp(); } else { logger.trace("no private ip provided ignoring {}", instance.getName()); From d11a813bcef2aaea0dc6427e83de59416ffcd5ab Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 14:25:39 +0100 Subject: [PATCH 078/125] [cleanup] remove useless `;` Related to #52. (cherry picked from commit 0899438) (cherry picked from commit d1a9b0b) --- src/main/java/org/elasticsearch/cloud/azure/Instance.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/Instance.java b/src/main/java/org/elasticsearch/cloud/azure/Instance.java index e5721d51c72..e81987b66d1 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/Instance.java +++ b/src/main/java/org/elasticsearch/cloud/azure/Instance.java @@ -24,7 +24,7 @@ package org.elasticsearch.cloud.azure; */ public class Instance { public static enum Status { - STARTED; + STARTED } private String privateIp; From 0e81e351c216cab0026b2a5367ea6d3eb6b04a69 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 14:41:38 +0100 Subject: [PATCH 079/125] [cleanup] use diamond operator With Java 7, we can change `Set = new HashSet()` to `Set = new HashSet<>()` Related to #52. 
(cherry picked from commit 0c709de) (cherry picked from commit 4f474f5) --- .../cloud/azure/AzureComputeServiceImpl.java | 11 +++-------- .../cloud/azure/AzureStorageServiceImpl.java | 1 - .../azure/AzureComputeServiceSimpleMock.java | 2 +- .../azure/AzureComputeServiceTwoNodesMock.java | 2 +- .../discovery/azure/AzureInstanceXmlParserTest.java | 2 +- .../repositories/azure/AzureStorageServiceMock.java | 2 +- 6 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java index 52c6702a437..bd9f8495c6d 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java @@ -110,29 +110,24 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent(); + return new HashSet<>(); } else { try { InputStream stream = getXML("/services/hostedservices/" + service_name + "?embed-detail=true"); Set instances = buildInstancesFromXml(stream, port_name); logger.trace("get instances from azure: {}", instances); - return instances; - } catch (ParserConfigurationException e) { logger.warn("can not parse XML response: {}", e.getMessage()); - return new HashSet(); } catch (XPathExpressionException e) { logger.warn("can not parse XML response: {}", e.getMessage()); - return new HashSet(); } catch (SAXException e) { logger.warn("can not parse XML response: {}", e.getMessage()); - return new HashSet(); } catch (Exception e) { logger.warn("can not get list of azure nodes: {}", e.getMessage()); - return new HashSet(); } } + return new HashSet<>(); } private static String extractValueFromPath(Node node, String path) throws XPathExpressionException { @@ -142,7 +137,7 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent buildInstancesFromXml(InputStream inputStream, String port_name) throws 
ParserConfigurationException, IOException, SAXException, XPathExpressionException { - Set instances = new HashSet(); + Set instances = new HashSet<>(); DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance(); DocumentBuilder dBuilder = dbFactory.newDocumentBuilder(); diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java index a2a15c14c2b..f949838c8e5 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -168,7 +168,6 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent instances() { - Set instances = new HashSet(); + Set instances = new HashSet<>(); Instance azureHost = new Instance(); azureHost.setPrivateIp("127.0.0.1"); instances.add(azureHost); diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java index c829a6260d4..a04366bf5e0 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java @@ -38,7 +38,7 @@ public class AzureComputeServiceTwoNodesMock extends AzureComputeServiceAbstract @Override public Set instances() { - Set instances = new HashSet(); + Set instances = new HashSet<>(); Instance azureHost = new Instance(); azureHost.setPrivateIp("127.0.0.1"); instances.add(azureHost); diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java index bb8fbcf6700..4593c4333ba 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java +++ 
b/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java @@ -52,7 +52,7 @@ public class AzureInstanceXmlParserTest { InputStream inputStream = AzureInstanceXmlParserTest.class.getResourceAsStream("/org/elasticsearch/azure/test/services.xml"); Set instances = AzureComputeServiceImpl.buildInstancesFromXml(inputStream, "elasticsearch"); - Set expected = new HashSet(); + Set expected = new HashSet<>(); expected.add(build("es-windows2008", "10.53.250.55", null, null, Instance.Status.STARTED)); expected.add(build("myesnode1", "10.53.218.75", "137.116.213.150", "9300", Instance.Status.STARTED)); diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 2c1aa616894..67fdc6b7d28 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -41,7 +41,7 @@ import java.util.concurrent.ConcurrentHashMap; public class AzureStorageServiceMock extends AbstractLifecycleComponent implements AzureStorageService { - protected Map blobs = new ConcurrentHashMap(); + protected Map blobs = new ConcurrentHashMap<>(); @Inject protected AzureStorageServiceMock(Settings settings) { From 7c27d53ee15b4fd411b71a049a204d826960c910 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 14:45:20 +0100 Subject: [PATCH 080/125] [cleanup] collapse identical catch blocks With Java7, you don't need multiple identical catch blocks anymore Related to #52. 
(cherry picked from commit 322e1e5) (cherry picked from commit 9d3b0ad) --- .../cloud/azure/AzureComputeServiceImpl.java | 6 +----- .../azure/blobstore/AzureBlobContainer.java | 17 +++-------------- .../cloud/azure/blobstore/AzureBlobStore.java | 6 +----- 3 files changed, 5 insertions(+), 24 deletions(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java index bd9f8495c6d..71372544973 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java @@ -117,11 +117,7 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent instances = buildInstancesFromXml(stream, port_name); logger.trace("get instances from azure: {}", instances); return instances; - } catch (ParserConfigurationException e) { - logger.warn("can not parse XML response: {}", e.getMessage()); - } catch (XPathExpressionException e) { - logger.warn("can not parse XML response: {}", e.getMessage()); - } catch (SAXException e) { + } catch (ParserConfigurationException | XPathExpressionException | SAXException e) { logger.warn("can not parse XML response: {}", e.getMessage()); } catch (Exception e) { logger.warn("can not get list of azure nodes: {}", e.getMessage()); diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index 854552d7c7e..9b530ee977b 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -60,9 +60,7 @@ public class AzureBlobContainer extends AbstractBlobContainer { public boolean blobExists(String blobName) { try { return blobStore.client().blobExists(blobStore.container(), buildKey(blobName)); - } catch (URISyntaxException e) { - 
logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); - } catch (StorageException e) { + } catch (URISyntaxException | StorageException e) { logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); } return false; @@ -98,10 +96,7 @@ public class AzureBlobContainer extends AbstractBlobContainer { public void deleteBlob(String blobName) throws IOException { try { blobStore.client().deleteBlob(blobStore.container(), buildKey(blobName)); - } catch (URISyntaxException e) { - logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); - throw new IOException(e); - } catch (StorageException e) { + } catch (URISyntaxException | StorageException e) { logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore.container(), e.getMessage()); throw new IOException(e); } @@ -112,13 +107,7 @@ public class AzureBlobContainer extends AbstractBlobContainer { try { return blobStore.client().listBlobsByPrefix(blobStore.container(), keyPath, prefix); - } catch (URISyntaxException e) { - logger.warn("can not access [{}] in container {{}}: {}", prefix, blobStore.container(), e.getMessage()); - throw new IOException(e); - } catch (StorageException e) { - logger.warn("can not access [{}] in container {{}}: {}", prefix, blobStore.container(), e.getMessage()); - throw new IOException(e); - } catch (ServiceException e) { + } catch (URISyntaxException | StorageException | ServiceException e) { logger.warn("can not access [{}] in container {{}}: {}", prefix, blobStore.container(), e.getMessage()); throw new IOException(e); } diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index 4c4d21f757c..e68b69070a6 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ 
b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -76,11 +76,7 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { try { client.deleteFiles(container, keyPath); - } catch (URISyntaxException e) { - logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); - } catch (StorageException e) { - logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); - } catch (ServiceException e) { + } catch (URISyntaxException | StorageException | ServiceException e) { logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); } } From b7b8db97c7b371be2a805bfb4e454dbf5391dfd3 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 15:37:47 +0100 Subject: [PATCH 081/125] [package] stax-api is added twice Library stax-api is added twice in final ZIP file. It's due to the dependency tree. ``` [INFO] --- maven-dependency-plugin:2.8:tree (default-cli) @ elasticsearch-cloud-azure --- [INFO] org.elasticsearch:elasticsearch-cloud-azure:jar:2.5.2-SNAPSHOT [INFO] +- org.hamcrest:hamcrest-core:jar:1.3.RC2:test [INFO] +- org.hamcrest:hamcrest-library:jar:1.3.RC2:test [INFO] +- com.carrotsearch.randomizedtesting:randomizedtesting-runner:jar:2.1.10:test [INFO] | \- junit:junit:jar:4.10:test [INFO] +- org.apache.lucene:lucene-test-framework:jar:4.10.2:test [INFO] | +- org.apache.lucene:lucene-codecs:jar:4.10.2:test [INFO] | +- org.apache.lucene:lucene-core:jar:4.10.2:compile [INFO] | +- com.carrotsearch.randomizedtesting:junit4-ant:jar:2.1.6:test [INFO] | \- org.apache.ant:ant:jar:1.8.2:test [INFO] +- org.elasticsearch:elasticsearch:jar:1.4.2:compile [INFO] | +- org.apache.lucene:lucene-analyzers-common:jar:4.10.2:compile [INFO] | +- org.apache.lucene:lucene-queries:jar:4.10.2:compile [INFO] | +- org.apache.lucene:lucene-memory:jar:4.10.2:compile [INFO] | +- org.apache.lucene:lucene-highlighter:jar:4.10.2:compile [INFO] | +- 
org.apache.lucene:lucene-queryparser:jar:4.10.2:compile [INFO] | +- org.apache.lucene:lucene-sandbox:jar:4.10.2:compile [INFO] | +- org.apache.lucene:lucene-suggest:jar:4.10.2:compile [INFO] | +- org.apache.lucene:lucene-misc:jar:4.10.2:compile [INFO] | +- org.apache.lucene:lucene-join:jar:4.10.2:compile [INFO] | +- org.apache.lucene:lucene-grouping:jar:4.10.2:compile [INFO] | \- org.apache.lucene:lucene-spatial:jar:4.10.2:compile [INFO] | \- com.spatial4j:spatial4j:jar:0.4.1:compile [INFO] +- com.microsoft.windowsazure:microsoft-windowsazure-api:jar:0.4.6:compile [INFO] | +- com.sun.jersey:jersey-client:jar:1.13:compile [INFO] | | \- com.sun.jersey:jersey-core:jar:1.13:compile [INFO] | +- javax.inject:javax.inject:jar:1:compile [INFO] | +- com.sun.jersey:jersey-json:jar:1.13:compile [INFO] | | +- org.codehaus.jettison:jettison:jar:1.1:compile [INFO] | | | \- stax:stax-api:jar:1.0.1:compile [INFO] | | +- com.sun.xml.bind:jaxb-impl:jar:2.2.3-1:compile [INFO] | | | \- javax.xml.bind:jaxb-api:jar:2.2.2:compile [INFO] | | | \- javax.xml.stream:stax-api:jar:1.0-2:compile [INFO] | | +- org.codehaus.jackson:jackson-core-asl:jar:1.9.2:compile [INFO] | | +- org.codehaus.jackson:jackson-mapper-asl:jar:1.9.2:compile [INFO] | | +- org.codehaus.jackson:jackson-jaxrs:jar:1.9.2:compile [INFO] | | \- org.codehaus.jackson:jackson-xc:jar:1.9.2:compile [INFO] | +- commons-logging:commons-logging:jar:1.1.1:compile [INFO] | +- javax.mail:mail:jar:1.4.5:compile [INFO] | | \- javax.activation:activation:jar:1.1:compile [INFO] | \- org.apache.commons:commons-lang3:jar:3.1:compile [INFO] +- log4j:log4j:jar:1.2.17:compile [INFO] \- org.elasticsearch:elasticsearch:test-jar:tests:1.4.2:test ``` We should exclude `javax.xml.stream:stax-api:jar:1.0-2:compile` from `com.microsoft.windowsazure:microsoft-windowsazure-api:jar:0.4.6:compile` Closes #56. 
(cherry picked from commit 68efe93) (cherry picked from commit 68f25d1) --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index 988d8da8da1..b70f84d5119 100644 --- a/pom.xml +++ b/pom.xml @@ -71,6 +71,12 @@ governing permissions and limitations under the License. --> com.microsoft.windowsazure microsoft-windowsazure-api 0.4.6 + + + javax.xml.stream + stax-api + + From b91f5da2b2a09797a04448e64fd3643f6c030e01 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 14:17:55 +0100 Subject: [PATCH 082/125] Failure when creating a container should raise an exception When you remove a container from Azure console, even if Azure told you that it has been done, it appears to be an asynchronous deletion so you can hit error like `can not initialize container [elasticsearch-snapshots]: [The specified container is being deleted. Try operation later.]` but this error does not fail the repository creation... Related to #54. (cherry picked from commit e483304) (cherry picked from commit 520444c) --- .../cloud/azure/AzureStorageServiceImpl.java | 16 ++++-- .../azure/blobstore/AzureBlobContainer.java | 7 ++- .../cloud/azure/blobstore/AzureBlobStore.java | 19 ++++--- .../repositories/azure/AzureRepository.java | 52 +++++++++++++++---- .../azure/AzureSnapshotRestoreITest.java | 44 ++++++++++++++++ 5 files changed, 115 insertions(+), 23 deletions(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java index f949838c8e5..e2ae77a1974 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -42,6 +42,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; 
+import org.elasticsearch.repositories.RepositoryException; import java.io.InputStream; import java.io.OutputStream; @@ -125,9 +126,14 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException, ServiceException { - logger.debug("listBlobsByPrefix container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); + logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); ImmutableMap.Builder blobsBuilder = ImmutableMap.builder(); CloudBlobContainer blob_container = client.getContainerReference(container); diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index 9b530ee977b..ed238c06664 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.collect.ImmutableMap; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.repositories.RepositoryException; import java.io.FileNotFoundException; import java.io.IOException; @@ -45,8 +46,9 @@ public class AzureBlobContainer extends AbstractBlobContainer { protected final AzureBlobStore blobStore; protected final String keyPath; + protected final String repositoryName; - public AzureBlobContainer(BlobPath path, AzureBlobStore blobStore) { + public AzureBlobContainer(String repositoryName, BlobPath path, AzureBlobStore blobStore) { super(path); this.blobStore = blobStore; String keyPath = path.buildAsString("/"); @@ -54,6 +56,7 @@ public class AzureBlobContainer extends AbstractBlobContainer { keyPath = keyPath + "/"; } this.keyPath = 
keyPath; + this.repositoryName = repositoryName; } @Override @@ -89,6 +92,8 @@ public class AzureBlobContainer extends AbstractBlobContainer { throw new IOException(e); } catch (URISyntaxException e) { throw new IOException(e); + } catch (IllegalArgumentException e) { + throw new RepositoryException(repositoryName, e.getMessage()); } } diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index e68b69070a6..79f4bc62c54 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -26,7 +26,11 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositoryName; +import org.elasticsearch.repositories.RepositorySettings; +import org.elasticsearch.repositories.azure.AzureRepository; import java.net.URISyntaxException; @@ -38,15 +42,16 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { private final AzureStorageService client; private final String container; + private final String repositoryName; - public AzureBlobStore(Settings settings, AzureStorageService client, String container) throws URISyntaxException, StorageException { + @Inject + public AzureBlobStore(RepositoryName name, Settings settings, RepositorySettings repositorySettings, + AzureStorageService client) throws URISyntaxException, StorageException { super(settings); this.client = client; - this.container = container; - - if (!client.doesContainerExist(container)) { - client.createContainer(container); - } + this.container = 
repositorySettings.settings().get(AzureStorageService.Fields.CONTAINER, + componentSettings.get(AzureStorageService.Fields.CONTAINER, AzureRepository.CONTAINER_DEFAULT)); + this.repositoryName = name.getName(); } @Override @@ -64,7 +69,7 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { @Override public BlobContainer blobContainer(BlobPath path) { - return new AzureBlobContainer(path, this); + return new AzureBlobContainer(repositoryName, path, this); } @Override diff --git a/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index f8eb8442e32..36882fa281c 100644 --- a/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -22,16 +22,21 @@ package org.elasticsearch.repositories.azure; import com.microsoft.windowsazure.services.core.storage.StorageException; import org.elasticsearch.cloud.azure.AzureStorageService; import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.collect.ImmutableList; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.repositories.RepositoryName; import org.elasticsearch.repositories.RepositorySettings; +import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotCreationException; import java.io.IOException; import 
java.net.URISyntaxException; @@ -60,23 +65,16 @@ public class AzureRepository extends BlobStoreRepository { private boolean compress; - /** - * Constructs new shared file system repository - * - * @param name repository name - * @param repositorySettings repository settings - * @param indexShardRepository index shard repository - * @param azureStorageService Azure Storage service - * @throws java.io.IOException - */ @Inject - public AzureRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, AzureStorageService azureStorageService) throws IOException, URISyntaxException, StorageException { + public AzureRepository(RepositoryName name, RepositorySettings repositorySettings, + IndexShardRepository indexShardRepository, + AzureBlobStore azureBlobStore) throws IOException, URISyntaxException, StorageException { super(name.getName(), repositorySettings, indexShardRepository); String container = repositorySettings.settings().get(AzureStorageService.Fields.CONTAINER, componentSettings.get(AzureStorageService.Fields.CONTAINER, CONTAINER_DEFAULT)); - this.blobStore = new AzureBlobStore(settings, azureStorageService, container); + this.blobStore = azureBlobStore; this.chunkSize = repositorySettings.settings().getAsBytesSize(AzureStorageService.Fields.CHUNK_SIZE, componentSettings.getAsBytesSize(AzureStorageService.Fields.CHUNK_SIZE, new ByteSizeValue(64, ByteSizeUnit.MB))); @@ -133,5 +131,37 @@ public class AzureRepository extends BlobStoreRepository { return chunkSize; } + @Override + public void initializeSnapshot(SnapshotId snapshotId, ImmutableList indices, MetaData metaData) { + try { + if (!blobStore.client().doesContainerExist(blobStore.container())) { + logger.debug("container [{}] does not exist. 
Creating...", blobStore.container()); + blobStore.client().createContainer(blobStore.container()); + } + super.initializeSnapshot(snapshotId, indices, metaData); + } catch (StorageException e) { + logger.warn("can not initialize container [{}]: [{}]", blobStore.container(), e.getMessage()); + throw new SnapshotCreationException(snapshotId, e); + } catch (URISyntaxException e) { + logger.warn("can not initialize container [{}]: [{}]", blobStore.container(), e.getMessage()); + throw new SnapshotCreationException(snapshotId, e); + } + } + @Override + public String startVerification() { + try { + if (!blobStore.client().doesContainerExist(blobStore.container())) { + logger.debug("container [{}] does not exist. Creating...", blobStore.container()); + blobStore.client().createContainer(blobStore.container()); + } + return super.startVerification(); + } catch (StorageException e) { + logger.warn("can not initialize container [{}]: [{}]", blobStore.container(), e.getMessage()); + throw new RepositoryVerificationException(repositoryName, "can not initialize container " + blobStore.container(), e); + } catch (URISyntaxException e) { + logger.warn("can not initialize container [{}]: [{}]", blobStore.container(), e.getMessage()); + throw new RepositoryVerificationException(repositoryName, "can not initialize container " + blobStore.container(), e); + } + } } diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 5a9866ae70a..5697295be71 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -31,10 +31,12 @@ import org.elasticsearch.cloud.azure.AbstractAzureTest; import org.elasticsearch.cloud.azure.AzureStorageService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; +import 
org.elasticsearch.common.base.Predicate; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ElasticsearchIntegrationTest; @@ -44,6 +46,7 @@ import org.junit.Before; import org.junit.Test; import java.net.URISyntaxException; +import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.*; @@ -388,6 +391,47 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { } } + /** + * When a user remove a container you can not immediately create it again. + */ + @Test + public void testRemoveAndCreateContainer() throws URISyntaxException, StorageException, InterruptedException { + final String container = getContainerName(); + final AzureStorageService storageService = internalCluster().getInstance(AzureStorageService.class); + + // It could happen that we run this test really close to a previous one + // so we might need some time to be able to create the container + assertThat(awaitBusy(new Predicate() { + public boolean apply(Object obj) { + try { + storageService.createContainer(container); + logger.debug(" -> container created..."); + return true; + } catch (URISyntaxException e) { + // Incorrect URL. This should never happen. + return false; + } catch (StorageException e) { + // It could happen. Let's wait for a while. + logger.debug(" -> container is being removed. 
Let's wait a bit..."); + return false; + } + } + }, 30, TimeUnit.SECONDS), equalTo(true)); + storageService.removeContainer(container); + + ClusterAdminClient client = client().admin().cluster(); + logger.info("--> creating azure repository while container is being removed"); + try { + client.preparePutRepository("test-repo").setType("azure") + .setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, container) + ).get(); + fail("we should get a RepositoryVerificationException"); + } catch (RepositoryVerificationException e) { + // Fine we expect that + } + } + /** * Deletes repositories, supports wildcard notation. */ From c20371aa8ff6c507dd3f15b2dd0c7f83e2872f39 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 30 Jan 2015 18:31:00 +0100 Subject: [PATCH 083/125] Support new method `BlobContainer#move()` This has been added in elasticsearch 2.0.0. See elasticsearch/elasticsearch#8782 Wondering if this PR should be split in two parts: * one that clean the code and which is portable to 1.4, 1.x and master * one that simply add move() method for master branch @imotov WDYT? Closes #49. 
--- .../cloud/azure/AzureStorageService.java | 2 ++ .../cloud/azure/AzureStorageServiceImpl.java | 13 +++++++ .../azure/blobstore/AzureBlobContainer.java | 18 ++++++++++ .../azure/AzureStorageServiceMock.java | 34 +++++++++++++++++++ 4 files changed, 67 insertions(+) diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java index 27a0ec04bf5..80d948c259a 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java @@ -59,4 +59,6 @@ public interface AzureStorageService { OutputStream getOutputStream(String container, String blob) throws URISyntaxException, StorageException; ImmutableMap listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException, ServiceException; + + void moveBlob(String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException; } diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java index e2ae77a1974..1254f8bc785 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -214,6 +214,19 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent done", container, sourceBlob, targetBlob); + } + } + @Override protected void doStart() throws ElasticsearchException { logger.debug("starting azure storage client instance"); diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index ed238c06664..dd032591eb8 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ 
b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -118,6 +118,24 @@ public class AzureBlobContainer extends AbstractBlobContainer { } } + @Override + public void move(String sourceBlobName, String targetBlobName) throws IOException { + try { + String source = keyPath + sourceBlobName; + String target = keyPath + targetBlobName; + + logger.debug("moving blob [{}] to [{}] in container {{}}", source, target, blobStore.container()); + + blobStore.client().moveBlob(blobStore.container(), source, target); + } catch (URISyntaxException e) { + logger.warn("can not move blob [{}] to [{}] in container {{}}: {}", sourceBlobName, targetBlobName, blobStore.container(), e.getMessage()); + throw new IOException(e); + } catch (StorageException e) { + logger.warn("can not move blob [{}] to [{}] in container {{}}: {}", sourceBlobName, targetBlobName, blobStore.container(), e.getMessage()); + throw new IOException(e); + } + } + @Override public ImmutableMap listBlobs() throws IOException { return listBlobsByPrefix(null); diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 67fdc6b7d28..257a0c4728a 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -99,6 +99,17 @@ public class AzureStorageServiceMock extends AbstractLifecycleComponent Date: Wed, 4 Feb 2015 17:11:56 +0100 Subject: [PATCH 084/125] Use Azure Storage 2.0.0 Microsoft team has released a new specific project to deal with storage with a much cleaner API than the previous version. See https://github.com/azure/azure-storage-java Documentation is here: http://azure.microsoft.com/en-us/documentation/articles/storage-java-how-to-use-blob-storage/ Note that the produced ZIP file has been reduced from 5mb to 1.3mb. 
Related to #38 (cherry picked from commit 4467254) (cherry picked from commit b2f1e4d) --- pom.xml | 12 +--- src/main/assemblies/plugin.xml | 2 +- .../cloud/azure/AzureStorageService.java | 9 ++- .../cloud/azure/AzureStorageServiceImpl.java | 67 ++++++------------- .../azure/blobstore/AzureBlobContainer.java | 9 +-- .../cloud/azure/blobstore/AzureBlobStore.java | 5 +- .../repositories/azure/AzureRepository.java | 2 +- .../AbstractAzureRepositoryServiceTest.java | 7 +- .../azure/AzureSnapshotRestoreITest.java | 13 ++-- .../azure/AzureStorageServiceMock.java | 7 +- 10 files changed, 51 insertions(+), 82 deletions(-) diff --git a/pom.xml b/pom.xml index b70f84d5119..387c788b755 100644 --- a/pom.xml +++ b/pom.xml @@ -68,15 +68,9 @@ governing permissions and limitations under the License. --> - com.microsoft.windowsazure - microsoft-windowsazure-api - 0.4.6 - - - javax.xml.stream - stax-api - - + com.microsoft.azure + azure-storage + 2.0.0 diff --git a/src/main/assemblies/plugin.xml b/src/main/assemblies/plugin.xml index b18ecaff3a0..3de1c5befa5 100644 --- a/src/main/assemblies/plugin.xml +++ b/src/main/assemblies/plugin.xml @@ -30,7 +30,7 @@ governing permissions and limitations under the License. 
--> true true - com.microsoft.windowsazure:microsoft-windowsazure-api + com.microsoft.azure:azure-storage diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java index 80d948c259a..97b5c26a69e 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java @@ -19,8 +19,7 @@ package org.elasticsearch.cloud.azure; -import com.microsoft.windowsazure.services.core.ServiceException; -import com.microsoft.windowsazure.services.core.storage.StorageException; +import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.collect.ImmutableMap; @@ -48,17 +47,17 @@ public interface AzureStorageService { void createContainer(String container) throws URISyntaxException, StorageException; - void deleteFiles(String container, String path) throws URISyntaxException, StorageException, ServiceException; + void deleteFiles(String container, String path) throws URISyntaxException, StorageException; boolean blobExists(String container, String blob) throws URISyntaxException, StorageException; void deleteBlob(String container, String blob) throws URISyntaxException, StorageException; - InputStream getInputStream(String container, String blob) throws ServiceException; + InputStream getInputStream(String container, String blob) throws URISyntaxException, StorageException; OutputStream getOutputStream(String container, String blob) throws URISyntaxException, StorageException; - ImmutableMap listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException, ServiceException; + ImmutableMap listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException; void moveBlob(String container, String sourceBlob, String targetBlob) throws 
URISyntaxException, StorageException; } diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java index 1254f8bc785..894ad5d413c 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -19,21 +19,9 @@ package org.elasticsearch.cloud.azure; -import com.microsoft.windowsazure.services.blob.BlobConfiguration; -import com.microsoft.windowsazure.services.blob.BlobContract; -import com.microsoft.windowsazure.services.blob.BlobService; -import com.microsoft.windowsazure.services.blob.client.CloudBlobClient; -import com.microsoft.windowsazure.services.blob.client.CloudBlobContainer; -import com.microsoft.windowsazure.services.blob.client.CloudBlockBlob; -import com.microsoft.windowsazure.services.blob.client.ListBlobItem; -import com.microsoft.windowsazure.services.blob.models.BlobProperties; -import com.microsoft.windowsazure.services.blob.models.GetBlobResult; -import com.microsoft.windowsazure.services.blob.models.ListBlobsOptions; -import com.microsoft.windowsazure.services.blob.models.ListBlobsResult; -import com.microsoft.windowsazure.services.core.Configuration; -import com.microsoft.windowsazure.services.core.ServiceException; -import com.microsoft.windowsazure.services.core.storage.CloudStorageAccount; -import com.microsoft.windowsazure.services.core.storage.StorageException; +import com.microsoft.azure.storage.CloudStorageAccount; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.*; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; @@ -48,7 +36,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.net.URISyntaxException; -import java.util.List; 
/** * @@ -60,10 +47,7 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent blobs = service.listBlobs(container, options).getBlobs(); - for (ListBlobsResult.BlobEntry blob : blobs) { - logger.trace("removing in container [{}], path [{}], blob [{}]", container, path, blob.getName()); - service.deleteBlob(container, blob.getName()); + for (ListBlobItem blobItem : blob_container.listBlobs(path)) { + logger.trace("removing blob [{}]", blobItem.getUri()); + deleteBlob(container, blobItem.getUri().toString()); } } } @@ -180,10 +157,9 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException, ServiceException { + public ImmutableMap listBlobsByPrefix(String container, String keyPath, String prefix) throws URISyntaxException, StorageException { logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix); ImmutableMap.Builder blobsBuilder = ImmutableMap.builder(); CloudBlobContainer blob_container = client.getContainerReference(container); if (blob_container.exists()) { - Iterable blobs = blob_container.listBlobs(keyPath + prefix); - for (ListBlobItem blob : blobs) { - URI uri = blob.getUri(); + for (ListBlobItem blobItem : blob_container.listBlobs(keyPath + prefix)) { + URI uri = blobItem.getUri(); logger.trace("blob url [{}]", uri); String blobpath = uri.getPath().substring(container.length() + 1); - BlobProperties properties = service.getBlobProperties(container, blobpath).getProperties(); + BlobProperties properties = blob_container.getBlockBlobReference(blobpath).getProperties(); String name = blobpath.substring(keyPath.length() + 1); - logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getContentLength()); - blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getContentLength())); + logger.trace("blob url [{}], name [{}], size [{}]", uri, name, 
properties.getLength()); + blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); } } @@ -221,7 +196,7 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent done", container, sourceBlob, targetBlob); } diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index dd032591eb8..fd8aeca30f3 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -19,8 +19,7 @@ package org.elasticsearch.cloud.azure.blobstore; -import com.microsoft.windowsazure.services.core.ServiceException; -import com.microsoft.windowsazure.services.core.storage.StorageException; +import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; @@ -73,11 +72,13 @@ public class AzureBlobContainer extends AbstractBlobContainer { public InputStream openInput(String blobName) throws IOException { try { return blobStore.client().getInputStream(blobStore.container(), buildKey(blobName)); - } catch (ServiceException e) { + } catch (StorageException e) { if (e.getHttpStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { throw new FileNotFoundException(e.getMessage()); } throw new IOException(e); + } catch (URISyntaxException e) { + throw new IOException(e); } } @@ -112,7 +113,7 @@ public class AzureBlobContainer extends AbstractBlobContainer { try { return blobStore.client().listBlobsByPrefix(blobStore.container(), keyPath, prefix); - } catch (URISyntaxException | StorageException | ServiceException e) { + } catch (URISyntaxException | StorageException e) { logger.warn("can not access [{}] in container {{}}: {}", prefix, blobStore.container(), e.getMessage()); throw new IOException(e); } diff --git 
a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index 79f4bc62c54..12581d3b63e 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -19,8 +19,7 @@ package org.elasticsearch.cloud.azure.blobstore; -import com.microsoft.windowsazure.services.core.ServiceException; -import com.microsoft.windowsazure.services.core.storage.StorageException; +import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cloud.azure.AzureStorageService; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; @@ -81,7 +80,7 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { try { client.deleteFiles(container, keyPath); - } catch (URISyntaxException | StorageException | ServiceException e) { + } catch (URISyntaxException | StorageException e) { logger.warn("can not remove [{}] in container {{}}: {}", keyPath, container, e.getMessage()); } } diff --git a/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 36882fa281c..5cca6d2543d 100644 --- a/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -19,7 +19,7 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.windowsazure.services.core.storage.StorageException; +import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cloud.azure.AzureStorageService; import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; import org.elasticsearch.cluster.metadata.MetaData; diff --git a/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java 
b/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java index 4c92b910f0b..38b8a4fea83 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureRepositoryServiceTest.java @@ -19,8 +19,7 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.windowsazure.services.core.ServiceException; -import com.microsoft.windowsazure.services.core.storage.StorageException; +import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cloud.azure.AbstractAzureTest; import org.elasticsearch.cloud.azure.AzureStorageService; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -87,7 +86,7 @@ public abstract class AbstractAzureRepositoryServiceTest extends AbstractAzureTe } @Before @After - public final void wipe() throws StorageException, ServiceException, URISyntaxException { + public final void wipe() throws StorageException, URISyntaxException { wipeRepositories(); cleanRepositoryFiles(basePath); } @@ -95,7 +94,7 @@ public abstract class AbstractAzureRepositoryServiceTest extends AbstractAzureTe /** * Purge the test container */ - public void cleanRepositoryFiles(String path) throws StorageException, ServiceException, URISyntaxException { + public void cleanRepositoryFiles(String path) throws StorageException, URISyntaxException { String container = internalCluster().getInstance(Settings.class).get("repositories.azure.container"); logger.info("--> remove blobs in container [{}]", container); AzureStorageService client = internalCluster().getInstance(AzureStorageService.class); diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 5697295be71..490bb5f35bc 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ 
b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -20,8 +20,7 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.windowsazure.services.core.ServiceException; -import com.microsoft.windowsazure.services.core.storage.StorageException; +import com.microsoft.azure.storage.StorageException; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -83,7 +82,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { } @Before - public final void wipeBefore() throws StorageException, ServiceException, URISyntaxException { + public final void wipeBefore() throws StorageException, URISyntaxException { wipeRepositories(); cleanRepositoryFiles( getContainerName(), @@ -92,7 +91,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { } @After - public final void wipeAfter() throws StorageException, ServiceException, URISyntaxException { + public final void wipeAfter() throws StorageException, URISyntaxException { wipeRepositories(); cleanRepositoryFiles( getContainerName(), @@ -243,7 +242,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { * For issue #26: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/26 */ @Test - public void testListBlobs_26() throws StorageException, ServiceException, URISyntaxException { + public void testListBlobs_26() throws StorageException, URISyntaxException { createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -302,7 +301,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { * For issue #28: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/28 */ @Test - public void testGetDeleteNonExistingSnapshot_28() throws StorageException, ServiceException, URISyntaxException { + 
public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISyntaxException { ClusterAdminClient client = client().admin().cluster(); logger.info("--> creating azure repository without any path"); PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure") @@ -452,7 +451,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { /** * Purge the test containers */ - public void cleanRepositoryFiles(String... containers) throws StorageException, ServiceException, URISyntaxException { + public void cleanRepositoryFiles(String... containers) throws StorageException, URISyntaxException { AzureStorageService client = internalCluster().getInstance(AzureStorageService.class); for (String container : containers) { logger.info("--> remove container [{}]", container); diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 257a0c4728a..3ce49321b2b 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -19,7 +19,7 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.windowsazure.services.core.storage.StorageException; +import com.microsoft.azure.storage.StorageException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.azure.AzureStorageService; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -29,7 +29,10 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import java.io.*; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.InputStream; +import java.io.OutputStream; import java.net.URISyntaxException; import java.util.Locale; import 
java.util.Map; From 3ef4cc5a78308d0a0e1253d594227ec3b4b1ddf4 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 13 Feb 2015 15:15:17 +0100 Subject: [PATCH 085/125] Open Lucene MMap/SimpleFSDirectory with Read option This commit adds 2 IndexStoreModule / IndexStore / DirectoryService that open Lucene MMapFSDirectory / SimpleFSDirectory with a "read" option. It can be activated in configuration with: index.store.type: org.elasticsearch.index.store.fs.SmbSimpleFsIndexStoreModule or index.store.type: org.elasticsearch.index.store.fs.SmbMmapFsIndexStoreModule (cherry picked from commit 2388ace) (cherry picked from commit 16014a0) --- README.md | 30 +++++++ .../lucene/store/SmbDirectoryWrapper.java | 79 +++++++++++++++++++ .../store/fs/SmbMmapFsDirectoryService.java | 47 +++++++++++ .../index/store/fs/SmbMmapFsIndexStore.java | 43 ++++++++++ .../store/fs/SmbMmapFsIndexStoreModule.java | 31 ++++++++ .../store/fs/SmbSimpleFsDirectoryService.java | 47 +++++++++++ .../index/store/fs/SmbSimpleFsIndexStore.java | 44 +++++++++++ .../store/fs/SmbSimpleFsIndexStoreModule.java | 31 ++++++++ .../lucene/store/SmbMMapDirectoryTest.java | 31 ++++++++ .../store/SmbSimpleFSDirectoryTest.java | 31 ++++++++ 10 files changed, 414 insertions(+) create mode 100644 src/main/java/org/apache/lucene/store/SmbDirectoryWrapper.java create mode 100644 src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsDirectoryService.java create mode 100644 src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStore.java create mode 100644 src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStoreModule.java create mode 100644 src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsDirectoryService.java create mode 100644 src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStore.java create mode 100644 src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStoreModule.java create mode 100644 src/test/java/org/apache/lucene/store/SmbMMapDirectoryTest.java create mode 
100644 src/test/java/org/apache/lucene/store/SmbSimpleFSDirectoryTest.java diff --git a/README.md b/README.md index 9a6adf84a96..42e165bec44 100644 --- a/README.md +++ b/README.md @@ -447,6 +447,36 @@ To run test: mvn -Dtests.azure=true -Dtests.config=/path/to/config/file/elasticsearch.yml clean test ``` +Optimizing index storage on Azure File Service +============================================== +When using a shared file system based on the SMB protocol (like Azure File Service) to store indices, the way Lucene open index segment files +can slow down some functionalities like indexing documents and recovering index shards. If you'd like to know more about this behavior and how Lucene and SMB interact, +have a look at [LUCENE-6176](https://issues.apache.org/jira/browse/LUCENE-6176). + +In order to get better performance, the Azure Cloud plugin provides two storage types optimized for SMB: + +- `org.elasticsearch.index.store.fs.SmbMmapFsIndexStoreModule`: a SMB specific implementation of the default [mmap fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#mmapfs) +- `org.elasticsearch.index.store.fs.SmbSimpleFsIndexStoreModule`: a SMB specific implementation of the default [simple fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#simplefs) + +To use one of these specific storage types, you need to install the Azure Cloud plugin and restart the node. Then configure Elasticsearch to set +the storage type you want. + +This can be configured for all indices by adding this to the config/elasticsearch.yml file: +``` +index.store.type: org.elasticsearch.index.store.fs.SmbSimpleFsIndexStoreModule +``` + +Note that setting will be applied for newly created indices. 
+ +It can also be set on a per-index basis at index creation time: +``` +curl -XPUT localhost:9200/my_index -d '{ + "settings": { + "index.store.type": "org.elasticsearch.index.store.fs.SmbMmapFsIndexStoreModule" + } +}' +``` + License ------- diff --git a/src/main/java/org/apache/lucene/store/SmbDirectoryWrapper.java b/src/main/java/org/apache/lucene/store/SmbDirectoryWrapper.java new file mode 100644 index 00000000000..1e783fdd6c5 --- /dev/null +++ b/src/main/java/org/apache/lucene/store/SmbDirectoryWrapper.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.lucene.store; + +import java.io.FilterOutputStream; +import java.io.IOException; +import java.nio.channels.Channels; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; + +/** + * This class is used to wrap an existing {@link org.apache.lucene.store.FSDirectory} so that + * the new shard segment files will be opened for Read and Write access. + *

    + * When storing index files on an SMB share like Azure File Service, opening the file for Read + * access can save a lot of roundtrips to the storage server and thus offering better performance. + */ +public final class SmbDirectoryWrapper extends FilterDirectory { + + private final FSDirectory fsDirectory; + + public SmbDirectoryWrapper(FSDirectory in) { + super(in); + fsDirectory = in; + } + + @Override + public IndexOutput createOutput(String name, IOContext context) throws IOException { + fsDirectory.ensureOpen(); + fsDirectory.ensureCanWrite(name); + return new SmbFSIndexOutput(name); + } + + /** + * Copied from final inner class {@link org.apache.lucene.store.FSDirectory.FSIndexOutput} + */ + final class SmbFSIndexOutput extends OutputStreamIndexOutput { + /** + * The maximum chunk size is 8192 bytes, because {@link java.io.FileOutputStream} mallocs + * a native buffer outside of stack if the write buffer size is larger. + */ + static final int CHUNK_SIZE = 8192; + + private final String name; + + public SmbFSIndexOutput(String name) throws IOException { + super("SmbFSIndexOutput(path=\"" + fsDirectory.getDirectory().resolve(name) + "\")", new FilterOutputStream(Channels.newOutputStream(Files.newByteChannel(fsDirectory.getDirectory().resolve(name), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE))) { + // This implementation ensures, that we never write more than CHUNK_SIZE bytes: + @Override + public void write(byte[] b, int offset, int length) throws IOException { + while (length > 0) { + final int chunk = Math.min(length, CHUNK_SIZE); + out.write(b, offset, chunk); + length -= chunk; + offset += chunk; + } + } + }, CHUNK_SIZE); + this.name = name; + } + } +} diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsDirectoryService.java new file mode 100644 index 00000000000..cbdfb3f527d --- 
/dev/null +++ b/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsDirectoryService.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.fs; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.LockFactory; +import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.store.SmbDirectoryWrapper; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.IndexStore; + +import java.io.IOException; +import java.nio.file.Path; + +public class SmbMmapFsDirectoryService extends FsDirectoryService { + + @Inject + public SmbMmapFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore) { + super(shardId, indexSettings, indexStore); + } + + @Override + protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { + logger.debug("wrapping MMapDirectory for SMB"); + return new SmbDirectoryWrapper(new MMapDirectory(location, buildLockFactory())); + } +} diff --git 
a/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStore.java new file mode 100644 index 00000000000..76a7a04310c --- /dev/null +++ b/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStore.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store.fs; + +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.indices.store.IndicesStore; + +public class SmbMmapFsIndexStore extends AbstractIndexStore { + + @Inject + public SmbMmapFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { + super(index, indexSettings, indexService, indicesStore, nodeEnv); + } + + @Override + public Class shardDirectory() { + return SmbMmapFsDirectoryService.class; + } +} diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStoreModule.java new file mode 100644 index 00000000000..768fab5c0db --- /dev/null +++ b/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStoreModule.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.fs; + +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.index.store.IndexStore; + +public class SmbMmapFsIndexStoreModule extends AbstractModule { + + @Override + protected void configure() { + bind(IndexStore.class).to(SmbMmapFsIndexStore.class).asEagerSingleton(); + } +} diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsDirectoryService.java new file mode 100644 index 00000000000..61020bc1ef3 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsDirectoryService.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store.fs; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.LockFactory; +import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.SmbDirectoryWrapper; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.IndexStore; + +import java.io.IOException; +import java.nio.file.Path; + +public class SmbSimpleFsDirectoryService extends FsDirectoryService { + + @Inject + public SmbSimpleFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore) { + super(shardId, indexSettings, indexStore); + } + + @Override + protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { + logger.debug("wrapping SimpleFSDirectory for SMB"); + return new SmbDirectoryWrapper(new SimpleFSDirectory(location, lockFactory)); + } +} diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStore.java new file mode 100644 index 00000000000..bd3a5ad539b --- /dev/null +++ b/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStore.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.fs; + +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.indices.store.IndicesStore; + +public class SmbSimpleFsIndexStore extends AbstractIndexStore { + + @Inject + public SmbSimpleFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { + super(index, indexSettings, indexService, indicesStore, nodeEnv); + } + + @Override + public Class shardDirectory() { + return SmbSimpleFsDirectoryService.class; + } +} + diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStoreModule.java new file mode 100644 index 00000000000..88d075ab77b --- /dev/null +++ b/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStoreModule.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.store.fs; + +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.index.store.IndexStore; + +public class SmbSimpleFsIndexStoreModule extends AbstractModule { + + @Override + protected void configure() { + bind(IndexStore.class).to(SmbSimpleFsIndexStore.class).asEagerSingleton(); + } +} diff --git a/src/test/java/org/apache/lucene/store/SmbMMapDirectoryTest.java b/src/test/java/org/apache/lucene/store/SmbMMapDirectoryTest.java new file mode 100644 index 00000000000..65d837f7760 --- /dev/null +++ b/src/test/java/org/apache/lucene/store/SmbMMapDirectoryTest.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.lucene.store; + +import java.io.IOException; +import java.nio.file.Path; + +public class SmbMMapDirectoryTest extends BaseDirectoryTestCase { + + @Override + protected Directory getDirectory(Path file) throws IOException { + return new SmbDirectoryWrapper(new MMapDirectory(file)); + } +} \ No newline at end of file diff --git a/src/test/java/org/apache/lucene/store/SmbSimpleFSDirectoryTest.java b/src/test/java/org/apache/lucene/store/SmbSimpleFSDirectoryTest.java new file mode 100644 index 00000000000..5031411155c --- /dev/null +++ b/src/test/java/org/apache/lucene/store/SmbSimpleFSDirectoryTest.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.lucene.store; + +import java.io.IOException; +import java.nio.file.Path; + +public class SmbSimpleFSDirectoryTest extends BaseDirectoryTestCase { + + @Override + protected Directory getDirectory(Path file) throws IOException { + return new SmbDirectoryWrapper(new SimpleFSDirectory(file)); + } +} From de24b379ee5d4682feda68b63419868a752dbc9c Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 17:11:56 +0100 Subject: [PATCH 086/125] Script examples should escape special chars --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 42e165bec44..c25f56e8ef3 100644 --- a/README.md +++ b/README.md @@ -177,7 +177,7 @@ azure vm create azure-elasticsearch-cluster \ --vm-size extrasmall \ --ssh 22 \ --ssh-cert /tmp/azure-certificate.pem \ - elasticsearch password1234!! + elasticsearch password1234\!\! ``` You should see something like: @@ -327,7 +327,7 @@ azure vm create azure-elasticsearch-cluster \ --vm-size extrasmall \ --ssh 22 \ --ssh-cert /tmp/azure-certificate.pem \ - elasticsearch password1234!! + elasticsearch password1234\!\! ``` > **Note:** It could happen that azure changes the endpoint public IP address. @@ -352,7 +352,7 @@ for x in $(seq 2 10) --ssh $((21 + $x)) \ --ssh-cert /tmp/azure-certificate.pem \ --connect \ - elasticsearch password1234!! + elasticsearch password1234\!\! 
done ``` From 24cf553c9a3befc8b0eb98f9d3a4b2882a546df0 Mon Sep 17 00:00:00 2001 From: Shay Banon Date: Sat, 14 Feb 2015 17:53:55 +0100 Subject: [PATCH 087/125] Update README.md clarify why we wrote the smb dir --- README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index c25f56e8ef3..6719e6e23bb 100644 --- a/README.md +++ b/README.md @@ -447,13 +447,11 @@ To run test: mvn -Dtests.azure=true -Dtests.config=/path/to/config/file/elasticsearch.yml clean test ``` -Optimizing index storage on Azure File Service -============================================== -When using a shared file system based on the SMB protocol (like Azure File Service) to store indices, the way Lucene open index segment files -can slow down some functionalities like indexing documents and recovering index shards. If you'd like to know more about this behavior and how Lucene and SMB interact, -have a look at [LUCENE-6176](https://issues.apache.org/jira/browse/LUCENE-6176). +Working around a bug in Windows SMB and Java on windows +======================================================= +When using a shared file system based on the SMB protocol (like Azure File Service) to store indices, the way Lucene open index segment files is with a write only flag. This is the *correct* way to open the files, as they will only be used for writes and allows different FS implementations to optimize for it. Sadly, in windows with SMB, this disables the cache manager, causing writes to be slow. This has been described in [LUCENE-6176](https://issues.apache.org/jira/browse/LUCENE-6176), but it affects each and every Java program out there!. This need and must be fixed outside of ES and/or Lucene, either in windows or OpenJDK. For now, we are providing an experimental support to open the files with read flag, but this should be considered experimental and the correct way to fix it is in OpenJDK or Windows. 
-In order to get better performance, the Azure Cloud plugin provides two storage types optimized for SMB: +The Azure Cloud plugin provides two storage types optimized for SMB: - `org.elasticsearch.index.store.fs.SmbMmapFsIndexStoreModule`: a SMB specific implementation of the default [mmap fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#mmapfs) - `org.elasticsearch.index.store.fs.SmbSimpleFsIndexStoreModule`: a SMB specific implementation of the default [simple fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#simplefs) From 396cf117c9b77b0b88750c474893b2d89e663d0e Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 17 Feb 2015 10:33:39 +0100 Subject: [PATCH 088/125] Simplify setting index.store.type with smb_simple_fs and smb_mmap_fs This commit changes work done in #60 by using automatic store type discovery which exists in elasticsearch core. Instead of writing configuration like: ```yaml index.store.type: org.elasticsearch.index.store.fs.SmbMmapFsIndexStoreModule index.store.type: org.elasticsearch.index.store.fs.SmbSimpleFsIndexStoreModule ``` We can now use: ```yaml index.store.type: smb_mmap_fs index.store.type: smb_simple_fs ``` Note that we move `org.elasticsearch.index.store.fs.SmbMmapFsIndexStoreModule` to `org.elasticsearch.index.store.smbmmapfs.SmbMmapFsIndexStoreModule` and `org.elasticsearch.index.store.fs.SmbSimpleFsDirectoryService` to `org.elasticsearch.index.store.smbsimplefs.SmbSimpleFsDirectoryService` instead of deprecating the old classes and add new ones. It means that if users were using a 2.5.2-SNAPSHOT version, they will need to update their settings. 
(cherry picked from commit 613ce5a) (cherry picked from commit 7831521) --- README.md | 20 +++++---- .../SmbMmapFsDirectoryService.java | 3 +- .../SmbMmapFsIndexStore.java | 2 +- .../SmbMmapFsIndexStoreModule.java | 2 +- .../SmbSimpleFsDirectoryService.java | 3 +- .../SmbSimpleFsIndexStore.java | 2 +- .../SmbSimpleFsIndexStoreModule.java | 2 +- .../index/store/AbstractAzureFsTest.java | 42 +++++++++++++++++++ .../index/store/SmbMMapFsTest.java | 36 ++++++++++++++++ .../index/store/SmbSimpleFsTest.java | 34 +++++++++++++++ 10 files changed, 131 insertions(+), 15 deletions(-) rename src/main/java/org/elasticsearch/index/store/{fs => smbmmapfs}/SmbMmapFsDirectoryService.java (94%) rename src/main/java/org/elasticsearch/index/store/{fs => smbmmapfs}/SmbMmapFsIndexStore.java (97%) rename src/main/java/org/elasticsearch/index/store/{fs => smbmmapfs}/SmbMmapFsIndexStoreModule.java (95%) rename src/main/java/org/elasticsearch/index/store/{fs => smbsimplefs}/SmbSimpleFsDirectoryService.java (94%) rename src/main/java/org/elasticsearch/index/store/{fs => smbsimplefs}/SmbSimpleFsIndexStore.java (97%) rename src/main/java/org/elasticsearch/index/store/{fs => smbsimplefs}/SmbSimpleFsIndexStoreModule.java (95%) create mode 100644 src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java create mode 100644 src/test/java/org/elasticsearch/index/store/SmbMMapFsTest.java create mode 100644 src/test/java/org/elasticsearch/index/store/SmbSimpleFsTest.java diff --git a/README.md b/README.md index 6719e6e23bb..edb37903be6 100644 --- a/README.md +++ b/README.md @@ -453,24 +453,26 @@ When using a shared file system based on the SMB protocol (like Azure File Servi The Azure Cloud plugin provides two storage types optimized for SMB: -- `org.elasticsearch.index.store.fs.SmbMmapFsIndexStoreModule`: a SMB specific implementation of the default [mmap fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#mmapfs) -- 
`org.elasticsearch.index.store.fs.SmbSimpleFsIndexStoreModule`: a SMB specific implementation of the default [simple fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#simplefs) +- `smb_mmap_fs`: a SMB specific implementation of the default [mmap fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#mmapfs) +- `smb_simple_fs`: a SMB specific implementation of the default [simple fs](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-store.html#simplefs) -To use one of these specific storage types, you need to install the Azure Cloud plugin and restart the node. Then configure Elasticsearch to set -the storage type you want. +To use one of these specific storage types, you need to install the Azure Cloud plugin and restart the node. +Then configure Elasticsearch to set the storage type you want. -This can be configured for all indices by adding this to the config/elasticsearch.yml file: -``` -index.store.type: org.elasticsearch.index.store.fs.SmbSimpleFsIndexStoreModule +This can be configured for all indices by adding this to the `elasticsearch.yml` file: + +```yaml +index.store.type: smb_simple_fs ``` Note that setting will be applied for newly created indices. 
It can also be set on a per-index basis at index creation time: -``` + +```sh curl -XPUT localhost:9200/my_index -d '{ "settings": { - "index.store.type": "org.elasticsearch.index.store.fs.SmbMmapFsIndexStoreModule" + "index.store.type": "smb_mmap_fs" } }' ``` diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryService.java similarity index 94% rename from src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsDirectoryService.java rename to src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryService.java index cbdfb3f527d..9db1800658d 100644 --- a/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsDirectoryService.java +++ b/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsDirectoryService.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.store.fs; +package org.elasticsearch.index.store.smbmmapfs; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockFactory; @@ -28,6 +28,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.fs.FsDirectoryService; import java.io.IOException; import java.nio.file.Path; diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java similarity index 97% rename from src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStore.java rename to src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java index 76a7a04310c..e6df91c7738 100644 --- a/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStore.java +++ b/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStore.java @@ -17,7 +17,7 @@ * 
under the License. */ -package org.elasticsearch.index.store.fs; +package org.elasticsearch.index.store.smbmmapfs; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStoreModule.java similarity index 95% rename from src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStoreModule.java rename to src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStoreModule.java index 768fab5c0db..335a58b32c5 100644 --- a/src/main/java/org/elasticsearch/index/store/fs/SmbMmapFsIndexStoreModule.java +++ b/src/main/java/org/elasticsearch/index/store/smbmmapfs/SmbMmapFsIndexStoreModule.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.store.fs; +package org.elasticsearch.index.store.smbmmapfs; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.index.store.IndexStore; diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryService.java similarity index 94% rename from src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsDirectoryService.java rename to src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryService.java index 61020bc1ef3..af4c4702b5c 100644 --- a/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsDirectoryService.java +++ b/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsDirectoryService.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.store.fs; +package org.elasticsearch.index.store.smbsimplefs; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockFactory; @@ -28,6 +28,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.fs.FsDirectoryService; import java.io.IOException; import java.nio.file.Path; diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java similarity index 97% rename from src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStore.java rename to src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java index bd3a5ad539b..54f25b89199 100644 --- a/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStore.java +++ b/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStore.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.store.fs; +package org.elasticsearch.index.store.smbsimplefs; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; diff --git a/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStoreModule.java similarity index 95% rename from src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStoreModule.java rename to src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStoreModule.java index 88d075ab77b..9604dc11c77 100644 --- a/src/main/java/org/elasticsearch/index/store/fs/SmbSimpleFsIndexStoreModule.java +++ b/src/main/java/org/elasticsearch/index/store/smbsimplefs/SmbSimpleFsIndexStoreModule.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.store.fs; +package org.elasticsearch.index.store.smbsimplefs; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.index.store.IndexStore; diff --git a/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java b/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java new file mode 100644 index 00000000000..497e807d60e --- /dev/null +++ b/src/test/java/org/elasticsearch/index/store/AbstractAzureFsTest.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import static org.hamcrest.Matchers.is; + +abstract public class AbstractAzureFsTest extends ElasticsearchIntegrationTest { + + @Test + public void testAzureFs() { + // Create an index and index some documents + createIndex("test"); + long nbDocs = randomIntBetween(10, 1000); + for (long i = 0; i < nbDocs; i++) { + index("test", "doc", "" + i, "foo", "bar"); + } + refresh(); + SearchResponse response = client().prepareSearch("test").get(); + assertThat(response.getHits().totalHits(), is(nbDocs)); + } +} diff --git a/src/test/java/org/elasticsearch/index/store/SmbMMapFsTest.java b/src/test/java/org/elasticsearch/index/store/SmbMMapFsTest.java new file mode 100644 index 00000000000..ec4941cb24d --- /dev/null +++ b/src/test/java/org/elasticsearch/index/store/SmbMMapFsTest.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store; + +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; + + +public class SmbMMapFsTest extends AbstractAzureFsTest { + + @Override + public Settings indexSettings() { + return ImmutableSettings.builder() + .put(super.indexSettings()) + .put("index.store.type", "smb_mmap_fs") + .build(); + } + +} diff --git a/src/test/java/org/elasticsearch/index/store/SmbSimpleFsTest.java b/src/test/java/org/elasticsearch/index/store/SmbSimpleFsTest.java new file mode 100644 index 00000000000..96c55a42562 --- /dev/null +++ b/src/test/java/org/elasticsearch/index/store/SmbSimpleFsTest.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.store; + +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; + + +public class SmbSimpleFsTest extends AbstractAzureFsTest { + @Override + public Settings indexSettings() { + return ImmutableSettings.builder() + .put(super.indexSettings()) + .put("index.store.type", "smb_simple_fs") + .build(); + } +} From a69ef2b5e23db1f5831761ada5c7a98c0dac1c65 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 17 Feb 2015 12:30:52 +0100 Subject: [PATCH 089/125] [Test] use seed in container name and clean all after tests Until version 2.5.2, our integration tests were using the same `container_name` for all tests but different `base_path` for repositories. One test uses the default `base_path` so we ended up with conflicts while running at the same time a test from different locations (could be by developers and/or Jenkins itself). This commit changes this: * it creates a container name which uses the test seed, eg. `snapshot-itest-b2dbda478ab9da2f` * for `testMultipleRepositories`, we append `-1` and `-2` to the previous container name * for `testRemoveAndCreateContainer`, we append `-testremove` * for `testForbiddenContainerName`, we now remove containers which are supposed to be created: `123456789-123456789-123456789-123456789-123456789-123456789-123`, `elasticsearch-integration` and `elasticsearch-integration-007`. It also means that we wait up to 5 minutes if we launch just after the same test because Azure removes containers in an asynchronous way so it's not possible to create the container right after we send a remove container request. * After and before the test suite, we make sure that we clean the containers that have been created during the test suite execution. It means that after a test, no file should remain within the storage account used for those integration tests. Closes #50. 
--- .../azure/itest/AzureSimpleITest.java | 8 ++ .../cloud/azure/AbstractAzureTest.java | 10 +- .../azure/AzureSnapshotRestoreITest.java | 96 ++++++++++++------- 3 files changed, 75 insertions(+), 39 deletions(-) diff --git a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java index 62be0c2c0dc..a4bb854c986 100644 --- a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java +++ b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java @@ -40,6 +40,14 @@ import org.junit.Test; transportClientRatio = 0.0) public class AzureSimpleITest extends AbstractAzureTest { + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return ImmutableSettings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("discovery.type", "azure") + .build(); + } + @Test public void one_node_should_run() { // Do nothing... Just start :-) diff --git a/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java index 4503c962364..9d3cca01694 100644 --- a/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java +++ b/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTest.java @@ -55,10 +55,15 @@ public abstract class AbstractAzureTest extends ElasticsearchIntegrationTest { @Override protected Settings nodeSettings(int nodeOrdinal) { - ImmutableSettings.Builder settings = ImmutableSettings.builder() + return ImmutableSettings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true); + .put("plugins." 
+ PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true) + .put(readSettingsFromFile()) + .build(); + } + protected static Settings readSettingsFromFile() { + ImmutableSettings.Builder settings = ImmutableSettings.builder(); Environment environment = new Environment(); // if explicit, just load it and don't load from env @@ -73,5 +78,4 @@ public abstract class AbstractAzureTest extends ElasticsearchIntegrationTest { } return settings.build(); } - } diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index 490bb5f35bc..db10bff2c02 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -27,27 +27,30 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotR import org.elasticsearch.client.Client; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.cloud.azure.AbstractAzureTest; +import org.elasticsearch.cloud.azure.AzureSettingsFilter; import org.elasticsearch.cloud.azure.AzureStorageService; +import org.elasticsearch.cloud.azure.AzureStorageServiceImpl; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; import org.elasticsearch.common.base.Predicate; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotState; import 
org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.store.MockDirectoryHelper; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.*; import java.net.URISyntaxException; import java.util.concurrent.TimeUnit; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; /** * This test needs Azure to run and -Dtests.azure=true to be set @@ -61,16 +64,26 @@ import static org.hamcrest.Matchers.*; transportClientRatio = 0.0) public class AzureSnapshotRestoreITest extends AbstractAzureTest { + private static ESLogger logger = ESLoggerFactory.getLogger(AzureSnapshotRestoreITest.class.getName()); + private String getRepositoryPath() { - String testName = "/snapshot-itest/repo-".concat("" + randomIntBetween(1, 1000)); + String testName = "it-".concat(Strings.toUnderscoreCase(getTestName()).replaceAll("_", "-")); return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; } - private String getContainerName() { - String testName = "it-".concat(Strings.toUnderscoreCase(getTestName()).replaceAll("_", "-")); + private static String getContainerName() { + String testName = "snapshot-itest-".concat(getContext().getRunnerSeedAsString().toLowerCase()); return testName.contains(" ") ? 
Strings.split(testName, " ")[0] : testName; } + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return ImmutableSettings.builder().put(super.nodeSettings(nodeOrdinal)) + // In snapshot tests, we explicitly disable cloud discovery + .put("discovery.type", "local") + .build(); + } + @Override public Settings indexSettings() { // During restore we frequently restore index to exactly the same state it was before, that might cause the same @@ -81,18 +94,13 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { .build(); } - @Before - public final void wipeBefore() throws StorageException, URISyntaxException { + @Before @After + public final void wipeAzureRepositories() throws StorageException, URISyntaxException { wipeRepositories(); - cleanRepositoryFiles( - getContainerName(), - getContainerName().concat("-1"), - getContainerName().concat("-2")); } - @After - public final void wipeAfter() throws StorageException, URISyntaxException { - wipeRepositories(); + @BeforeClass @AfterClass + public static final void wipeAzureContainers() throws StorageException, URISyntaxException { cleanRepositoryFiles( getContainerName(), getContainerName().concat("-1"), @@ -329,7 +337,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { * For issue #21: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/21 */ @Test - public void testForbiddenContainerName() { + public void testForbiddenContainerName() throws URISyntaxException, StorageException, InterruptedException { checkContainerName("", false); checkContainerName("es", false); checkContainerName("-elasticsearch", false); @@ -347,23 +355,35 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { * @param container Container name we want to create * @param correct Is this container name correct */ - private void checkContainerName(String container, boolean correct) { + private void checkContainerName(final String container, final boolean correct) throws 
URISyntaxException, StorageException, InterruptedException { logger.info("--> creating azure repository with container name [{}]", container); - try { - PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") - .setType("azure").setSettings(ImmutableSettings.settingsBuilder() - .put(AzureStorageService.Fields.CONTAINER, container) - .put(AzureStorageService.Fields.BASE_PATH, getRepositoryPath()) - .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), is(correct)); - client().admin().cluster().prepareDeleteRepository("test-repo").get(); - } catch (RepositoryException e) { - if (correct) { - // We did not expect any exception here :( - throw e; + // It could happen that we just removed from a previous test the same container so + // we can not create it yet. + assertThat(awaitBusy(new Predicate() { + public boolean apply(Object obj) { + try { + PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put(AzureStorageService.Fields.CONTAINER, container) + .put(AzureStorageService.Fields.BASE_PATH, getRepositoryPath()) + .put(AzureStorageService.Fields.CHUNK_SIZE, randomIntBetween(1000, 10000)) + ).get(); + client().admin().cluster().prepareDeleteRepository("test-repo").get(); + try { + cleanRepositoryFiles(container); + } catch (StorageException | URISyntaxException e) { + // We can ignore that as we just try to clean after the test + } + return (putRepositoryResponse.isAcknowledged() == correct); + } catch (RepositoryVerificationException e) { + if (!correct) { + return true; + } + logger.debug(" -> container is being removed. 
Let's wait a bit..."); + return false; + } } - } + }, 5, TimeUnit.MINUTES), equalTo(true)); } /** @@ -395,7 +415,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { */ @Test public void testRemoveAndCreateContainer() throws URISyntaxException, StorageException, InterruptedException { - final String container = getContainerName(); + final String container = getContainerName().concat("-testremove"); final AzureStorageService storageService = internalCluster().getInstance(AzureStorageService.class); // It could happen that we run this test really close to a previous one @@ -451,8 +471,12 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { /** * Purge the test containers */ - public void cleanRepositoryFiles(String... containers) throws StorageException, URISyntaxException { - AzureStorageService client = internalCluster().getInstance(AzureStorageService.class); + public static void cleanRepositoryFiles(String... containers) throws StorageException, URISyntaxException { + Settings settings = readSettingsFromFile(); + SettingsFilter settingsFilter = new SettingsFilter(settings); + settingsFilter.addFilter(new AzureSettingsFilter()); + + AzureStorageService client = new AzureStorageServiceImpl(settings, settingsFilter); for (String container : containers) { logger.info("--> remove container [{}]", container); client.removeContainer(container); From 27546ecb1613b5bbcb28e3331cda3c7a9b739194 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 17 Feb 2015 14:48:11 +0100 Subject: [PATCH 090/125] Tests don't pass if elasticsearch.yml does not contain azure discovery settings For now, we remove forcing discovery settings with `discovery.type: azure`. So a developer who wants to run this test needs to add it in his `elasticsearch.yml` file. 
Due to #50 (cherry picked from commit 6ac4483) (cherry picked from commit 1a80c4b) --- .../java/org/elasticsearch/azure/itest/AzureSimpleITest.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java index a4bb854c986..4d3d79daf8f 100644 --- a/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java +++ b/src/test/java/org/elasticsearch/azure/itest/AzureSimpleITest.java @@ -44,7 +44,9 @@ public class AzureSimpleITest extends AbstractAzureTest { protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put("discovery.type", "azure") + // For now we let the user who runs tests to define if he wants or not to run discovery tests + // by setting in elasticsearch.yml: discovery.type: azure + // .put("discovery.type", "azure") .build(); } From a8961d28c988e9d0b7846ed526b9f1041cb19932 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 17 Feb 2015 16:39:09 +0100 Subject: [PATCH 091/125] Fix failing tests with elasticsearch 1.4.3 (cherry picked from commit 040e96f) (cherry picked from commit 13c83ce) --- .../discovery/azure/AbstractAzureComputeServiceTest.java | 7 +------ .../discovery/azure/AzureMinimumMasterNodesTest.java | 6 +++++- .../repositories/azure/AzureSnapshotRestoreITest.java | 6 +----- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java index 96dd47314f6..2eac5216b45 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java @@ -60,13 +60,8 @@ public abstract class AbstractAzureComputeServiceTest extends ElasticsearchInteg 
.put("cloud.azure.keystore", "dummy") .put("cloud.azure.password", "dummy") .put("cloud.azure.service_name", "dummy") - .put("cloud.azure.refresh_interval", "5s") // We need the network to make the mock working - .put("node.mode", "network") - // Make the tests run faster - .put("discovery.zen.join.timeout", "100ms") - .put("discovery.zen.ping.timeout", "10ms") - .put("discovery.initial_state_timeout", "300ms"); + .put("node.mode", "network"); return builder.build(); } diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java index b6614500c10..f3264380b7e 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java @@ -47,8 +47,12 @@ public class AzureMinimumMasterNodesTest extends AbstractAzureComputeServiceTest @Override protected final Settings settingsBuilder() { ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder() + .put(super.settingsBuilder()) .put("discovery.zen.minimum_master_nodes", 2) - .put(super.settingsBuilder()); + // Make the test run faster + .put("discovery.zen.join.timeout", "50ms") + .put("discovery.zen.ping.timeout", "10ms") + .put("discovery.initial_state_timeout", "100ms"); return builder.build(); } diff --git a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java index db10bff2c02..0625cabb187 100644 --- a/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java +++ b/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreITest.java @@ -33,8 +33,6 @@ import org.elasticsearch.cloud.azure.AzureStorageServiceImpl; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.base.Predicate; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; @@ -64,8 +62,6 @@ import static org.hamcrest.Matchers.greaterThan; transportClientRatio = 0.0) public class AzureSnapshotRestoreITest extends AbstractAzureTest { - private static ESLogger logger = ESLoggerFactory.getLogger(AzureSnapshotRestoreITest.class.getName()); - private String getRepositoryPath() { String testName = "it-".concat(Strings.toUnderscoreCase(getTestName()).replaceAll("_", "-")); return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; @@ -370,6 +366,7 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { ).get(); client().admin().cluster().prepareDeleteRepository("test-repo").get(); try { + logger.info("--> remove container [{}]", container); cleanRepositoryFiles(container); } catch (StorageException | URISyntaxException e) { // We can ignore that as we just try to clean after the test @@ -478,7 +475,6 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest { AzureStorageService client = new AzureStorageServiceImpl(settings, settingsFilter); for (String container : containers) { - logger.info("--> remove container [{}]", container); client.removeContainer(container); } } From 696804ebdbb5c03f6591d30a83490187e70b4578 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 18 Feb 2015 16:14:21 +0100 Subject: [PATCH 092/125] Fix log parameters --- .../org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java index 894ad5d413c..c7387ee7b82 100644 --- 
a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java @@ -150,7 +150,7 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent Date: Wed, 4 Feb 2015 17:11:56 +0100 Subject: [PATCH 093/125] Rename settings for repositories Storage and Management settings are not related in Azure API. We were using the following convention for storage specific settings: ```yml cloud: azure: storage_account: your_azure_storage_account storage_key: your_azure_storage_key ``` A better naming (and Java packaging) would be by moving `storage` settings in their own package `cloud.azure.storage`: ```yml cloud: azure: storage: account: your_azure_storage_account key: your_azure_storage_key ``` Note that we can still read deprecated settings but it will print a `WARN` with an advice to make sure users don't forget to move. (cherry picked from commit 6b30c62) (cherry picked from commit 1237be9) --- README.md | 15 ++++++++++-- .../cloud/azure/AzureModule.java | 23 +++++++++++++++---- .../cloud/azure/AzureSettingsFilter.java | 16 ++++++++----- .../cloud/azure/blobstore/AzureBlobStore.java | 2 +- .../{ => storage}/AzureStorageService.java | 12 ++++++---- .../AzureStorageServiceImpl.java | 9 ++++---- .../plugin/cloud/azure/CloudAzurePlugin.java | 15 +++++++++++- .../repositories/azure/AzureRepository.java | 2 +- .../storage}/AzureStorageServiceMock.java | 3 +-- .../AbstractAzureRepositoryServiceTest.java | 13 ++++++++--- .../azure/AzureSnapshotRestoreITest.java | 4 ++-- .../azure/AzureSnapshotRestoreTest.java | 1 + 12 files changed, 85 insertions(+), 30 deletions(-) rename src/main/java/org/elasticsearch/cloud/azure/{ => storage}/AzureStorageService.java (85%) rename src/main/java/org/elasticsearch/cloud/azure/{ => storage}/AzureStorageServiceImpl.java (95%) rename src/test/java/org/elasticsearch/{repositories/azure => cloud/azure/storage}/AzureStorageServiceMock.java (98%) diff 
--git a/README.md b/README.md index edb37903be6..76eb42d6195 100644 --- a/README.md +++ b/README.md @@ -365,7 +365,17 @@ azure vm delete myesnode1 Azure Repository ================ -To enable Azure repositories, you have first to set your azure storage settings: +To enable Azure repositories, you have first to set your azure storage settings in `elasticsearch.yml` file: + +``` +cloud: + azure: + storage: + account: your_azure_storage_account + key: your_azure_storage_key +``` + +For information, in previous version of the azure plugin, settings were: ``` cloud: @@ -430,11 +440,12 @@ Testing ======= Integrations tests in this plugin require working Azure configuration and therefore disabled by default. -To enable tests prepare a config file elasticsearch.yml with the following content: +To enable tests prepare a config file `elasticsearch.yml` with the following content: ``` cloud: azure: + storage: account: "YOUR-AZURE-STORAGE-NAME" key: "YOUR-AZURE-STORAGE-KEY" ``` diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java index 90ed8daa1f1..accba7ba1c1 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java @@ -20,6 +20,8 @@ package org.elasticsearch.cloud.azure; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.azure.storage.AzureStorageService; +import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; @@ -39,7 +41,7 @@ import org.elasticsearch.discovery.azure.AzureDiscovery; * * * @see org.elasticsearch.cloud.azure.AzureComputeServiceImpl - * @see org.elasticsearch.cloud.azure.AzureStorageServiceImpl + * @see org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl */ public class AzureModule extends 
AbstractModule { protected final ESLogger logger; @@ -121,9 +123,11 @@ public class AzureModule extends AbstractModule { return false; } - if (isPropertyMissing(settings, "cloud.azure." + AzureStorageService.Fields.ACCOUNT, null) || - isPropertyMissing(settings, "cloud.azure." + AzureStorageService.Fields.KEY, null)) { - logger.trace("azure repository is not set using {} and {} properties", + if ((isPropertyMissing(settings, "cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT, null) || + isPropertyMissing(settings, "cloud.azure.storage." + AzureStorageService.Fields.KEY, null)) && + (isPropertyMissing(settings, "cloud.azure." + AzureStorageService.Fields.ACCOUNT_DEPRECATED, null) || + isPropertyMissing(settings, "cloud.azure." + AzureStorageService.Fields.KEY_DEPRECATED, null))) { + logger.trace("azure repository is not set [using cloud.azure.storage.{}] and [cloud.azure.storage.{}] properties", AzureStorageService.Fields.ACCOUNT, AzureStorageService.Fields.KEY); return false; @@ -134,6 +138,17 @@ public class AzureModule extends AbstractModule { return true; } + /** + * Check if we are using any deprecated settings + */ + public static void checkDeprecatedSettings(Settings settings, String oldParameter, String newParameter, ESLogger logger) { + if (!isPropertyMissing(settings, oldParameter, null)) { + logger.warn("using deprecated [{}]. 
Please change it to [{}] property.", + oldParameter, + newParameter); + } + } + public static boolean isPropertyMissing(Settings settings, String name, ESLogger logger) throws ElasticsearchException { if (!Strings.hasText(settings.get(name))) { if (logger != null) { diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java index f8911ae8830..d00006127e5 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java @@ -19,11 +19,12 @@ package org.elasticsearch.cloud.azure; +import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.SettingsFilter; /** - * Filtering cloud.azure.* and repositories.azure.* settings + * Filtering cloud.azure.* settings */ public class AzureSettingsFilter implements SettingsFilter.Filter { @@ -35,10 +36,13 @@ public class AzureSettingsFilter implements SettingsFilter.Filter { settings.remove("cloud.azure.subscription_id"); settings.remove("cloud.azure.service_name"); - // Repositories settings - settings.remove("repositories.azure.account"); - settings.remove("repositories.azure.key"); - settings.remove("repositories.azure.container"); - settings.remove("repositories.azure.base_path"); + // Cloud storage API settings + settings.remove("cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT); + settings.remove("cloud.azure.storage." + AzureStorageService.Fields.KEY); + + // Deprecated Cloud storage API settings + // TODO Remove in 3.0.0 + settings.remove("cloud.azure." + AzureStorageService.Fields.ACCOUNT_DEPRECATED); + settings.remove("cloud.azure." 
+ AzureStorageService.Fields.KEY_DEPRECATED); } } diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index 12581d3b63e..75f6dea9adf 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -20,7 +20,7 @@ package org.elasticsearch.cloud.azure.blobstore; import com.microsoft.azure.storage.StorageException; -import org.elasticsearch.cloud.azure.AzureStorageService; +import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java b/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java similarity index 85% rename from src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java rename to src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index 97b5c26a69e..c2bc54f62dd 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.cloud.azure.storage; import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -29,12 +29,16 @@ import java.net.URISyntaxException; /** * Azure Storage Service interface - * @see org.elasticsearch.cloud.azure.AzureStorageServiceImpl for Azure REST API implementation + * @see AzureStorageServiceImpl for Azure REST API implementation */ public interface AzureStorageService { static public final class Fields { - public static final String ACCOUNT = "storage_account"; - public static final String KEY = "storage_key"; + @Deprecated + public static final String ACCOUNT_DEPRECATED = "storage_account"; + @Deprecated + public static final String KEY_DEPRECATED = "storage_key"; + public static final String ACCOUNT = "account"; + public static final String KEY = "key"; public static final String CONTAINER = "container"; public static final String BASE_PATH = "base_path"; public static final String CHUNK_SIZE = "chunk_size"; diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java similarity index 95% rename from src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java rename to src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index c7387ee7b82..d7383b61717 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureStorageServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -17,12 +17,13 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.cloud.azure.storage; import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.*; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.azure.AzureSettingsFilter; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.ImmutableMap; @@ -54,9 +55,9 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent Date: Wed, 4 Feb 2015 17:11:56 +0100 Subject: [PATCH 094/125] Rename settings for discovery Azure Java SDK follows some conventions about property names and we can use the same convention. The benefit is that it makes the code cleaner and easier to understand as components settings relies on package name without the `org.elasticsearch` prefix. In previous versions, we defined: ``` cloud: azure: keystore: /path/to/keystore password: your_password_for_keystore subscription_id: your_azure_subscription_id service_name: your_azure_cloud_service_name port_name: elasticsearch host_type: private_ip ``` We now define: ``` cloud: azure: management: keystore: path: /path/to/keystore password: your_password_for_keystore subscription: id: your_azure_subscription_id cloud: service: name: your_azure_cloud_service_name discovery: azure: host: type: private_ip endpoint: name: elasticsearch ``` The following are a list of settings (prefixed with `cloud.azure.management`) that can further control the discovery: * `keystore.path`: /path/to/keystore * `keystore.password`: your_password for the keystore * `subscription.id`: your_azure_subscription_id * `cloud.service.name`: your_azure_cloud_service_name So basically we deprecate the following properties: * `cloud.azure.keystore` to `cloud.azure.management.keystore.path` * `cloud.azure.password` to 
`cloud.azure.management.keystore.password` * `cloud.azure.subscription_id` to `cloud.azure.management.subscription.id` * `cloud.azure.service_name` to `cloud.azure.management.cloud.service.name` * `cloud.azure.port_name` to `discovery.azure.port.name` * `cloud.azure.host_type` to `discovery.azure.host.type` Note that we can still read deprecated settings but it will print a `WARN` with an advice to make sure users don't forget to move. (cherry picked from commit 04920a8) (cherry picked from commit 9745826) --- README.md | 49 +++++++++++-- .../cloud/azure/AzureComputeService.java | 40 ----------- .../cloud/azure/AzureModule.java | 70 +++++++++++++++---- .../cloud/azure/AzureSettingsFilter.java | 18 +++-- .../azure/management/AzureComputeService.java | 60 ++++++++++++++++ .../AzureComputeServiceImpl.java | 50 ++++++++----- .../azure/AzureUnicastHostsProvider.java | 12 ++-- .../plugin/cloud/azure/CloudAzurePlugin.java | 14 +--- .../AzureComputeServiceAbstractMock.java | 3 +- .../AzureComputeServiceSimpleMock.java | 2 +- .../AzureComputeServiceTwoNodesMock.java | 2 +- .../AbstractAzureComputeServiceTest.java | 2 +- .../azure/AzureInstanceXmlParserTest.java | 2 +- .../azure/AzureMinimumMasterNodesTest.java | 1 + .../discovery/azure/AzureSimpleTest.java | 1 + .../azure/AzureTwoStartedNodesTest.java | 1 + 16 files changed, 221 insertions(+), 106 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java rename src/main/java/org/elasticsearch/cloud/azure/{ => management}/AzureComputeServiceImpl.java (80%) rename src/test/java/org/elasticsearch/{discovery/azure => cloud/azure/management}/AzureComputeServiceAbstractMock.java (94%) rename src/test/java/org/elasticsearch/{discovery/azure => cloud/azure/management}/AzureComputeServiceSimpleMock.java (96%) rename src/test/java/org/elasticsearch/{discovery/azure => 
cloud/azure/management}/AzureComputeServiceTwoNodesMock.java (96%) diff --git a/README.md b/README.md index 76eb42d6195..74a80af6347 100644 --- a/README.md +++ b/README.md @@ -39,12 +39,16 @@ multicast environments). Here is a simple sample configuration: ``` cloud: azure: - keystore: /path/to/keystore - password: your_password_for_keystore - subscription_id: your_azure_subscription_id - service_name: your_azure_cloud_service_name + management: + subscription.id: XXX-XXX-XXX-XXX + cloud.service.name: es-demo-app + keystore: + path: /path/to/azurekeystore.pkcs12 + password: WHATEVER + type: pkcs12 + discovery: - type: azure + type: azure ``` How to start (short story) @@ -56,6 +60,41 @@ How to start (short story) * Modify `elasticsearch.yml` file * Start Elasticsearch +Azure credential API settings +----------------------------- + +The following are a list of settings (prefixed with `cloud.azure.management`) that can further control the discovery: + +* `keystore.path`: /path/to/keystore +* `keystore.type`: `pkcs12`, `jceks` or `jks`. Defaults to `pkcs12`. +* `keystore.password`: your_password for the keystore +* `subscription.id`: your_azure_subscription_id +* `cloud.service.name`: your_azure_cloud_service_name + +Note that in previous versions, it was: + +``` +cloud: + azure: + keystore: /path/to/keystore + password: your_password_for_keystore + subscription_id: your_azure_subscription_id + service_name: your_azure_cloud_service_name +``` + +Advanced settings +----------------- + +The following are a list of settings (prefixed with `discovery.azure`) that can further control the discovery: + +* `host.type`: either `public_ip` or `private_ip` (default). Azure discovery will use the one you set to ping +other nodes. This feature was not documented before but was existing under `cloud.azure.host_type`. +* `endpoint.name`: when using `public_ip` this setting is used to identify the endpoint name used to forward requests +to elasticsearch (aka transport port name). 
Defaults to `elasticsearch`. In Azure management console, you could define +an endpoint `elasticsearch` forwarding for example requests on public IP on port 8100 to the virtual machine on port 9300. +This feature was not documented before but was existing under `cloud.azure.port_name`. + + How to start (long story) -------------------------- diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java b/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java deleted file mode 100644 index fc7469b666a..00000000000 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeService.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cloud.azure; - -import java.util.Set; - -/** - * - */ -public interface AzureComputeService { - - static public final class Fields { - public static final String SUBSCRIPTION_ID = "subscription_id"; - public static final String SERVICE_NAME = "service_name"; - public static final String KEYSTORE = "keystore"; - public static final String PASSWORD = "password"; - public static final String REFRESH = "refresh_interval"; - public static final String PORT_NAME = "port_name"; - public static final String HOST_TYPE = "host_type"; - } - - public Set instances(); -} diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java index accba7ba1c1..f7ebdc2ca16 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java @@ -22,6 +22,8 @@ package org.elasticsearch.cloud.azure; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; +import org.elasticsearch.cloud.azure.management.AzureComputeService; +import org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; @@ -40,7 +42,7 @@ import org.elasticsearch.discovery.azure.AzureDiscovery; * to AzureStorageServiceImpl. * * - * @see org.elasticsearch.cloud.azure.AzureComputeServiceImpl + * @see org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl * @see org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl */ public class AzureModule extends AbstractModule { @@ -99,11 +101,23 @@ public class AzureModule extends AbstractModule { return false; } - if (isPropertyMissing(settings, "cloud.azure." 
+ AzureComputeService.Fields.SUBSCRIPTION_ID, logger) || - isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.SERVICE_NAME, logger) || - isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.KEYSTORE, logger) || - isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.PASSWORD, logger) + if ( // We check new parameters + (isPropertyMissing(settings, "cloud.azure.management." + AzureComputeService.Fields.SUBSCRIPTION_ID) || + isPropertyMissing(settings, "cloud.azure.management." + AzureComputeService.Fields.SERVICE_NAME) || + isPropertyMissing(settings, "cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PATH) || + isPropertyMissing(settings, "cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PASSWORD)) + // We check deprecated + && (isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.SUBSCRIPTION_ID_DEPRECATED) || + isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.SERVICE_NAME_DEPRECATED) || + isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.KEYSTORE_DEPRECATED) || + isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.PASSWORD_DEPRECATED)) ) { + logger.debug("one or more azure discovery settings are missing. " + + "Check elasticsearch.yml file and `cloud.azure.management`. Should have [{}], [{}], [{}] and [{}].", + AzureComputeService.Fields.SUBSCRIPTION_ID, + AzureComputeService.Fields.SERVICE_NAME, + AzureComputeService.Fields.KEYSTORE_PATH, + AzureComputeService.Fields.KEYSTORE_PASSWORD); return false; } @@ -123,11 +137,11 @@ public class AzureModule extends AbstractModule { return false; } - if ((isPropertyMissing(settings, "cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT, null) || - isPropertyMissing(settings, "cloud.azure.storage." + AzureStorageService.Fields.KEY, null)) && - (isPropertyMissing(settings, "cloud.azure." 
+ AzureStorageService.Fields.ACCOUNT_DEPRECATED, null) || - isPropertyMissing(settings, "cloud.azure." + AzureStorageService.Fields.KEY_DEPRECATED, null))) { - logger.trace("azure repository is not set [using cloud.azure.storage.{}] and [cloud.azure.storage.{}] properties", + if ((isPropertyMissing(settings, "cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT) || + isPropertyMissing(settings, "cloud.azure.storage." + AzureStorageService.Fields.KEY)) && + (isPropertyMissing(settings, "cloud.azure." + AzureStorageService.Fields.ACCOUNT_DEPRECATED) || + isPropertyMissing(settings, "cloud.azure." + AzureStorageService.Fields.KEY_DEPRECATED))) { + logger.debug("azure repository is not set [using cloud.azure.storage.{}] and [cloud.azure.storage.{}] properties", AzureStorageService.Fields.ACCOUNT, AzureStorageService.Fields.KEY); return false; @@ -142,18 +156,44 @@ public class AzureModule extends AbstractModule { * Check if we are using any deprecated settings */ public static void checkDeprecatedSettings(Settings settings, String oldParameter, String newParameter, ESLogger logger) { - if (!isPropertyMissing(settings, oldParameter, null)) { + if (!isPropertyMissing(settings, oldParameter)) { logger.warn("using deprecated [{}]. Please change it to [{}] property.", oldParameter, newParameter); } } - public static boolean isPropertyMissing(Settings settings, String name, ESLogger logger) throws ElasticsearchException { + /** + * Check all deprecated settings + * @param settings + * @param logger + */ + public static void checkDeprecated(Settings settings, ESLogger logger) { + // Cloud services are disabled + if (isCloudReady(settings)) { + checkDeprecatedSettings(settings, "cloud.azure." + AzureStorageService.Fields.ACCOUNT_DEPRECATED, + "cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT, logger); + checkDeprecatedSettings(settings, "cloud.azure." + AzureStorageService.Fields.KEY_DEPRECATED, + "cloud.azure.storage." 
+ AzureStorageService.Fields.KEY, logger); + + // TODO Remove in 3.0.0 + checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.KEYSTORE_DEPRECATED, + "cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PATH, logger); + checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.PASSWORD_DEPRECATED, + "cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PASSWORD, logger); + checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.SERVICE_NAME_DEPRECATED, + "cloud.azure.management." + AzureComputeService.Fields.SERVICE_NAME, logger); + checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.SUBSCRIPTION_ID_DEPRECATED, + "cloud.azure.management." + AzureComputeService.Fields.SUBSCRIPTION_ID, logger); + checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.HOST_TYPE_DEPRECATED, + "discovery.azure." + AzureComputeService.Fields.HOST_TYPE, logger); + checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.PORT_NAME_DEPRECATED, + "discovery.azure." 
+ AzureComputeService.Fields.ENDPOINT_NAME, logger); + } + } + + public static boolean isPropertyMissing(Settings settings, String name) throws ElasticsearchException { if (!Strings.hasText(settings.get(name))) { - if (logger != null) { - logger.warn("{} is not set or is incorrect.", name); - } return true; } return false; diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java index d00006127e5..f42b8648a0a 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java @@ -19,6 +19,7 @@ package org.elasticsearch.cloud.azure; +import org.elasticsearch.cloud.azure.management.AzureComputeService; import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.SettingsFilter; @@ -30,11 +31,18 @@ public class AzureSettingsFilter implements SettingsFilter.Filter { @Override public void filter(ImmutableSettings.Builder settings) { - // Cloud settings - settings.remove("cloud.azure.keystore"); - settings.remove("cloud.azure.password"); - settings.remove("cloud.azure.subscription_id"); - settings.remove("cloud.azure.service_name"); + // Cloud management API settings + settings.remove("cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PATH); + settings.remove("cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PASSWORD); + settings.remove("cloud.azure.management." + AzureComputeService.Fields.SUBSCRIPTION_ID); + settings.remove("cloud.azure.management." + AzureComputeService.Fields.SERVICE_NAME); + + // Deprecated Cloud management API settings + // TODO Remove in 3.0.0 + settings.remove("cloud.azure." + AzureComputeService.Fields.KEYSTORE_DEPRECATED); + settings.remove("cloud.azure." 
+ AzureComputeService.Fields.PASSWORD_DEPRECATED); + settings.remove("cloud.azure." + AzureComputeService.Fields.SUBSCRIPTION_ID_DEPRECATED); + settings.remove("cloud.azure." + AzureComputeService.Fields.SERVICE_NAME_DEPRECATED); // Cloud storage API settings settings.remove("cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT); diff --git a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java new file mode 100644 index 00000000000..8202ed07a54 --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure.management; + +import org.elasticsearch.cloud.azure.Instance; + +import java.util.Set; + +/** + * + */ +public interface AzureComputeService { + + static public final class Fields { + // Deprecated azure management cloud settings + @Deprecated + public static final String SUBSCRIPTION_ID_DEPRECATED = "subscription_id"; + @Deprecated + public static final String SERVICE_NAME_DEPRECATED = "service_name"; + @Deprecated + public static final String KEYSTORE_DEPRECATED = "keystore"; + @Deprecated + public static final String PASSWORD_DEPRECATED = "password"; + @Deprecated + public static final String PORT_NAME_DEPRECATED = "port_name"; + @Deprecated + public static final String HOST_TYPE_DEPRECATED = "host_type"; + + public static final String SUBSCRIPTION_ID = "subscription.id"; + public static final String SERVICE_NAME = "cloud.service.name"; + + // Keystore settings + public static final String KEYSTORE_PATH = "keystore.path"; + public static final String KEYSTORE_PASSWORD = "keystore.password"; + + public static final String REFRESH = "refresh_interval"; + + public static final String HOST_TYPE = "host.type"; + public static final String ENDPOINT_NAME = "endpoint.name"; + } + + public Set instances(); +} diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java similarity index 80% rename from src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java rename to src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java index 71372544973..707d707b4e1 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java @@ -17,9 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.cloud.azure; +package org.elasticsearch.cloud.azure.management; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.azure.AzureSettingsFilter; +import org.elasticsearch.cloud.azure.Instance; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -62,11 +64,13 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent(); } else { try { - InputStream stream = getXML("/services/hostedservices/" + service_name + "?embed-detail=true"); - Set instances = buildInstancesFromXml(stream, port_name); + InputStream stream = getXML("/services/hostedservices/" + serviceName + "?embed-detail=true"); + Set instances = buildInstancesFromXml(stream, publicEndpointName); logger.trace("get instances from azure: {}", instances); return instances; } catch (ParserConfigurationException | XPathExpressionException | SAXException e) { @@ -132,7 +146,7 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent buildInstancesFromXml(InputStream inputStream, String port_name) throws ParserConfigurationException, IOException, SAXException, XPathExpressionException { + public static Set buildInstancesFromXml(InputStream inputStream, String portName) throws ParserConfigurationException, IOException, SAXException, XPathExpressionException { Set instances = new HashSet<>(); DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance(); @@ -154,7 +168,7 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent - expression = "InstanceEndpoints/InstanceEndpoint[Name='"+ port_name +"']"; + expression = "InstanceEndpoints/InstanceEndpoint[Name='"+ portName +"']"; NodeList endpoints = (NodeList) xPath.compile(expression).evaluate(node, XPathConstants.NODESET); for (int j = 0; j < endpoints.getLength(); j++) { Node endpoint = endpoints.item(j); diff --git 
a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java index 242812a8d3f..3637f4a0045 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java @@ -21,7 +21,8 @@ package org.elasticsearch.discovery.azure; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; -import org.elasticsearch.cloud.azure.AzureComputeService; +import org.elasticsearch.cloud.azure.management.AzureComputeService; +import org.elasticsearch.cloud.azure.management.AzureComputeService.Fields; import org.elasticsearch.cloud.azure.Instance; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.collect.Lists; @@ -71,11 +72,10 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic this.networkService = networkService; this.version = version; - this.refreshInterval = componentSettings.getAsTime(AzureComputeService.Fields.REFRESH, - settings.getAsTime("cloud.azure." + AzureComputeService.Fields.REFRESH, TimeValue.timeValueSeconds(0))); - this.host_type = HostType.valueOf(componentSettings.get(AzureComputeService.Fields.HOST_TYPE, - settings.get("cloud.azure." + AzureComputeService.Fields.HOST_TYPE, HostType.PRIVATE_IP.name())).toUpperCase()); - + this.refreshInterval = componentSettings.getAsTime(Fields.REFRESH, + settings.getAsTime("cloud.azure." + Fields.REFRESH, TimeValue.timeValueSeconds(0))); + this.host_type = HostType.valueOf(componentSettings.get(Fields.HOST_TYPE, + settings.get("cloud.azure." 
+ Fields.HOST_TYPE_DEPRECATED, HostType.PRIVATE_IP.name())).toUpperCase()); } /** diff --git a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java index 75de66c0f5a..8382db0ffc8 100644 --- a/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java +++ b/src/main/java/org/elasticsearch/plugin/cloud/azure/CloudAzurePlugin.java @@ -20,7 +20,6 @@ package org.elasticsearch.plugin.cloud.azure; import org.elasticsearch.cloud.azure.AzureModule; -import org.elasticsearch.cloud.azure.storage.AzureStorageService; import org.elasticsearch.common.collect.Lists; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.logging.ESLogger; @@ -33,7 +32,7 @@ import org.elasticsearch.repositories.azure.AzureRepositoryModule; import java.util.Collection; -import static org.elasticsearch.cloud.azure.AzureModule.checkDeprecatedSettings; +import static org.elasticsearch.cloud.azure.AzureModule.checkDeprecated; import static org.elasticsearch.cloud.azure.AzureModule.isSnapshotReady; /** @@ -46,6 +45,8 @@ public class CloudAzurePlugin extends AbstractPlugin { public CloudAzurePlugin(Settings settings) { this.settings = settings; + logger.trace("starting azure plugin..."); + checkDeprecated(settings, logger); } @Override @@ -71,16 +72,7 @@ public class CloudAzurePlugin extends AbstractPlugin { public void processModule(Module module) { if (isSnapshotReady(settings, logger) && module instanceof RepositoriesModule) { - // Check if we have any deprecated setting - checkDeprecated(); ((RepositoriesModule)module).registerRepository(AzureRepository.TYPE, AzureRepositoryModule.class); } } - - private void checkDeprecated() { - checkDeprecatedSettings(settings, "cloud.azure." + AzureStorageService.Fields.ACCOUNT_DEPRECATED, - "cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT, logger); - checkDeprecatedSettings(settings, "cloud.azure." 
+ AzureStorageService.Fields.KEY_DEPRECATED, - "cloud.azure.storage." + AzureStorageService.Fields.KEY, logger); - } } diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceAbstractMock.java similarity index 94% rename from src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java rename to src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceAbstractMock.java index 295a56c93c7..c11060a84a9 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceAbstractMock.java +++ b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceAbstractMock.java @@ -17,10 +17,9 @@ * under the License. */ -package org.elasticsearch.discovery.azure; +package org.elasticsearch.cloud.azure.management; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cloud.azure.AzureComputeService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java similarity index 96% rename from src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java rename to src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java index ca83a3285d8..04bad839f13 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceSimpleMock.java +++ b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.discovery.azure; +package org.elasticsearch.cloud.azure.management; import org.elasticsearch.cloud.azure.Instance; import org.elasticsearch.common.inject.Inject; diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java similarity index 96% rename from src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java rename to src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java index a04366bf5e0..a40ef5887cc 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureComputeServiceTwoNodesMock.java +++ b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.discovery.azure; +package org.elasticsearch.cloud.azure.management; import org.elasticsearch.cloud.azure.Instance; import org.elasticsearch.common.inject.Inject; diff --git a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java index 2eac5216b45..1c080ff0715 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AbstractAzureComputeServiceTest.java @@ -20,7 +20,7 @@ package org.elasticsearch.discovery.azure; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.cloud.azure.AzureComputeService; +import org.elasticsearch.cloud.azure.management.AzureComputeService; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.PluginsService; diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java 
b/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java index 4593c4333ba..fde26456955 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java @@ -19,8 +19,8 @@ package org.elasticsearch.discovery.azure; -import org.elasticsearch.cloud.azure.AzureComputeServiceImpl; import org.elasticsearch.cloud.azure.Instance; +import org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl; import org.junit.Assert; import org.junit.Test; import org.xml.sax.SAXException; diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java index f3264380b7e..042fd079a96 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureMinimumMasterNodesTest.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.azure; +import org.elasticsearch.cloud.azure.management.AzureComputeServiceTwoNodesMock; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.MasterNotDiscoveredException; diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java index 0d9fbd939f8..2c9b745cd01 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.azure; +import org.elasticsearch.cloud.azure.management.AzureComputeServiceSimpleMock; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java 
b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java index d6d807a73b3..72b323f3662 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.azure; +import org.elasticsearch.cloud.azure.management.AzureComputeServiceTwoNodesMock; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; From 85ac406f63eceb459cda2b1b0f57f4366dd6ede0 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 17:11:56 +0100 Subject: [PATCH 095/125] Use Azure Java Management SDK 0.7.0 This first version adds `azure-management` 0.7.0 instead of using our own XML implementation. We can now have more control and give more options to the users. We now support different keystore types using `cloud.azure.management.keystore.type`: * `pkcs12` * `jceks` * `jks` Closes #38 (cherry picked from commit 72c77d3) (cherry picked from commit d2541ab) --- README.md | 13 +- pom.xml | 11 ++ src/main/assemblies/plugin.xml | 2 + .../azure/AzureServiceDisableException.java | 36 ++++ .../azure/AzureServiceRemoteException.java | 36 ++++ .../cloud/azure/AzureSettingsFilter.java | 4 + .../elasticsearch/cloud/azure/Instance.java | 113 ----------- .../azure/management/AzureComputeService.java | 7 +- .../management/AzureComputeServiceImpl.java | 180 +++++------------- .../azure/AzureUnicastHostsProvider.java | 157 +++++++++++---- .../AzureComputeServiceSimpleMock.java | 38 +++- .../AzureComputeServiceTwoNodesMock.java | 63 ++++-- .../azure/AzureInstanceXmlParserTest.java | 61 ------ .../discovery/azure/AzureSimpleTest.java | 40 +++- .../azure/AzureTwoStartedNodesTest.java | 31 ++- .../org/elasticsearch/azure/test/services.xml | 170 ----------------- 16 files changed, 406 insertions(+), 556 deletions(-) create mode 100644 
src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java delete mode 100644 src/main/java/org/elasticsearch/cloud/azure/Instance.java delete mode 100644 src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java delete mode 100644 src/test/resources/org/elasticsearch/azure/test/services.xml diff --git a/README.md b/README.md index 74a80af6347..76f2d650b69 100644 --- a/README.md +++ b/README.md @@ -323,12 +323,15 @@ And add the following lines: # If you don't remember your account id, you may get it with `azure account list` cloud: azure: - keystore: /home/elasticsearch/azurekeystore.pkcs12 - password: your_password_for_keystore - subscription_id: your_azure_subscription_id - service_name: your_azure_cloud_service_name + management: + subscription.id: your_azure_subscription_id + cloud.service.name: your_azure_cloud_service_name + keystore: + path: /home/elasticsearch/azurekeystore.pkcs12 + password: your_password_for_keystore + discovery: - type: azure + type: azure # Recommended (warning: non durable disk) # path.data: /mnt/resource/elasticsearch/data diff --git a/pom.xml b/pom.xml index 387c788b755..e1dcf42c02b 100644 --- a/pom.xml +++ b/pom.xml @@ -73,6 +73,17 @@ governing permissions and limitations under the License. --> 2.0.0 + + com.microsoft.azure + azure-management-compute + 0.7.0 + + + com.microsoft.azure + azure-management + 0.7.0 + + log4j log4j diff --git a/src/main/assemblies/plugin.xml b/src/main/assemblies/plugin.xml index 3de1c5befa5..bf4bf511b12 100644 --- a/src/main/assemblies/plugin.xml +++ b/src/main/assemblies/plugin.xml @@ -31,6 +31,8 @@ governing permissions and limitations under the License. 
--> true com.microsoft.azure:azure-storage + com.microsoft.azure:azure-management + com.microsoft.azure:azure-management-compute diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java b/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java new file mode 100644 index 00000000000..8bd2ebc3e59 --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureServiceDisableException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure; + +import org.elasticsearch.ElasticsearchIllegalStateException; + +public class AzureServiceDisableException extends ElasticsearchIllegalStateException { + public AzureServiceDisableException() { + super(null); + } + + public AzureServiceDisableException(String msg) { + super(msg); + } + + public AzureServiceDisableException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java b/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java new file mode 100644 index 00000000000..088ca44545a --- /dev/null +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureServiceRemoteException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cloud.azure; + +import org.elasticsearch.ElasticsearchIllegalStateException; + +public class AzureServiceRemoteException extends ElasticsearchIllegalStateException { + public AzureServiceRemoteException() { + super(null); + } + + public AzureServiceRemoteException(String msg) { + super(msg); + } + + public AzureServiceRemoteException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java index f42b8648a0a..313e80f35bc 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java @@ -31,9 +31,13 @@ public class AzureSettingsFilter implements SettingsFilter.Filter { @Override public void filter(ImmutableSettings.Builder settings) { + // Cloud global settings + settings.remove("cloud.azure." + AzureComputeService.Fields.REFRESH); + // Cloud management API settings settings.remove("cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PATH); settings.remove("cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PASSWORD); + settings.remove("cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_TYPE); settings.remove("cloud.azure.management." + AzureComputeService.Fields.SUBSCRIPTION_ID); settings.remove("cloud.azure.management." + AzureComputeService.Fields.SERVICE_NAME); diff --git a/src/main/java/org/elasticsearch/cloud/azure/Instance.java b/src/main/java/org/elasticsearch/cloud/azure/Instance.java deleted file mode 100644 index e81987b66d1..00000000000 --- a/src/main/java/org/elasticsearch/cloud/azure/Instance.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cloud.azure; - -/** - * Define an Azure Instance - */ -public class Instance { - public static enum Status { - STARTED - } - - private String privateIp; - private String publicIp; - private String publicPort; - private Status status; - private String name; - - public String getPrivateIp() { - return privateIp; - } - - public void setPrivateIp(String privateIp) { - this.privateIp = privateIp; - } - - public Status getStatus() { - return status; - } - - public void setStatus(Status status) { - this.status = status; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getPublicIp() { - return publicIp; - } - - public void setPublicIp(String publicIp) { - this.publicIp = publicIp; - } - - public String getPublicPort() { - return publicPort; - } - - public void setPublicPort(String publicPort) { - this.publicPort = publicPort; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - Instance instance = (Instance) o; - - if (name != null ? !name.equals(instance.name) : instance.name != null) return false; - if (privateIp != null ? !privateIp.equals(instance.privateIp) : instance.privateIp != null) return false; - if (publicIp != null ? 
!publicIp.equals(instance.publicIp) : instance.publicIp != null) return false; - if (publicPort != null ? !publicPort.equals(instance.publicPort) : instance.publicPort != null) return false; - if (status != instance.status) return false; - - return true; - } - - @Override - public int hashCode() { - int result = privateIp != null ? privateIp.hashCode() : 0; - result = 31 * result + (publicIp != null ? publicIp.hashCode() : 0); - result = 31 * result + (publicPort != null ? publicPort.hashCode() : 0); - result = 31 * result + (status != null ? status.hashCode() : 0); - result = 31 * result + (name != null ? name.hashCode() : 0); - return result; - } - - @Override - public String toString() { - final StringBuffer sb = new StringBuffer("Instance{"); - sb.append("privateIp='").append(privateIp).append('\''); - sb.append(", publicIp='").append(publicIp).append('\''); - sb.append(", publicPort='").append(publicPort).append('\''); - sb.append(", status=").append(status); - sb.append(", name='").append(name).append('\''); - sb.append('}'); - return sb.toString(); - } -} diff --git a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java index 8202ed07a54..28d72404b1f 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java @@ -19,9 +19,7 @@ package org.elasticsearch.cloud.azure.management; -import org.elasticsearch.cloud.azure.Instance; - -import java.util.Set; +import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; /** * @@ -49,6 +47,7 @@ public interface AzureComputeService { // Keystore settings public static final String KEYSTORE_PATH = "keystore.path"; public static final String KEYSTORE_PASSWORD = "keystore.password"; + public static final String KEYSTORE_TYPE = "keystore.type"; public static final 
String REFRESH = "refresh_interval"; @@ -56,5 +55,5 @@ public interface AzureComputeService { public static final String ENDPOINT_NAME = "endpoint.name"; } - public Set instances(); + public HostedServiceGetDetailedResponse getServiceDetails(); } diff --git a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java index 707d707b4e1..baad3236259 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java @@ -19,38 +19,24 @@ package org.elasticsearch.cloud.azure.management; +import com.microsoft.windowsazure.Configuration; +import com.microsoft.windowsazure.core.utils.KeyStoreType; +import com.microsoft.windowsazure.management.compute.ComputeManagementClient; +import com.microsoft.windowsazure.management.compute.ComputeManagementService; +import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; +import com.microsoft.windowsazure.management.configuration.ManagementConfiguration; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cloud.azure.AzureServiceDisableException; +import org.elasticsearch.cloud.azure.AzureServiceRemoteException; import org.elasticsearch.cloud.azure.AzureSettingsFilter; -import org.elasticsearch.cloud.azure.Instance; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; -import org.w3c.dom.Document; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; -import org.xml.sax.SAXException; -import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSocketFactory; -import 
javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.xpath.XPath; -import javax.xml.xpath.XPathConstants; -import javax.xml.xpath.XPathExpressionException; -import javax.xml.xpath.XPathFactory; -import java.io.File; -import java.io.FileInputStream; import java.io.IOException; -import java.io.InputStream; -import java.net.URL; -import java.security.*; -import java.security.cert.CertificateException; -import java.util.HashSet; -import java.util.Set; +import java.net.URI; +import java.net.URISyntaxException; /** * @@ -60,140 +46,57 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent instances() { - if (socketFactory == null) { + public HostedServiceGetDetailedResponse getServiceDetails() { + if (computeManagementClient == null) { // Azure plugin is disabled - logger.trace("azure plugin is disabled. Returning an empty list of nodes."); - return new HashSet<>(); - } else { - try { - InputStream stream = getXML("/services/hostedservices/" + serviceName + "?embed-detail=true"); - Set instances = buildInstancesFromXml(stream, publicEndpointName); - logger.trace("get instances from azure: {}", instances); - return instances; - } catch (ParserConfigurationException | XPathExpressionException | SAXException e) { - logger.warn("can not parse XML response: {}", e.getMessage()); - } catch (Exception e) { - logger.warn("can not get list of azure nodes: {}", e.getMessage()); - } - } - return new HashSet<>(); - } - - private static String extractValueFromPath(Node node, String path) throws XPathExpressionException { - XPath xPath = XPathFactory.newInstance().newXPath(); - Node subnode = (Node) xPath.compile(path).evaluate(node, XPathConstants.NODE); - return subnode.getFirstChild().getNodeValue(); - } - - public static Set buildInstancesFromXml(InputStream inputStream, String portName) throws ParserConfigurationException, IOException, SAXException, 
XPathExpressionException { - Set instances = new HashSet<>(); - - DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance(); - DocumentBuilder dBuilder = dbFactory.newDocumentBuilder(); - Document doc = dBuilder.parse(inputStream); - - doc.getDocumentElement().normalize(); - - XPath xPath = XPathFactory.newInstance().newXPath(); - - // We only fetch Started nodes (TODO: should we start with all nodes whatever the status is?) - String expression = "/HostedService/Deployments/Deployment/RoleInstanceList/RoleInstance[PowerState='Started']"; - NodeList nodeList = (NodeList) xPath.compile(expression).evaluate(doc, XPathConstants.NODESET); - for (int i = 0; i < nodeList.getLength(); i++) { - Instance instance = new Instance(); - Node node = nodeList.item(i); - instance.setPrivateIp(extractValueFromPath(node, "IpAddress")); - instance.setName(extractValueFromPath(node, "InstanceName")); - instance.setStatus(Instance.Status.STARTED); - - // Let's digg into - expression = "InstanceEndpoints/InstanceEndpoint[Name='"+ portName +"']"; - NodeList endpoints = (NodeList) xPath.compile(expression).evaluate(node, XPathConstants.NODESET); - for (int j = 0; j < endpoints.getLength(); j++) { - Node endpoint = endpoints.item(j); - instance.setPublicIp(extractValueFromPath(endpoint, "Vip")); - instance.setPublicPort(extractValueFromPath(endpoint, "PublicPort")); - } - - instances.add(instance); + throw new AzureServiceDisableException("azure plugin is disabled."); } - return instances; - } - - private SSLSocketFactory getSocketFactory(String keystore, String password) throws NoSuchAlgorithmException, KeyStoreException, IOException, CertificateException, UnrecoverableKeyException, KeyManagementException { - File pKeyFile = new File(keystore); - KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); - KeyStore keyStore = KeyStore.getInstance("PKCS12"); - InputStream keyInput = new FileInputStream(pKeyFile); - keyStore.load(keyInput, 
password.toCharArray()); - keyInput.close(); - keyManagerFactory.init(keyStore, password.toCharArray()); - - SSLContext context = SSLContext.getInstance("TLS"); - context.init(keyManagerFactory.getKeyManagers(), null, new SecureRandom()); - return context.getSocketFactory(); + try { + return computeManagementClient.getHostedServicesOperations().getDetailed(serviceName); + } catch (Exception e) { + throw new AzureServiceRemoteException("can not get list of azure nodes", e); + } } @Override @@ -206,5 +109,12 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent cachedDiscoNodes; - private final HostType host_type; + private final HostType hostType; + private final String publicEndpointName; + private final String deploymentName; + private final DeploymentSlot deploymentSlot; @Inject public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureComputeService, @@ -74,8 +77,30 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic this.refreshInterval = componentSettings.getAsTime(Fields.REFRESH, settings.getAsTime("cloud.azure." + Fields.REFRESH, TimeValue.timeValueSeconds(0))); - this.host_type = HostType.valueOf(componentSettings.get(Fields.HOST_TYPE, - settings.get("cloud.azure." + Fields.HOST_TYPE_DEPRECATED, HostType.PRIVATE_IP.name())).toUpperCase()); + + HostType tmpHostType; + String strHostType = componentSettings.get(Fields.HOST_TYPE, + settings.get("cloud.azure." + Fields.HOST_TYPE_DEPRECATED, HostType.PRIVATE_IP.name())).toUpperCase(); + try { + tmpHostType = HostType.valueOf(strHostType); + } catch (IllegalArgumentException e) { + logger.warn("wrong value for [{}]: [{}]. falling back to [{}]...", Fields.HOST_TYPE, + strHostType, HostType.PRIVATE_IP.name().toLowerCase()); + tmpHostType = HostType.PRIVATE_IP; + } + this.hostType = tmpHostType; + + // TODO Remove in 3.0.0 + String portName = settings.get("cloud.azure." 
+ Fields.PORT_NAME_DEPRECATED); + if (portName != null) { + logger.warn("setting [{}] has been deprecated. please replace with [{}].", Fields.PORT_NAME_DEPRECATED, Fields.ENDPOINT_NAME); + this.publicEndpointName = portName; + } else { + this.publicEndpointName = componentSettings.get(Fields.ENDPOINT_NAME, "elasticsearch"); + } + + this.deploymentName = componentSettings.get(Fields.SERVICE_NAME, settings.get("cloud.azure." + Fields.SERVICE_NAME_DEPRECATED)); + this.deploymentSlot = DeploymentSlot.Production; } /** @@ -98,60 +123,110 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic cachedDiscoNodes = Lists.newArrayList(); - Set response = azureComputeService.instances(); - - String ipAddress = null; + HostedServiceGetDetailedResponse detailed; try { - InetAddress inetAddress = networkService.resolvePublishHostAddress(null); - if (inetAddress != null) { - ipAddress = inetAddress.getHostAddress(); - } - logger.trace("ipAddress found: [{}]", ipAddress); - } catch (IOException e) { - // We can't find the publish host address... Hmmm. Too bad :-( - logger.trace("exception while finding ipAddress", e); + detailed = azureComputeService.getServiceDetails(); + } catch (AzureServiceDisableException e) { + logger.debug("Azure discovery service has been disabled. Returning empty list of nodes."); + return cachedDiscoNodes; + } catch (AzureServiceRemoteException e) { + // We got a remote exception + logger.warn("can not get list of azure nodes: [{}]. Returning empty list of nodes.", e.getMessage()); + logger.trace("AzureServiceRemoteException caught", e); + return cachedDiscoNodes; } + InetAddress ipAddress = null; try { - for (Instance instance : response) { + ipAddress = networkService.resolvePublishHostAddress(null); + logger.trace("ip of current node: [{}]", ipAddress); + } catch (IOException e) { + // We can't find the publish host address... Hmmm. 
Too bad :-( + logger.trace("exception while finding ip", e); + } + + for (HostedServiceGetDetailedResponse.Deployment deployment : detailed.getDeployments()) { + // We check the deployment slot + if (deployment.getDeploymentSlot() != deploymentSlot) { + logger.debug("current deployment slot [{}] for [{}] is different from [{}]. skipping...", + deployment.getDeploymentSlot(), deployment.getName(), deploymentSlot); + continue; + } + + // If provided, we check the deployment name + if (deploymentName != null && !deploymentName.equals(deployment.getName())) { + logger.debug("current deployment name [{}] different from [{}]. skipping...", + deployment.getName(), deploymentName); + continue; + } + + // We check current deployment status + if (deployment.getStatus() != DeploymentStatus.Starting && + deployment.getStatus() != DeploymentStatus.Deploying && + deployment.getStatus() != DeploymentStatus.Running) { + logger.debug("[{}] status is [{}]. skipping...", + deployment.getName(), deployment.getStatus()); + continue; + } + + // In other case, it should be the right deployment so we can add it to the list of instances + + for (RoleInstance instance : deployment.getRoleInstances()) { String networkAddress = null; // Let's detect if we want to use public or private IP - if (host_type == HostType.PRIVATE_IP) { - if (instance.getPrivateIp() != null) { - if (instance.getPrivateIp().equals(ipAddress)) { - logger.trace("adding ourselves {}", ipAddress); + switch (hostType) { + case PRIVATE_IP: + InetAddress privateIp = instance.getIPAddress(); + + if (privateIp != null) { + if (privateIp.equals(ipAddress)) { + logger.trace("adding ourselves {}", ipAddress); + } + networkAddress = privateIp.getHostAddress(); + } else { + logger.trace("no private ip provided. 
ignoring [{}]...", instance.getInstanceName()); } - networkAddress = instance.getPrivateIp(); - } else { - logger.trace("no private ip provided ignoring {}", instance.getName()); - } - } - if (host_type == HostType.PUBLIC_IP) { - if (instance.getPublicIp() != null && instance.getPublicPort() != null) { - networkAddress = instance.getPublicIp() + ":" +instance.getPublicPort(); - } else { - logger.trace("no public ip provided ignoring {}", instance.getName()); - } + break; + case PUBLIC_IP: + for (InstanceEndpoint endpoint : instance.getInstanceEndpoints()) { + if (!publicEndpointName.equals(endpoint.getName())) { + logger.trace("ignoring endpoint [{}] as different than [{}]", + endpoint.getName(), publicEndpointName); + continue; + } + + networkAddress = endpoint.getVirtualIPAddress().getHostAddress() + ":" + endpoint.getPort(); + } + + if (networkAddress == null) { + logger.trace("no public ip provided. ignoring [{}]...", instance.getInstanceName()); + } + break; + default: + // This could never happen! + logger.warn("undefined host_type [{}]. Please check your settings.", hostType); + return cachedDiscoNodes; } if (networkAddress == null) { // We have a bad parameter here or not enough information from azure - throw new ElasticsearchIllegalArgumentException("can't find any " + host_type.name() + " address"); - } else { + logger.warn("no network address found. 
ignoring [{}]...", instance.getInstanceName()); + continue; + } + + try { TransportAddress[] addresses = transportService.addressesFromString(networkAddress); // we only limit to 1 addresses, makes no sense to ping 100 ports logger.trace("adding {}, transport_address {}", networkAddress, addresses[0]); - cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getName(), addresses[0], version.minimumCompatibilityVersion())); + cachedDiscoNodes.add(new DiscoveryNode("#cloud-" + instance.getInstanceName(), addresses[0], + version.minimumCompatibilityVersion())); + } catch (Exception e) { + logger.warn("can not convert [{}] to transport address. skipping. [{}]", networkAddress, e.getMessage()); } - } - } catch (Throwable e) { - logger.warn("Exception caught during discovery {} : {}", e.getClass().getName(), e.getMessage()); - logger.trace("Exception caught during discovery", e); } logger.debug("{} node(s) added", cachedDiscoNodes.size()); - logger.debug("using dynamic discovery nodes {}", cachedDiscoNodes); return cachedDiscoNodes; } diff --git a/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java index 04bad839f13..fccd9fec71a 100644 --- a/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java +++ b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceSimpleMock.java @@ -19,12 +19,12 @@ package org.elasticsearch.cloud.azure.management; -import org.elasticsearch.cloud.azure.Instance; +import com.microsoft.windowsazure.management.compute.models.*; +import org.elasticsearch.common.collect.Lists; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import java.util.HashSet; -import java.util.Set; +import java.net.InetAddress; /** * Mock Azure API with a single started node @@ -37,12 +37,32 @@ public class AzureComputeServiceSimpleMock 
extends AzureComputeServiceAbstractMo } @Override - public Set instances() { - Set instances = new HashSet<>(); - Instance azureHost = new Instance(); - azureHost.setPrivateIp("127.0.0.1"); - instances.add(azureHost); + public HostedServiceGetDetailedResponse getServiceDetails() { + HostedServiceGetDetailedResponse response = new HostedServiceGetDetailedResponse(); + HostedServiceGetDetailedResponse.Deployment deployment = new HostedServiceGetDetailedResponse.Deployment(); - return instances; + // Fake the deployment + deployment.setName("dummy"); + deployment.setDeploymentSlot(DeploymentSlot.Production); + deployment.setStatus(DeploymentStatus.Running); + + // Fake an instance + RoleInstance instance = new RoleInstance(); + instance.setInstanceName("dummy1"); + + // Fake the private IP + instance.setIPAddress(InetAddress.getLoopbackAddress()); + + // Fake the public IP + InstanceEndpoint endpoint = new InstanceEndpoint(); + endpoint.setName("elasticsearch"); + endpoint.setVirtualIPAddress(InetAddress.getLoopbackAddress()); + endpoint.setPort(9400); + instance.setInstanceEndpoints(Lists.newArrayList(endpoint)); + + deployment.setRoleInstances(Lists.newArrayList(instance)); + response.setDeployments(Lists.newArrayList(deployment)); + + return response; } } diff --git a/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java index a40ef5887cc..30900e6e4e7 100644 --- a/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java +++ b/src/test/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceTwoNodesMock.java @@ -19,32 +19,69 @@ package org.elasticsearch.cloud.azure.management; -import org.elasticsearch.cloud.azure.Instance; +import com.microsoft.windowsazure.management.compute.models.*; +import org.elasticsearch.common.collect.Lists; import org.elasticsearch.common.inject.Inject; +import 
org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import java.util.HashSet; -import java.util.Set; +import java.net.InetAddress; /** * Mock Azure API with two started nodes */ public class AzureComputeServiceTwoNodesMock extends AzureComputeServiceAbstractMock { + NetworkService networkService; + @Inject - protected AzureComputeServiceTwoNodesMock(Settings settings) { + protected AzureComputeServiceTwoNodesMock(Settings settings, NetworkService networkService) { super(settings); + this.networkService = networkService; } @Override - public Set instances() { - Set instances = new HashSet<>(); - Instance azureHost = new Instance(); - azureHost.setPrivateIp("127.0.0.1"); - instances.add(azureHost); - azureHost = new Instance(); - azureHost.setPrivateIp("127.0.0.1"); - instances.add(azureHost); - return instances; + public HostedServiceGetDetailedResponse getServiceDetails() { + HostedServiceGetDetailedResponse response = new HostedServiceGetDetailedResponse(); + HostedServiceGetDetailedResponse.Deployment deployment = new HostedServiceGetDetailedResponse.Deployment(); + + // Fake the deployment + deployment.setName("dummy"); + deployment.setDeploymentSlot(DeploymentSlot.Production); + deployment.setStatus(DeploymentStatus.Running); + + // Fake a first instance + RoleInstance instance1 = new RoleInstance(); + instance1.setInstanceName("dummy1"); + + // Fake the private IP + instance1.setIPAddress(InetAddress.getLoopbackAddress()); + + // Fake the public IP + InstanceEndpoint endpoint1 = new InstanceEndpoint(); + endpoint1.setName("elasticsearch"); + endpoint1.setVirtualIPAddress(InetAddress.getLoopbackAddress()); + endpoint1.setPort(9400); + instance1.setInstanceEndpoints(Lists.newArrayList(endpoint1)); + + // Fake a first instance + RoleInstance instance2 = new RoleInstance(); + instance2.setInstanceName("dummy1"); + + // Fake the private IP + instance2.setIPAddress(InetAddress.getLoopbackAddress()); + + // Fake the 
public IP + InstanceEndpoint endpoint2 = new InstanceEndpoint(); + endpoint2.setName("elasticsearch"); + endpoint2.setVirtualIPAddress(InetAddress.getLoopbackAddress()); + endpoint2.setPort(9401); + instance2.setInstanceEndpoints(Lists.newArrayList(endpoint2)); + + deployment.setRoleInstances(Lists.newArrayList(instance1, instance2)); + + response.setDeployments(Lists.newArrayList(deployment)); + + return response; } } diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java deleted file mode 100644 index fde26456955..00000000000 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureInstanceXmlParserTest.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.discovery.azure; - -import org.elasticsearch.cloud.azure.Instance; -import org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl; -import org.junit.Assert; -import org.junit.Test; -import org.xml.sax.SAXException; - -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.xpath.XPathExpressionException; -import java.io.IOException; -import java.io.InputStream; -import java.util.HashSet; -import java.util.Set; - -public class AzureInstanceXmlParserTest { - - private Instance build(String name, String privateIpAddress, - String publicIpAddress, - String publicPort, - Instance.Status status) { - Instance instance = new Instance(); - instance.setName(name); - instance.setPrivateIp(privateIpAddress); - instance.setPublicIp(publicIpAddress); - instance.setPublicPort(publicPort); - instance.setStatus(status); - return instance; - } - - @Test - public void testReadXml() throws ParserConfigurationException, SAXException, XPathExpressionException, IOException { - InputStream inputStream = AzureInstanceXmlParserTest.class.getResourceAsStream("/org/elasticsearch/azure/test/services.xml"); - Set instances = AzureComputeServiceImpl.buildInstancesFromXml(inputStream, "elasticsearch"); - - Set expected = new HashSet<>(); - expected.add(build("es-windows2008", "10.53.250.55", null, null, Instance.Status.STARTED)); - expected.add(build("myesnode1", "10.53.218.75", "137.116.213.150", "9300", Instance.Status.STARTED)); - - Assert.assertArrayEquals(expected.toArray(), instances.toArray()); - } -} diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java index 2c9b745cd01..ce1af466f23 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureSimpleTest.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery.azure; import 
org.elasticsearch.cloud.azure.management.AzureComputeServiceSimpleMock; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -36,9 +37,44 @@ public class AzureSimpleTest extends AbstractAzureComputeServiceTest { } @Test - public void one_node_should_run() { + public void one_node_should_run_using_private_ip() { + ImmutableSettings.Builder settings = ImmutableSettings.settingsBuilder() + .put("cloud.azure.service_name", "dummy") + .put("cloud.azure.host_type", "private_ip") + .put(super.settingsBuilder()); + logger.info("--> start one node"); - internalCluster().startNode(settingsBuilder()); + internalCluster().startNode(settings); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); + + // We expect having 1 node as part of the cluster, let's test that + checkNumberOfNodes(1); + } + + @Test + public void one_node_should_run_using_public_ip() { + ImmutableSettings.Builder settings = ImmutableSettings.settingsBuilder() + .put("cloud.azure.service_name", "dummy") + .put("cloud.azure.host_type", "public_ip") + .put(super.settingsBuilder()); + + logger.info("--> start one node"); + internalCluster().startNode(settings); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); + + // We expect having 1 node as part of the cluster, let's test that + checkNumberOfNodes(1); + } + + @Test + public void one_node_should_run_using_wrong_settings() { + ImmutableSettings.Builder settings = ImmutableSettings.settingsBuilder() + .put("cloud.azure.service_name", "dummy") + .put("cloud.azure.host_type", "do_not_exist") + .put(super.settingsBuilder()); + + logger.info("--> start one node"); + internalCluster().startNode(settings); 
assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); // We expect having 1 node as part of the cluster, let's test that diff --git a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java index 72b323f3662..fc872b0be49 100644 --- a/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java +++ b/src/test/java/org/elasticsearch/discovery/azure/AzureTwoStartedNodesTest.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery.azure; import org.elasticsearch.cloud.azure.management.AzureComputeServiceTwoNodesMock; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -36,13 +37,37 @@ public class AzureTwoStartedNodesTest extends AbstractAzureComputeServiceTest { } @Test - public void two_nodes_should_run() { + public void two_nodes_should_run_using_private_ip() { + ImmutableSettings.Builder settings = ImmutableSettings.settingsBuilder() + .put("cloud.azure.service_name", "dummy") + .put("cloud.azure.host_type", "private_ip") + .put(super.settingsBuilder()); + logger.info("--> start first node"); - internalCluster().startNode(settingsBuilder()); + internalCluster().startNode(settings); assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); logger.info("--> start another node"); - internalCluster().startNode(settingsBuilder()); + internalCluster().startNode(settings); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); + + // We expect having 2 nodes as part of the cluster, let's test that + checkNumberOfNodes(2); + } + + @Test + public void 
two_nodes_should_run_using_public_ip() { + ImmutableSettings.Builder settings = ImmutableSettings.settingsBuilder() + .put("cloud.azure.service_name", "dummy") + .put("cloud.azure.host_type", "public_ip") + .put(super.settingsBuilder()); + + logger.info("--> start first node"); + internalCluster().startNode(settings); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); + + logger.info("--> start another node"); + internalCluster().startNode(settings); assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("1s").execute().actionGet().getState().nodes().masterNodeId(), notNullValue()); // We expect having 2 nodes as part of the cluster, let's test that diff --git a/src/test/resources/org/elasticsearch/azure/test/services.xml b/src/test/resources/org/elasticsearch/azure/test/services.xml deleted file mode 100644 index 0b8bd9ab1f8..00000000000 --- a/src/test/resources/org/elasticsearch/azure/test/services.xml +++ /dev/null @@ -1,170 +0,0 @@ - - - https://management.core.windows.net/a836d210-c681-4916-ae7d-9ca5f1b4906f/services/hostedservices/azure-elasticsearch-cluster - azure-elasticsearch-cluster - - Implicitly created hosted service - West Europe - - Created - 2013-09-23T12:45:45Z - 2013-09-23T13:42:46Z - - - - - azure-elasticsearch-cluster - Production - c6a690796e6b497a918487546193b94a - Running - - http://azure-elasticsearch-cluster.cloudapp.net/ - PFNlcnZpY2VDb25maWd1cmF0aW9uIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiIHhtbG5zPSJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL1NlcnZpY2VIb3N0aW5nLzIwMDgvMTAvU2VydmljZUNvbmZpZ3VyYXRpb24iPg0KICA8Um9sZSBuYW1lPSJlcy13aW5kb3dzMjAwOCI+DQogICAgPEluc3RhbmNlcyBjb3VudD0iMSIgLz4NCiAgPC9Sb2xlPg0KICA8Um9sZSBuYW1lPSJteWVzbm9kZTEiPg0KICAgIDxJbnN0YW5jZXMgY291bnQ9IjEiIC8+DQogIDwvUm9sZT4NCjwvU2VydmljZUNvbmZpZ3VyYXRpb24+ - - - 
es-windows2008 - es-windows2008 - ReadyRole - 0 - 0 - ExtraSmall - - 10.53.250.55 - - - PowerShell - 137.116.213.150 - 5986 - 5986 - tcp - - - Remote Desktop - 137.116.213.150 - 53867 - 3389 - tcp - - - Started - es-windows2008 - E6798DEACEAD4752825A2ABFC0A9A367389ED802 - - - myesnode1 - myesnode1 - ReadyRole - 0 - 0 - ExtraSmall - - 10.53.218.75 - - - ssh - 137.116.213.150 - 22 - 22 - tcp - - - elasticsearch - 137.116.213.150 - 9300 - 9300 - tcp - - - Started - myesnode1 - - - 1 - - - es-windows2008 - - PersistentVMRole - - - NetworkConfiguration - - - 5986 - PowerShell - 5986 - tcp - 137.116.213.150 - - - 3389 - Remote Desktop - 53867 - tcp - 137.116.213.150 - - - - - - - - ReadWrite - azure-elasticsearch-cluster-es-windows2008-0-201309231342520150 - http://portalvhdsv5skghwlmf765.blob.core.windows.net/vhds/azure-elasticsearch-cluster-es-windows2008-2013-09-23.vhd - a699494373c04fc0bc8f2bb1389d6106__Win2K8R2SP1-Datacenter-201308.02-en.us-127GB.vhd - Windows - - ExtraSmall - - - myesnode1 - - PersistentVMRole - - - NetworkConfiguration - - - 22 - ssh - 22 - tcp - 137.116.213.150 - - - - - - - - ReadWrite - azure-elasticsearch-cluster-myesnode1-0-201309231246380270 - http://azureelasticsearchcluste.blob.core.windows.net/vhd-store/myesnode1-0cc86cc84c0ed2e0.vhd - b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20130808-alpha3-en-us-30GB - Linux - - ExtraSmall - - - - false - false - 2013-09-23T12:46:27Z - 2013-09-23T20:38:58Z - - - 2013-08-27T08:00:00Z - 2013-09-27T20:00:00Z - PersistentVMUpdateScheduled - - - -
    137.116.213.150
    - true - azure-elasticsearch-clusterContractContract -
    -
    -
    -
    -
    From ff0547f04dfd99a5889e8b6d716834dbcbc6f3ae Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 4 Feb 2015 17:11:56 +0100 Subject: [PATCH 096/125] Add keystore.type, deployment.name and deployment.slot settings for discovery We can now support two new options: * `deployment.name`: deployment name if any. Defaults to the value set with `cloud.azure.cloud.service.name`. * `deployment.slot`: either `staging` or `production` (default). (cherry picked from commit 6d7ee49) (cherry picked from commit 10beb68) --- README.md | 16 +++++ .../azure/management/AzureComputeService.java | 2 + .../azure/AzureUnicastHostsProvider.java | 61 ++++++++++++++++--- 3 files changed, 71 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 76f2d650b69..193c4ab5e58 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,23 @@ other nodes. This feature was not documented before but was existing under `clou to elasticsearch (aka transport port name). Defaults to `elasticsearch`. In Azure management console, you could define an endpoint `elasticsearch` forwarding for example requests on public IP on port 8100 to the virtual machine on port 9300. This feature was not documented before but was existing under `cloud.azure.port_name`. +* `deployment.name`: deployment name if any. Defaults to the value set with `cloud.azure.management.cloud.service.name`. +* `deployment.slot`: either `staging` or `production` (default). 
+For example: + +``` +discovery: + type: azure + azure: + host: + type: private_ip + endpoint: + name: elasticsearch + deployment: + name: your_azure_cloud_service_name + slot: production +``` How to start (long story) -------------------------- diff --git a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java index 28d72404b1f..3fd1a5b2532 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java @@ -53,6 +53,8 @@ public interface AzureComputeService { public static final String HOST_TYPE = "host.type"; public static final String ENDPOINT_NAME = "endpoint.name"; + public static final String DEPLOYMENT_NAME = "deployment.name"; + public static final String DEPLOYMENT_SLOT = "deployment.slot"; } public HostedServiceGetDetailedResponse getServiceDetails(); diff --git a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java index 9196c8e2bec..85a29657c56 100644 --- a/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java +++ b/src/main/java/org/elasticsearch/discovery/azure/AzureUnicastHostsProvider.java @@ -46,10 +46,46 @@ import java.util.List; public class AzureUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { public static enum HostType { - PRIVATE_IP, - PUBLIC_IP + PRIVATE_IP("private_ip"), + PUBLIC_IP("public_ip"); + + private String type ; + + private HostType(String type) { + this.type = type ; + } + + public static HostType fromString(String type) { + for (HostType hostType : values()) { + if (hostType.type.equalsIgnoreCase(type)) { + return hostType; + } + } + return null; + } } + public static enum Deployment { + PRODUCTION("production", DeploymentSlot.Production), + 
STAGING("staging", DeploymentSlot.Staging); + + private String deployment; + private DeploymentSlot slot; + + private Deployment(String deployment, DeploymentSlot slot) { + this.deployment = deployment; + this.slot = slot; + } + + public static Deployment fromString(String string) { + for (Deployment deployment : values()) { + if (deployment.deployment.equalsIgnoreCase(string)) { + return deployment; + } + } + return null; + } + } private final AzureComputeService azureComputeService; private TransportService transportService; @@ -78,12 +114,10 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic this.refreshInterval = componentSettings.getAsTime(Fields.REFRESH, settings.getAsTime("cloud.azure." + Fields.REFRESH, TimeValue.timeValueSeconds(0))); - HostType tmpHostType; String strHostType = componentSettings.get(Fields.HOST_TYPE, settings.get("cloud.azure." + Fields.HOST_TYPE_DEPRECATED, HostType.PRIVATE_IP.name())).toUpperCase(); - try { - tmpHostType = HostType.valueOf(strHostType); - } catch (IllegalArgumentException e) { + HostType tmpHostType = HostType.fromString(strHostType); + if (tmpHostType == null) { logger.warn("wrong value for [{}]: [{}]. falling back to [{}]...", Fields.HOST_TYPE, strHostType, HostType.PRIVATE_IP.name().toLowerCase()); tmpHostType = HostType.PRIVATE_IP; @@ -99,8 +133,19 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic this.publicEndpointName = componentSettings.get(Fields.ENDPOINT_NAME, "elasticsearch"); } - this.deploymentName = componentSettings.get(Fields.SERVICE_NAME, settings.get("cloud.azure." + Fields.SERVICE_NAME_DEPRECATED)); - this.deploymentSlot = DeploymentSlot.Production; + this.deploymentName = componentSettings.get(Fields.DEPLOYMENT_NAME, + settings.get("cloud.azure.management." + Fields.SERVICE_NAME, + settings.get("cloud.azure." 
+ Fields.SERVICE_NAME_DEPRECATED))); + + // Reading deployment_slot + String strDeployment = componentSettings.get(Fields.DEPLOYMENT_SLOT, Deployment.PRODUCTION.deployment); + Deployment tmpDeployment = Deployment.fromString(strDeployment); + if (tmpDeployment == null) { + logger.warn("wrong value for [{}]: [{}]. falling back to [{}]...", Fields.DEPLOYMENT_SLOT, + strDeployment, Deployment.PRODUCTION.deployment); + tmpDeployment = Deployment.PRODUCTION; + } + this.deploymentSlot = tmpDeployment.slot; } /** From 80e6609c7d129a3bc7fc196b5db7e2fbb0eea1f1 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Sat, 28 Mar 2015 11:36:26 +0100 Subject: [PATCH 097/125] Move parent after artifact coordinates --- pom.xml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pom.xml b/pom.xml index e1dcf42c02b..10c14473f78 100644 --- a/pom.xml +++ b/pom.xml @@ -15,12 +15,6 @@ governing permissions and limitations under the License. --> xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 - - org.elasticsearch - elasticsearch-parent - 2.0.0-SNAPSHOT - - org.elasticsearch elasticsearch-cloud-azure 3.0.0-SNAPSHOT @@ -42,6 +36,12 @@ governing permissions and limitations under the License. --> http://github.com/elasticsearch/elasticsearch-cloud-azure + + org.elasticsearch + elasticsearch-parent + 2.0.0-SNAPSHOT + + From 7833b17b7c1452060a0223687bf2ed19b17b23a1 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Sat, 28 Mar 2015 11:34:33 +0100 Subject: [PATCH 098/125] Move owner to elastic (cherry picked from commit d50057a) --- pom.xml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pom.xml b/pom.xml index 10c14473f78..202437ddb0a 100644 --- a/pom.xml +++ b/pom.xml @@ -21,7 +21,7 @@ governing permissions and limitations under the License. 
--> jar Elasticsearch Azure cloud plugin The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism and add Azure storage repositories. - https://github.com/elasticsearch/elasticsearch-cloud-azure/ + https://github.com/elastic/elasticsearch-cloud-azure/ 2013 @@ -31,9 +31,9 @@ governing permissions and limitations under the License. --> - scm:git:git@github.com:elasticsearch/elasticsearch-cloud-azure.git - scm:git:git@github.com:elasticsearch/elasticsearch-cloud-azure.git - http://github.com/elasticsearch/elasticsearch-cloud-azure + scm:git:git@github.com:elastic/elasticsearch-cloud-azure.git + scm:git:git@github.com:elastic/elasticsearch-cloud-azure.git + http://github.com/elastic/elasticsearch-cloud-azure From b7c44087cf8df7da8a528ef632de4fa3b506d60a Mon Sep 17 00:00:00 2001 From: David Pilato Date: Sat, 28 Mar 2015 11:43:29 +0100 Subject: [PATCH 099/125] Create branch es-1.5 --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 193c4ab5e58..27b6db0b5f2 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,8 @@ You need to install a version matching your Elasticsearch version: | Elasticsearch | Azure Cloud Plugin| Docs | |------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------| | master | Build from source | See below | -| es-1.x | Build from source | [2.6.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#version-260-snapshot-for-elasticsearch-1x)| +| es-1.x | Build from source | [2.7.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.x/#version-270-snapshot-for-elasticsearch-1x)| +| es-1.5 | Build from source | [2.6.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/es-1.5/#version-260-snapshot-for-elasticsearch-15)| | es-1.4 | 2.5.1 | 
[2.5.1](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.5.1/#version-251-for-elasticsearch-14) | | es-1.3 | 2.4.0 | [2.4.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.4.0/#version-240-for-elasticsearch-13) | | es-1.2 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-azure/tree/v2.3.0/#azure-cloud-plugin-for-elasticsearch) | From cd7b8d484058a5a9f416bc7892a36d4cee7e6398 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 27 Feb 2015 11:15:45 +0100 Subject: [PATCH 100/125] Update settings filter and remove component settings from AbstractComponent Update settings filter to match elasticsearch/elasticsearch#9748 Remove component settings from AbstractComponent as seen in elasticsearch/elasticsearch#9919 Closes #71. Closes #72. --- README.md | 30 ++++---- .../cloud/azure/AzureModule.java | 75 +++++++++---------- .../cloud/azure/AzureSettingsFilter.java | 60 --------------- .../cloud/azure/blobstore/AzureBlobStore.java | 7 +- .../azure/management/AzureComputeService.java | 49 ++++++------ .../management/AzureComputeServiceImpl.java | 20 +++-- .../AzureComputeSettingsFilter.java | 46 ++++++++++++ .../azure/storage/AzureStorageService.java | 20 ++--- .../storage/AzureStorageServiceImpl.java | 14 ++-- .../storage/AzureStorageSettingsFilter.java | 43 +++++++++++ .../discovery/azure/AzureDiscovery.java | 4 +- .../discovery/azure/AzureDiscoveryModule.java | 4 +- .../azure/AzureUnicastHostsProvider.java | 33 ++++---- .../repositories/azure/AzureRepository.java | 23 ++++-- .../azure/AzureRepositoryModule.java | 4 +- .../cloud/azure/AbstractAzureTest.java | 2 +- .../AbstractAzureComputeServiceTest.java | 28 +++++-- .../discovery/azure/AzureSimpleTest.java | 14 ++-- .../azure/AzureTwoStartedNodesTest.java | 10 ++- .../AbstractAzureRepositoryServiceTest.java | 15 ++-- .../azure/AzureSnapshotRestoreITest.java | 49 ++++++------ src/test/resources/elasticsearch.yml | 40 ---------- 22 files changed, 301 insertions(+), 
289 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeSettingsFilter.java create mode 100644 src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettingsFilter.java delete mode 100644 src/test/resources/elasticsearch.yml diff --git a/README.md b/README.md index 27b6db0b5f2..25eb515df81 100644 --- a/README.md +++ b/README.md @@ -64,13 +64,13 @@ How to start (short story) Azure credential API settings ----------------------------- -The following are a list of settings (prefixed with `cloud.azure.management`) that can further control the discovery: +The following are a list of settings that can further control the credential API: -* `keystore.path`: /path/to/keystore -* `keystore.type`: `pkcs12`, `jceks` or `jks`. Defaults to `pkcs12`. -* `keystore.password`: your_password for the keystore -* `subscription.id`: your_azure_subscription_id -* `cloud.service.name`: your_azure_cloud_service_name +* `cloud.azure.management.keystore.path`: /path/to/keystore +* `cloud.azure.management.keystore.type`: `pkcs12`, `jceks` or `jks`. Defaults to `pkcs12`. +* `cloud.azure.management.keystore.password`: your_password for the keystore +* `cloud.azure.management.subscription.id`: your_azure_subscription_id +* `cloud.azure.management.cloud.service.name`: your_azure_cloud_service_name Note that in previous versions, it was: @@ -86,16 +86,16 @@ cloud: Advanced settings ----------------- -The following are a list of settings (prefixed with `discovery.azure`) that can further control the discovery: +The following are a list of settings that can further control the discovery: -* `host.type`: either `public_ip` or `private_ip` (default). Azure discovery will use the one you set to ping +* `discovery.azure.host.type`: either `public_ip` or `private_ip` (default). Azure discovery will use the one you set to ping other nodes. 
This feature was not documented before but was existing under `cloud.azure.host_type`. -* `endpoint.name`: when using `public_ip` this setting is used to identify the endpoint name used to forward requests +* `discovery.azure.endpoint.name`: when using `public_ip` this setting is used to identify the endpoint name used to forward requests to elasticsearch (aka transport port name). Defaults to `elasticsearch`. In Azure management console, you could define an endpoint `elasticsearch` forwarding for example requests on public IP on port 8100 to the virtual machine on port 9300. This feature was not documented before but was existing under `cloud.azure.port_name`. -* `deployment.name`: deployment name if any. Defaults to the value set with `cloud.azure.management.cloud.service.name`. -* `deployment.slot`: either `staging` or `production` (default). +* `discovery.azure.deployment.name`: deployment name if any. Defaults to the value set with `cloud.azure.management.cloud.service.name`. +* `discovery.azure.deployment.slot`: either `staging` or `production` (default). 
For example: @@ -476,10 +476,10 @@ Example using Java: ```java client.admin().cluster().preparePutRepository("my_backup3") - .setType("azure").setSettings(ImmutableSettings.settingsBuilder() - .put(AzureStorageService.Fields.CONTAINER, "backup_container") - .put(AzureStorageService.Fields.CHUNK_SIZE, new ByteSizeValue(32, ByteSizeUnit.MB)) - ).get(); + .setType("azure").setSettings(ImmutableSettings.settingsBuilder() + .put(Storage.CONTAINER, "backup_container") + .put(Storage.CHUNK_SIZE, new ByteSizeValue(32, ByteSizeUnit.MB)) + ).get(); ``` Repository validation rules diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java index f7ebdc2ca16..d9c21d00b00 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java +++ b/src/main/java/org/elasticsearch/cloud/azure/AzureModule.java @@ -20,10 +20,13 @@ package org.elasticsearch.cloud.azure; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cloud.azure.storage.AzureStorageService; -import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; import org.elasticsearch.cloud.azure.management.AzureComputeService; +import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery; +import org.elasticsearch.cloud.azure.management.AzureComputeService.Management; import org.elasticsearch.cloud.azure.management.AzureComputeServiceImpl; +import org.elasticsearch.cloud.azure.storage.AzureStorageService; +import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; +import org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; @@ -63,7 +66,7 @@ public class AzureModule extends AbstractModule { if (isDiscoveryReady(settings, logger)) { logger.debug("starting azure discovery service"); bind(AzureComputeService.class) - 
.to(settings.getAsClass("cloud.azure.api.impl", AzureComputeServiceImpl.class)) + .to(settings.getAsClass(Management.API_IMPLEMENTATION, AzureComputeServiceImpl.class)) .asEagerSingleton(); } @@ -71,7 +74,7 @@ public class AzureModule extends AbstractModule { if (isSnapshotReady(settings, logger)) { logger.debug("starting azure repository service"); bind(AzureStorageService.class) - .to(settings.getAsClass("repositories.azure.api.impl", AzureStorageServiceImpl.class)) + .to(settings.getAsClass(Storage.API_IMPLEMENTATION, AzureStorageServiceImpl.class)) .asEagerSingleton(); } } @@ -102,22 +105,22 @@ public class AzureModule extends AbstractModule { } if ( // We check new parameters - (isPropertyMissing(settings, "cloud.azure.management." + AzureComputeService.Fields.SUBSCRIPTION_ID) || - isPropertyMissing(settings, "cloud.azure.management." + AzureComputeService.Fields.SERVICE_NAME) || - isPropertyMissing(settings, "cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PATH) || - isPropertyMissing(settings, "cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PASSWORD)) + (isPropertyMissing(settings, Management.SUBSCRIPTION_ID) || + isPropertyMissing(settings, Management.SERVICE_NAME) || + isPropertyMissing(settings, Management.KEYSTORE_PATH) || + isPropertyMissing(settings, Management.KEYSTORE_PASSWORD)) // We check deprecated - && (isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.SUBSCRIPTION_ID_DEPRECATED) || - isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.SERVICE_NAME_DEPRECATED) || - isPropertyMissing(settings, "cloud.azure." + AzureComputeService.Fields.KEYSTORE_DEPRECATED) || - isPropertyMissing(settings, "cloud.azure." 
+ AzureComputeService.Fields.PASSWORD_DEPRECATED)) + && (isPropertyMissing(settings, Management.SUBSCRIPTION_ID_DEPRECATED) || + isPropertyMissing(settings, Management.SERVICE_NAME_DEPRECATED) || + isPropertyMissing(settings, Management.KEYSTORE_DEPRECATED) || + isPropertyMissing(settings, Management.PASSWORD_DEPRECATED)) ) { logger.debug("one or more azure discovery settings are missing. " + - "Check elasticsearch.yml file and `cloud.azure.management`. Should have [{}], [{}], [{}] and [{}].", - AzureComputeService.Fields.SUBSCRIPTION_ID, - AzureComputeService.Fields.SERVICE_NAME, - AzureComputeService.Fields.KEYSTORE_PATH, - AzureComputeService.Fields.KEYSTORE_PASSWORD); + "Check elasticsearch.yml file. Should have [{}], [{}], [{}] and [{}].", + Management.SUBSCRIPTION_ID, + Management.SERVICE_NAME, + Management.KEYSTORE_PATH, + Management.KEYSTORE_PASSWORD); return false; } @@ -137,13 +140,13 @@ public class AzureModule extends AbstractModule { return false; } - if ((isPropertyMissing(settings, "cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT) || - isPropertyMissing(settings, "cloud.azure.storage." + AzureStorageService.Fields.KEY)) && - (isPropertyMissing(settings, "cloud.azure." + AzureStorageService.Fields.ACCOUNT_DEPRECATED) || - isPropertyMissing(settings, "cloud.azure." 
+ AzureStorageService.Fields.KEY_DEPRECATED))) { - logger.debug("azure repository is not set [using cloud.azure.storage.{}] and [cloud.azure.storage.{}] properties", - AzureStorageService.Fields.ACCOUNT, - AzureStorageService.Fields.KEY); + if ((isPropertyMissing(settings, Storage.ACCOUNT) || + isPropertyMissing(settings, Storage.KEY)) && + (isPropertyMissing(settings, Storage.ACCOUNT_DEPRECATED) || + isPropertyMissing(settings, Storage.KEY_DEPRECATED))) { + logger.debug("azure repository is not set using [{}] and [{}] properties", + Storage.ACCOUNT, + Storage.KEY); return false; } @@ -171,24 +174,16 @@ public class AzureModule extends AbstractModule { public static void checkDeprecated(Settings settings, ESLogger logger) { // Cloud services are disabled if (isCloudReady(settings)) { - checkDeprecatedSettings(settings, "cloud.azure." + AzureStorageService.Fields.ACCOUNT_DEPRECATED, - "cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT, logger); - checkDeprecatedSettings(settings, "cloud.azure." + AzureStorageService.Fields.KEY_DEPRECATED, - "cloud.azure.storage." + AzureStorageService.Fields.KEY, logger); + checkDeprecatedSettings(settings, Storage.ACCOUNT_DEPRECATED, Storage.ACCOUNT, logger); + checkDeprecatedSettings(settings, Storage.KEY_DEPRECATED, Storage.KEY, logger); // TODO Remove in 3.0.0 - checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.KEYSTORE_DEPRECATED, - "cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PATH, logger); - checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.PASSWORD_DEPRECATED, - "cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PASSWORD, logger); - checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.SERVICE_NAME_DEPRECATED, - "cloud.azure.management." + AzureComputeService.Fields.SERVICE_NAME, logger); - checkDeprecatedSettings(settings, "cloud.azure." 
+ AzureComputeService.Fields.SUBSCRIPTION_ID_DEPRECATED, - "cloud.azure.management." + AzureComputeService.Fields.SUBSCRIPTION_ID, logger); - checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.HOST_TYPE_DEPRECATED, - "discovery.azure." + AzureComputeService.Fields.HOST_TYPE, logger); - checkDeprecatedSettings(settings, "cloud.azure." + AzureComputeService.Fields.PORT_NAME_DEPRECATED, - "discovery.azure." + AzureComputeService.Fields.ENDPOINT_NAME, logger); + checkDeprecatedSettings(settings, Management.KEYSTORE_DEPRECATED, Management.KEYSTORE_PATH, logger); + checkDeprecatedSettings(settings, Management.PASSWORD_DEPRECATED, Management.KEYSTORE_PASSWORD, logger); + checkDeprecatedSettings(settings, Management.SERVICE_NAME_DEPRECATED, Management.SERVICE_NAME, logger); + checkDeprecatedSettings(settings, Management.SUBSCRIPTION_ID_DEPRECATED, Management.SUBSCRIPTION_ID, logger); + checkDeprecatedSettings(settings, Discovery.HOST_TYPE_DEPRECATED, Discovery.HOST_TYPE, logger); + checkDeprecatedSettings(settings, Discovery.PORT_NAME_DEPRECATED, Discovery.ENDPOINT_NAME, logger); } } diff --git a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java b/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java deleted file mode 100644 index 313e80f35bc..00000000000 --- a/src/main/java/org/elasticsearch/cloud/azure/AzureSettingsFilter.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cloud.azure; - -import org.elasticsearch.cloud.azure.management.AzureComputeService; -import org.elasticsearch.cloud.azure.storage.AzureStorageService; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.settings.SettingsFilter; - -/** - * Filtering cloud.azure.* settings - */ -public class AzureSettingsFilter implements SettingsFilter.Filter { - - @Override - public void filter(ImmutableSettings.Builder settings) { - // Cloud global settings - settings.remove("cloud.azure." + AzureComputeService.Fields.REFRESH); - - // Cloud management API settings - settings.remove("cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PATH); - settings.remove("cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_PASSWORD); - settings.remove("cloud.azure.management." + AzureComputeService.Fields.KEYSTORE_TYPE); - settings.remove("cloud.azure.management." + AzureComputeService.Fields.SUBSCRIPTION_ID); - settings.remove("cloud.azure.management." + AzureComputeService.Fields.SERVICE_NAME); - - // Deprecated Cloud management API settings - // TODO Remove in 3.0.0 - settings.remove("cloud.azure." + AzureComputeService.Fields.KEYSTORE_DEPRECATED); - settings.remove("cloud.azure." + AzureComputeService.Fields.PASSWORD_DEPRECATED); - settings.remove("cloud.azure." + AzureComputeService.Fields.SUBSCRIPTION_ID_DEPRECATED); - settings.remove("cloud.azure." 
+ AzureComputeService.Fields.SERVICE_NAME_DEPRECATED); - - // Cloud storage API settings - settings.remove("cloud.azure.storage." + AzureStorageService.Fields.ACCOUNT); - settings.remove("cloud.azure.storage." + AzureStorageService.Fields.KEY); - - // Deprecated Cloud storage API settings - // TODO Remove in 3.0.0 - settings.remove("cloud.azure." + AzureStorageService.Fields.ACCOUNT_DEPRECATED); - settings.remove("cloud.azure." + AzureStorageService.Fields.KEY_DEPRECATED); - } -} diff --git a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index 75f6dea9adf..6edcae73e07 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -29,10 +29,12 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryName; import org.elasticsearch.repositories.RepositorySettings; -import org.elasticsearch.repositories.azure.AzureRepository; import java.net.URISyntaxException; +import static org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage.CONTAINER; +import static org.elasticsearch.repositories.azure.AzureRepository.CONTAINER_DEFAULT; + /** * */ @@ -48,8 +50,7 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { AzureStorageService client) throws URISyntaxException, StorageException { super(settings); this.client = client; - this.container = repositorySettings.settings().get(AzureStorageService.Fields.CONTAINER, - componentSettings.get(AzureStorageService.Fields.CONTAINER, AzureRepository.CONTAINER_DEFAULT)); + this.container = repositorySettings.settings().get("container", settings.get(CONTAINER, CONTAINER_DEFAULT)); this.repositoryName = name.getName(); } diff --git 
a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java index 3fd1a5b2532..67f34cd6f23 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java +++ b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java @@ -26,36 +26,43 @@ import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDeta */ public interface AzureComputeService { - static public final class Fields { + static public final class Management { // Deprecated azure management cloud settings @Deprecated - public static final String SUBSCRIPTION_ID_DEPRECATED = "subscription_id"; + public static final String SUBSCRIPTION_ID_DEPRECATED = "cloud.azure.subscription_id"; @Deprecated - public static final String SERVICE_NAME_DEPRECATED = "service_name"; + public static final String SERVICE_NAME_DEPRECATED = "cloud.azure.service_name"; @Deprecated - public static final String KEYSTORE_DEPRECATED = "keystore"; + public static final String KEYSTORE_DEPRECATED = "cloud.azure.keystore"; @Deprecated - public static final String PASSWORD_DEPRECATED = "password"; - @Deprecated - public static final String PORT_NAME_DEPRECATED = "port_name"; - @Deprecated - public static final String HOST_TYPE_DEPRECATED = "host_type"; + public static final String PASSWORD_DEPRECATED = "cloud.azure.password"; - public static final String SUBSCRIPTION_ID = "subscription.id"; - public static final String SERVICE_NAME = "cloud.service.name"; + public static final String API_IMPLEMENTATION = "cloud.azure.management.api.impl"; + + public static final String SUBSCRIPTION_ID = "cloud.azure.management.subscription.id"; + public static final String SERVICE_NAME = "cloud.azure.management.cloud.service.name"; // Keystore settings - public static final String KEYSTORE_PATH = "keystore.path"; - public static final String KEYSTORE_PASSWORD = "keystore.password"; - 
public static final String KEYSTORE_TYPE = "keystore.type"; - - public static final String REFRESH = "refresh_interval"; - - public static final String HOST_TYPE = "host.type"; - public static final String ENDPOINT_NAME = "endpoint.name"; - public static final String DEPLOYMENT_NAME = "deployment.name"; - public static final String DEPLOYMENT_SLOT = "deployment.slot"; + public static final String KEYSTORE_PATH = "cloud.azure.management.keystore.path"; + public static final String KEYSTORE_PASSWORD = "cloud.azure.management.keystore.password"; + public static final String KEYSTORE_TYPE = "cloud.azure.management.keystore.type"; } + static public final class Discovery { + // Deprecated azure discovery cloud settings + @Deprecated + public static final String PORT_NAME_DEPRECATED = "cloud.azure.port_name"; + @Deprecated + public static final String HOST_TYPE_DEPRECATED = "cloud.azure.host_type"; + @Deprecated + public static final String REFRESH_DEPRECATED = "cloud.azure.refresh_interval"; + + public static final String REFRESH = "discovery.azure.refresh_interval"; + + public static final String HOST_TYPE = "discovery.azure.host.type"; + public static final String ENDPOINT_NAME = "discovery.azure.endpoint.name"; + public static final String DEPLOYMENT_NAME = "discovery.azure.deployment.name"; + public static final String DEPLOYMENT_SLOT = "discovery.azure.deployment.slot"; + } public HostedServiceGetDetailedResponse getServiceDetails(); } diff --git a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java index baad3236259..ac8c28b1f92 100644 --- a/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java +++ b/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java @@ -28,16 +28,16 @@ import com.microsoft.windowsazure.management.configuration.ManagementConfigurati import 
org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.azure.AzureServiceDisableException; import org.elasticsearch.cloud.azure.AzureServiceRemoteException; -import org.elasticsearch.cloud.azure.AzureSettingsFilter; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import static org.elasticsearch.cloud.azure.management.AzureComputeService.Management.*; + /** * */ @@ -52,21 +52,19 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent