diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index a5a5371bc2d..7a76d37bf12 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -75,16 +75,18 @@ class JNANatives { } // mlockall failed for some reason - logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg + ". This can result in part of the JVM being swapped out."); + logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg); + logger.warn("This can result in part of the JVM being swapped out."); if (errno == JNACLibrary.ENOMEM) { if (rlimitSuccess) { logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit)); if (Constants.LINUX) { // give specific instructions for the linux case to make it easy + String user = System.getProperty("user.name"); logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" + - "\t# allow user 'esuser' mlockall\n" + - "\tesuser soft memlock unlimited\n" + - "\tesuser hard memlock unlimited" + "\t# allow user '" + user + "' mlockall\n" + + "\t" + user + " soft memlock unlimited\n" + + "\t" + user + " hard memlock unlimited" ); logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect."); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 449867b4fa9..4fda8391d2b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.IntSet; +import com.google.common.base.Predicate; import com.google.common.collect.*; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -27,7 +28,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.ArrayList; @@ -162,28 +162,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> { - // use list here since we need to maintain identity across shards - ArrayList<ShardIterator> set = new ArrayList<>(); - for (String index : indices) { - IndexRoutingTable indexRoutingTable = index(index); - if (indexRoutingTable == null) { - continue; - // we simply ignore indices that don't exists (make sense for operations that use it currently) - } - for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { - for (ShardRouting shardRouting : indexShardRoutingTable) { - if (shardRouting.active()) { - set.add(shardRouting.shardsIt()); - if (includeRelocationTargets && shardRouting.relocating()) { - set.add(new PlainShardIterator(shardRouting.shardId(), Collections.singletonList(shardRouting.buildTargetRelocatingShard()))); - } - } else if (includeEmpty) { // we need this for counting properly, just make it an empty one - set.add(new PlainShardIterator(shardRouting.shardId(), Collections.<ShardRouting>emptyList())); - } - } - } - } - return new GroupShardsIterator(set); + return allSatisfyingPredicateShardsGrouped(indices, includeEmpty, includeRelocationTargets, ACTIVE_PREDICATE); } public GroupShardsIterator
allAssignedShardsGrouped(String[] indices, boolean includeEmpty) { @@ -198,6 +177,25 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> { + + private static Predicate<ShardRouting> ACTIVE_PREDICATE = new Predicate<ShardRouting>() { + @Override + public boolean apply(ShardRouting shardRouting) { + return shardRouting.active(); + } + }; + + private static Predicate<ShardRouting> ASSIGNED_PREDICATE = new Predicate<ShardRouting>() { + @Override + public boolean apply(ShardRouting shardRouting) { + return shardRouting.assignedToNode(); + } + }; + + // TODO: replace with JDK 8 native java.util.function.Predicate + private GroupShardsIterator allSatisfyingPredicateShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets, Predicate<ShardRouting> predicate) { // use list here since we need to maintain identity across shards ArrayList<ShardIterator> set = new ArrayList<>(); for (String index : indices) { @@ -208,7 +206,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> { for (ShardRouting shardRouting : indexShardRoutingTable) { - if (shardRouting.assignedToNode()) { + if (predicate.apply(shardRouting)) { set.add(shardRouting.shardsIt()); if (includeRelocationTargets && shardRouting.relocating()) { diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java --- a/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.settings.loader; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.io.FastStringReader; @@ -41,7 +42,7 @@ public class PropertiesSettingsLoader implements SettingsLoader { @Override public Map<String, String> load(String source) throws IOException { - Properties props = new Properties(); + Properties props = new NoDuplicatesProperties(); FastStringReader reader = new FastStringReader(source); try { props.load(reader); @@ -52,7 +53,7 @@ public class PropertiesSettingsLoader implements SettingsLoader { @Override public Map<String, String> load(byte[] source) throws IOException { - Properties props = new Properties(); + Properties props = new NoDuplicatesProperties(); StreamInput stream = StreamInput.wrap(source); try { props.load(stream); @@ -65,4 +66,15 @@ public class PropertiesSettingsLoader implements SettingsLoader { IOUtils.closeWhileHandlingException(stream); } } + + class NoDuplicatesProperties extends Properties { + @Override + public synchronized Object put(Object key, Object value) { + Object previousValue = super.put(key, value); + if (previousValue != null) { + throw new ElasticsearchParseException("duplicate settings key [{}] found, previous value [{}], current value [{}]", key, previousValue, value); + } + return previousValue; + } + } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java index e3e08fb93f2..23c5d447582 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.settings.loader; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -141,7 +140,18 @@ public abstract class XContentSettingsLoader implements SettingsLoader { sb.append(pathEle).append('.'); } sb.append(fieldName); - settings.put(sb.toString(), parser.text()); + String key = sb.toString(); + String currentValue = parser.text(); + String previousValue = settings.put(key, currentValue); + if (previousValue != null) { + throw new ElasticsearchParseException( + "duplicate settings key [{}] found at line number [{}], column number [{}], previous value [{}], current value [{}]", + key, + parser.getTokenLocation().lineNumber, + parser.getTokenLocation().columnNumber, + previousValue, + currentValue + ); + } } - } diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 523e9bc5414..23546f123ec
100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -43,12 +43,10 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; -import java.nio.file.DirectoryStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; +import java.nio.file.*; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; @@ -253,10 +251,9 @@ public abstract class MetaDataStateFormat<T> { if (dataLocations != null) { // select all eligible files first for (Path dataLocation : dataLocations) { final Path stateDir = dataLocation.resolve(STATE_DIR_NAME); - if (!Files.isDirectory(stateDir)) { - continue; - } // now, iterate over the current versions, and find latest one + // we don't check if the stateDir is present since it could be deleted + // after the check. Also if there is a _state file and it's not a dir something is really wrong try (DirectoryStream<Path> paths = Files.newDirectoryStream(stateDir)) { // we don't pass a glob since we need the group part for parsing for (Path stateFile : paths) { final Matcher matcher = stateFilePattern.matcher(stateFile.getFileName().toString()); @@ -270,6 +267,8 @@ public abstract class MetaDataStateFormat<T> { files.add(pav); } } + } catch (NoSuchFileException | FileNotFoundException ex) { + // no _state directory -- move on } } } diff --git a/core/src/main/java/org/elasticsearch/http/HttpServer.java b/core/src/main/java/org/elasticsearch/http/HttpServer.java index 40067e2bff3..0fab142ac18 100644 --- a/core/src/main/java/org/elasticsearch/http/HttpServer.java +++ b/core/src/main/java/org/elasticsearch/http/HttpServer.java @@ -20,6 +20,7 @@ package org.elasticsearch.http; import com.google.common.collect.ImmutableMap; +import com.google.common.io.ByteStreams; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -30,6 +31,7 @@ import org.elasticsearch.node.service.NodeService; import org.elasticsearch.rest.*; import java.io.IOException; +import java.io.InputStream; import java.nio.file.*; import java.nio.file.attribute.BasicFileAttributes; import java.util.HashMap; @@ -114,10 +116,14 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> { } public void internalDispatchRequest(final HttpRequest request, final HttpChannel channel) { - if (request.rawPath().startsWith("/_plugin/")) { + String rawPath = request.rawPath(); + if (rawPath.startsWith("/_plugin/")) { RestFilterChain filterChain = restController.filterChain(pluginSiteFilter); filterChain.continueProcessing(request, channel); return; + } else if (rawPath.equals("/favicon.ico")) { + handleFavicon(request, channel); + return; } restController.dispatchRequest(request, channel); } @@ -131,6 +137,22 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> { } } + void handleFavicon(HttpRequest request, HttpChannel channel) { + if (request.method() == RestRequest.Method.GET) { + try { + try (InputStream stream = getClass().getResourceAsStream("/config/favicon.ico")) { + byte[] content = ByteStreams.toByteArray(stream); + BytesRestResponse restResponse = new BytesRestResponse(RestStatus.OK, "image/x-icon", content); + channel.sendResponse(restResponse); + } + } catch
(IOException e) { + channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR)); + } + } else { + channel.sendResponse(new BytesRestResponse(FORBIDDEN)); + } + } + void handlePluginSite(HttpRequest request, HttpChannel channel) throws IOException { if (disableSites) { channel.sendResponse(new BytesRestResponse(FORBIDDEN)); diff --git a/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index bdcd886486d..76349d0174e 100644 --- a/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -62,8 +62,6 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQueryBuilder> { diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help +++ b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help - The argument can be a <name> of one of the official plugins, or refer to a github repository + Officially supported or commercial plugins require just the plugin name: - The notation of just specifying a plugin name, downloads an officially supported plugin. + plugin install analysis-icu + plugin install shield - The notation of 'elasticsearch/plugin/version' allows to easily download a commercial elastic plugin. + Plugins from GitHub require 'username/repository' or 'username/repository/version': - The notation of 'groupId/artifactId/version' refers to community plugins using maven central or sonatype + plugin install lmenezes/elasticsearch-kopf + plugin install lmenezes/elasticsearch-kopf/1.5.7 - The notation of 'username/repository' refers to a github repository. + Plugins from Maven Central or Sonatype require 'groupId/artifactId/version': - The argument can be an valid <url> which points to a download or file location for the plugin to be loaded from. + plugin install org.elasticsearch/elasticsearch-mapper-attachments/2.6.0 -EXAMPLES + Plugins can be installed from a custom URL or file location as follows: - plugin install analysis-kuromoji - - plugin install elasticsearch/shield/latest - - plugin install lmenezes/elasticsearch-kopf - - plugin install http://download.elasticsearch.org/elasticsearch/elasticsearch-analysis-kuromoji/elasticsearch-analysis-kuromoji-2.7.0.zip - - plugin install file:/path/to/plugin/elasticsearch-analysis-kuromoji-2.7.0.zip + plugin install http://some.domain.name//my-plugin-1.0.0.zip + plugin install file:/path/to/my-plugin-1.0.0.zip OFFICIAL PLUGINS diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java index 142d60871aa..0f90b8c3728 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java @@ -19,19 +19,19 @@ package org.elasticsearch.common.settings.loader; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.test.ESTestCase; import org.junit.Test; import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; /** * */ public class JsonSettingsLoaderTests extends ESTestCase { - @Test public void testSimpleJsonSettings() throws Exception { String json = "/org/elasticsearch/common/settings/loader/test-settings.json"; @@ -50,4 +50,17 @@ public class JsonSettingsLoaderTests extends ESTestCase {
assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1")); assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2")); } + + public void testDuplicateKeysThrowsException() { + String json = "{\"foo\":\"bar\",\"foo\":\"baz\"}"; + try { + settingsBuilder() + .loadFromSource(json) + .build(); + fail("expected exception"); + } catch (SettingsException e) { + assertEquals(e.getCause().getClass(), ElasticsearchParseException.class); + assertTrue(e.toString().contains("duplicate settings key [foo] found at line number [1], column number [13], previous value [bar], current value [baz]")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoaderTests.java new file mode 100644 index 00000000000..7a1897fbaf9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoaderTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.settings.loader; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.nio.charset.Charset; + +public class PropertiesSettingsLoaderTests extends ESTestCase { + public void testDuplicateKeyFromStringThrowsException() throws IOException { + PropertiesSettingsLoader loader = new PropertiesSettingsLoader(); + try { + loader.load("foo=bar\nfoo=baz"); + fail("expected exception"); + } catch (ElasticsearchParseException e) { + assertEquals(e.getMessage(), "duplicate settings key [foo] found, previous value [bar], current value [baz]"); + } + } + + public void testDuplicateKeysFromBytesThrowsException() throws IOException { + PropertiesSettingsLoader loader = new PropertiesSettingsLoader(); + try { + loader.load("foo=bar\nfoo=baz".getBytes(Charset.defaultCharset())); + fail("expected exception"); + } catch (ElasticsearchParseException e) { + assertEquals(e.getMessage(), "duplicate settings key [foo] found, previous value [bar], current value [baz]"); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java index 49b5444a52b..60bf80a6e9d 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.settings.loader; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.test.ESTestCase; @@ -31,7 +32,6 @@ import static org.hamcrest.Matchers.equalTo; * */ public class YamlSettingsLoaderTests extends ESTestCase { - @Test public void testSimpleYamlSettings() throws Exception { String yaml = "/org/elasticsearch/common/settings/loader/test-settings.yml"; @@ -66,4 +66,17 @@ public class YamlSettingsLoaderTests extends ESTestCase { .loadFromStream(yaml, getClass().getResourceAsStream(yaml)) .build(); } -} \ No newline at end of file + + public void testDuplicateKeysThrowsException() { + String yaml = "foo: bar\nfoo: baz"; + try { + settingsBuilder() + .loadFromSource(yaml) + .build(); + fail("expected exception"); + } catch (SettingsException e) { + assertEquals(e.getCause().getClass(), ElasticsearchParseException.class); + assertTrue(e.toString().contains("duplicate settings key [foo] found at line number [2], column number [6], previous value [bar], current value [baz]")); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index 6ba7ca0d953..ebaf5e5a39a 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -2437,7 +2437,7 @@ public class SimpleIndexQueryParserTests extends ESSingleNodeTestCase { Query parsedQuery = queryParser.parse(query).query(); assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class)); assertThat(((ConstantScoreQuery) parsedQuery).getQuery(), instanceOf(ToParentBlockJoinQuery.class)); - assertThat(((ConstantScoreQuery) parsedQuery).getQuery().toString(), equalTo("ToParentBlockJoinQuery (+*:* #random_access(QueryWrapperFilter(_type:__nested)))")); +
assertThat(((ConstantScoreQuery) parsedQuery).getQuery().toString(), equalTo("ToParentBlockJoinQuery (+*:* #QueryWrapperFilter(_type:__nested))")); SearchContext.removeCurrent(); } diff --git a/dev-tools/ElasticSearch.launch b/dev-tools/Elasticsearch.launch similarity index 67% rename from dev-tools/ElasticSearch.launch rename to dev-tools/Elasticsearch.launch index c501e4b8818..2016518cd23 100644 --- a/dev-tools/ElasticSearch.launch +++ b/dev-tools/Elasticsearch.launch @@ -6,12 +6,13 @@ diff --git a/dev-tools/prepare_release_candidate.py b/dev-tools/prepare_release_candidate.py index baaac881474..fb288f9d081 100644 --- a/dev-tools/prepare_release_candidate.py +++ b/dev-tools/prepare_release_candidate.py @@ -23,7 +23,7 @@ # # python3 ./dev-tools/prepare-release.py # -# Note: Ensure the script is run from the root directory +# Note: Ensure the script is run from the elasticsearch top level directory # import fnmatch @@ -83,12 +83,14 @@ enabled=1 [4] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm [5] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb """ - -def run(command, env_vars=None): +VERBOSE=True +def run(command, env_vars=None, verbose=VERBOSE): if env_vars: for key, value in env_vars.items(): os.putenv(key, value) - if os.system('%s' % (command)): + if not verbose: + command = '%s >> /dev/null 2>&1' % (command) + if os.system(command): raise RuntimeError(' FAILED: %s' % (command)) def ensure_checkout_is_clean(): @@ -181,16 +183,20 @@ if __name__ == "__main__": help='Only runs a maven install to skip the remove deployment step') parser.add_argument('--gpg-key', '-k', dest='gpg_key', default="D88E42B4", help='Allows you to specify a different gpg_key to be used instead of the default release key') + parser.add_argument('--verbose', '-b', dest='verbose', action='store_true', + help='Runs the script in verbose mode') parser.set_defaults(deploy=False) parser.set_defaults(skip_doc_check=False) parser.set_defaults(push=False) parser.set_defaults(install_only=False) + parser.set_defaults(verbose=False) args = parser.parse_args() install_and_deploy = args.deploy skip_doc_check = args.skip_doc_check push = args.push gpg_key = args.gpg_key install_only = args.install_only + VERBOSE = args.verbose ensure_checkout_is_clean() release_version = find_release_version() diff --git a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py new file mode 100644 index 00000000000..4ed312bbc51 --- /dev/null +++ b/dev-tools/smoke_test_rc.py @@ -0,0 +1,271 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +# Smoke-tests a release candidate +# +# 1.
Downloads the tar.gz, deb, RPM and zip file from the staging URL +# 2. Verifies its sha1 hashes and GPG signatures against the release key +# 3. Installs all official plugins +# 4. Starts one node for tar.gz and zip packages and checks: +# -- if it runs with Java 1.7 +# -- if the build hash given is the one that is returned by the status response +# -- if the build is a release version and not a snapshot version +# -- if all plugins are loaded +# -- if the status response returns the correct version +# +# USAGE: +# +# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 +# +# to also test other plugins try running +# +# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --plugins license,shield,watcher +# +# Note: Ensure the script is run from the elasticsearch top level directory +# + +import argparse +import tempfile +import os +import signal +import shutil +import urllib +import urllib.request +import hashlib +import time +import socket +import json +import base64 + +from prepare_release_candidate import run +from http.client import HTTPConnection + +DEFAULT_PLUGINS = ["analysis-icu", + "analysis-kuromoji", + "analysis-phonetic", + "analysis-smartcn", + "analysis-stempel", + "cloud-aws", + "cloud-azure", + "cloud-gce", + "delete-by-query", + "discovery-multicast", + "lang-javascript", + "lang-python", + "mapper-murmur3", + "mapper-size"] + +try: + JAVA_HOME = os.environ['JAVA_HOME'] +except KeyError: + raise RuntimeError(""" + Please set JAVA_HOME in the env before running the release tool + On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""") + +def java_exe(): + path = JAVA_HOME + return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path) + +def verify_java_version(version): + s = os.popen('%s; java -version 2>&1' % java_exe()).read() + if ' version "%s.'
% version not in s: + raise RuntimeError('got wrong version for java %s:\n%s' % (version, s)) + + +def sha1(file): + with open(file, 'rb') as f: + return hashlib.sha1(f.read()).hexdigest() + +def read_fully(file): + with open(file, encoding='utf-8') as f: + return f.read() + + +def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=60, header={}): + print(' Waiting until node becomes available for at most %s seconds' % timeout) + for _ in range(timeout): + conn = HTTPConnection(host=host, port=port, timeout=timeout) + try: + time.sleep(1) + conn.request('GET', '', headers=header) + res = conn.getresponse() + if res.status == 200: + return True + except socket.error as e: + pass + #that is ok it might not be there yet + finally: + conn.close() + return False + +def download_and_verify(version, hash, files, base_url='http://download.elasticsearch.org/elasticsearch/staging', plugins=DEFAULT_PLUGINS, verbose=False): + base_url = '%s/%s-%s' % (base_url, version, hash) + print('Downloading and verifying release %s from %s' % (version, base_url)) + tmp_dir = tempfile.mkdtemp() + try: + downloaded_files = [] + print(' ' + '*' * 80) + for file in files: + name = os.path.basename(file) + print(' Smoketest file: %s' % name) + url = '%s/%s' % (base_url, file) + print(' Downloading %s' % (url)) + artifact_path = os.path.join(tmp_dir, file) + downloaded_files.append(artifact_path) + current_artifact_dir = os.path.dirname(artifact_path) + os.makedirs(current_artifact_dir) + urllib.request.urlretrieve(url, os.path.join(tmp_dir, file)) + sha1_url = ''.join([url, '.sha1']) + checksum_file = artifact_path + ".sha1" + print(' Downloading %s' % (sha1_url)) + urllib.request.urlretrieve(sha1_url, checksum_file) + print(' Verifying checksum %s' % (checksum_file)) + expected = read_fully(checksum_file) + actual = sha1(artifact_path) + if expected != actual : + raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual)) + gpg_url = ''.join([url, '.asc']) + gpg_file = artifact_path + ".asc" + print(' Downloading %s' % (gpg_url)) + urllib.request.urlretrieve(gpg_url, gpg_file) + print(' Verifying gpg signature %s' % (gpg_file)) + # here we create a temp gpg home where we download the release key as the only key into + # when we verify the signature it will fail if the signed key is not in the keystore and that + # way we keep the executing host unmodified since we don't have to import the key into the default keystore + gpg_home_dir = os.path.join(current_artifact_dir, "gpg_home_dir") + os.makedirs(gpg_home_dir, 0o700) + run('gpg --homedir %s --keyserver pgp.mit.edu --recv-key D88E42B4' % gpg_home_dir, verbose=verbose) + run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)), verbose=verbose) + print(' ' + '*' * 80) + print() + smoke_test_release(version, downloaded_files, hash, plugins, verbose=verbose) + print(' SUCCESS') + finally: + shutil.rmtree(tmp_dir) + +def smoke_test_release(release, files, expected_hash, plugins, verbose=False): + for release_file in files: + if not os.path.isfile(release_file): + raise RuntimeError('Smoketest failed missing file %s' % (release_file)) + tmp_dir = tempfile.mkdtemp() + if release_file.endswith('tar.gz'): + run('tar -xzf %s -C %s' % (release_file, tmp_dir), verbose=verbose) + elif release_file.endswith('zip'): + run('unzip %s -d %s' % (release_file, tmp_dir), verbose=verbose) + else: + print(' Skip SmokeTest for [%s]' % release_file) + continue # nothing to do here + es_run_path = 
os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch') + print(' Smoke testing package [%s]' % release_file) + es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/plugin') + plugin_names = {} + for plugin in plugins: + print(' Install plugin [%s]' % (plugin)) + run('%s; %s -Des.plugins.staging=true %s %s' % (java_exe(), es_plugin_path, 'install', plugin), verbose=verbose) + plugin_names[plugin] = True + if 'shield' in plugin_names: + headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") } + es_shield_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/shield/esusers') + print(" Install dummy shield user") + run('%s; %s useradd es_admin -r admin -p foobar' % (java_exe(), es_shield_path), verbose=verbose) + else: + headers = {} + print(' Starting elasticsearch daemon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release)) + try: + run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.script.inline=on -Des.script.indexed=on -Des.repositories.url.allowed_urls=http://snapshot.test* %s -Des.pidfile=%s' + % (java_exe(), es_run_path, '-d', os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'es-smoke.pid')), verbose=verbose) + conn = HTTPConnection(host='127.0.0.1', port=9200, timeout=20) + if not wait_for_node_startup(header=headers): + print("elasticsearch logs:") + print('*' * 80) + logs = read_fully(os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'logs/prepare_release.log')) + print(logs) + print('*' * 80) + raise RuntimeError('server didn\'t start up') + try: # we now get / and /_nodes to fetch basic info like hashes and the installed plugins + conn.request('GET', '', headers=headers) + res = conn.getresponse() + if res.status == 200: + version = json.loads(res.read().decode("utf-8"))['version'] + if release != version['number']: + raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number'])) + if version['build_snapshot']: + raise RuntimeError('Expected non snapshot version') + if not version['build_hash'].strip().startswith(expected_hash): + raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash'])) + print(' Verify if plugins are listed in _nodes') + conn.request('GET', '/_nodes?plugin=true&pretty=true', headers=headers) + res = conn.getresponse() + if res.status == 200: + nodes = json.loads(res.read().decode("utf-8"))['nodes'] + for _, node in nodes.items(): + node_plugins = node['plugins'] + for node_plugin in node_plugins: + if not plugin_names.get(node_plugin['name'].strip(), False): + raise RuntimeError('Unexpected plugin %s' % node_plugin['name']) + del plugin_names[node_plugin['name']] + if plugin_names: + raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys())) + + else: + raise RuntimeError('Expected HTTP 200 but got %s' % res.status) + else: + raise RuntimeError('Expected HTTP 200 but got %s' % res.status) + finally: + conn.close() + finally: + pid_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'es-smoke.pid') + if os.path.exists(pid_path): # try reading the pid and kill the node + pid = int(read_fully(pid_path)) + os.kill(pid, signal.SIGKILL) + shutil.rmtree(tmp_dir) + print(' ' + '*' * 80) + print() + + +def parse_list(string): + return [x.strip() for x in string.split(',')] + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo')
parser.add_argument('--version', '-v', dest='version', default=None, + help='The Elasticsearch Version to smoke-test', required=True) + parser.add_argument('--hash', '-s', dest='hash', default=None, required=True, + help='The sha1 short hash of the git commit to smoketest') + parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list, + help='A list of additional plugins to smoketest') + parser.add_argument('--verbose', '-b', dest='verbose', action='store_true', + help='Runs the script in verbose mode') + parser.set_defaults(hash=None) + parser.set_defaults(plugins=[]) + parser.set_defaults(version=None) + parser.set_defaults(verbose=False) + args = parser.parse_args() + plugins = args.plugins + version = args.version + hash = args.hash + verbose = args.verbose + files = [ + 'org/elasticsearch/distribution/tar/elasticsearch/%s/elasticsearch-%s.tar.gz' % (version, version), + 'org/elasticsearch/distribution/zip/elasticsearch/%s/elasticsearch-%s.zip' % (version, version), + 'org/elasticsearch/distribution/deb/elasticsearch/%s/elasticsearch-%s.deb' % (version, version), + 'org/elasticsearch/distribution/rpm/elasticsearch/%s/elasticsearch-%s.rpm' % (version, version) + ] + verify_java_version('1.7') + download_and_verify(version, hash, files, plugins=DEFAULT_PLUGINS + plugins, verbose=verbose) + + + diff --git a/distribution/licenses/joda-time-2.8.2.jar.sha1 b/distribution/licenses/joda-time-2.8.2.jar.sha1 new file mode 100644 index 00000000000..cf1e6a935c7 --- /dev/null +++ b/distribution/licenses/joda-time-2.8.2.jar.sha1 @@ -0,0 +1 @@ +d27c24204c5e507b16fec01006b3d0f1ec42aed4 diff --git a/distribution/licenses/joda-time-2.8.jar.sha1 b/distribution/licenses/joda-time-2.8.jar.sha1 deleted file mode 100644 index 4e181bf932f..00000000000 --- a/distribution/licenses/joda-time-2.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f2785d7184b97d005a44241ccaf980f43b9ccdb diff --git a/distribution/pom.xml b/distribution/pom.xml index 41a17293f45..4a22d12f458 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -90,6 +90,15 @@ + + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-eclipse-plugin</artifactId> + <configuration> + <projectNameTemplate>[groupId].[artifactId]</projectNameTemplate> + </configuration> + </plugin> + diff --git a/docs/java-api/client.asciidoc b/docs/java-api/client.asciidoc index 4a747b70782..cfc45b7bc4b 100644 --- a/docs/java-api/client.asciidoc +++ b/docs/java-api/client.asciidoc @@ -152,8 +152,8 @@ be "two hop" operations).
// on startup Client client = TransportClient.builder().build() - .addTransportAddress(new InetSocketTransportAddress("host1", 9300)) - .addTransportAddress(new InetSocketTransportAddress("host2", 9300)); + .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host1"), 9300)) + .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host2"), 9300)); // on shutdown diff --git a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc index 8311f58cdf3..bbd540dc01f 100644 --- a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc @@ -52,13 +52,13 @@ The following snippet calculates the average of the total monthly `sales`: }, "avg_monthly_sales": { "avg_bucket": { - "buckets_paths": "sales_per_month>sales" <1> + "buckets_path": "sales_per_month>sales" <1> } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this avg_bucket aggregation that we want the (mean) average value of the `sales` aggregation in the +<1> `buckets_path` instructs this avg_bucket aggregation that we want the (mean) average value of the `sales` aggregation in the `sales_per_month` date histogram. And the following may be the response: diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc index 3a5a3c5731b..ca09a87d1c0 100644 --- a/docs/reference/indices/templates.asciidoc +++ b/docs/reference/indices/templates.asciidoc @@ -1,7 +1,7 @@ [[indices-templates]] == Index Templates -Index templates allow to define templates that will automatically be +Index templates allow you to define templates that will automatically be applied to new indices created. The templates include both settings and mappings, and a simple pattern template that controls if the template will be applied to the index created. For example: diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index 0a8f3682b72..1cc07e21313 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -3,7 +3,7 @@ Allows to add one or more sort on specific fields. Each sort can be reversed as well. The sort is defined on a per field level, with special -field name for `_score` to sort by score. +field name for `_score` to sort by score, and `_doc` to sort by index order. [source,js] -------------------------------------------------- @@ -21,6 +21,10 @@ field name for `_score` to sort by score. } -------------------------------------------------- +NOTE: `_doc` has no real use-case besides being the most efficient sort order. +So if you don't care about the order in which documents are returned, then you +should sort by `_doc`. This especially helps when <<search-request-scroll,scrolling>>.
+ ==== Sort Values The sort values for each document returned are also returned as part of diff --git a/plugins/cloud-aws/licenses/aws-java-sdk-core-1.10.0.jar.sha1 b/plugins/cloud-aws/licenses/aws-java-sdk-core-1.10.0.jar.sha1 deleted file mode 100644 index c9a6d32ab96..00000000000 --- a/plugins/cloud-aws/licenses/aws-java-sdk-core-1.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9732a4e80aad23101faa442700c2172a37257c43 diff --git a/plugins/cloud-aws/licenses/aws-java-sdk-core-1.10.12.jar.sha1 b/plugins/cloud-aws/licenses/aws-java-sdk-core-1.10.12.jar.sha1 new file mode 100644 index 00000000000..659b6cc62f5 --- /dev/null +++ b/plugins/cloud-aws/licenses/aws-java-sdk-core-1.10.12.jar.sha1 @@ -0,0 +1 @@ +7ff51040bbcc9085dcb9a24a2c2a3cc7ac995988 diff --git a/plugins/cloud-aws/licenses/aws-java-sdk-ec2-1.10.0.jar.sha1 b/plugins/cloud-aws/licenses/aws-java-sdk-ec2-1.10.0.jar.sha1 deleted file mode 100644 index 4c132113999..00000000000 --- a/plugins/cloud-aws/licenses/aws-java-sdk-ec2-1.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b5dc3760021fba0ae67b4f11d37ffa52a4eac4f4 diff --git a/plugins/cloud-aws/licenses/aws-java-sdk-ec2-1.10.12.jar.sha1 b/plugins/cloud-aws/licenses/aws-java-sdk-ec2-1.10.12.jar.sha1 new file mode 100644 index 00000000000..60bae7e37ee --- /dev/null +++ b/plugins/cloud-aws/licenses/aws-java-sdk-ec2-1.10.12.jar.sha1 @@ -0,0 +1 @@ +b0712cc659e72b9da0f5b03872d2476ab4a695f7 diff --git a/plugins/cloud-aws/licenses/aws-java-sdk-kms-1.10.0.jar.sha1 b/plugins/cloud-aws/licenses/aws-java-sdk-kms-1.10.0.jar.sha1 deleted file mode 100644 index 02adba33c64..00000000000 --- a/plugins/cloud-aws/licenses/aws-java-sdk-kms-1.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48f0aab551fa9e2eb4c81e2debf40e9fff595405 diff --git a/plugins/cloud-aws/licenses/aws-java-sdk-kms-1.10.12.jar.sha1 b/plugins/cloud-aws/licenses/aws-java-sdk-kms-1.10.12.jar.sha1 new file mode 100644 index 00000000000..1948b0d2b01 --- /dev/null +++ b/plugins/cloud-aws/licenses/aws-java-sdk-kms-1.10.12.jar.sha1 @@ -0,0 +1 @@ +31afbe46b65e9933316c7e8dfb8b88dc4b37b6ba diff --git a/plugins/cloud-aws/licenses/aws-java-sdk-s3-1.10.0.jar.sha1 b/plugins/cloud-aws/licenses/aws-java-sdk-s3-1.10.0.jar.sha1 deleted file mode 100644 index a76faf74611..00000000000 --- a/plugins/cloud-aws/licenses/aws-java-sdk-s3-1.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -891e42d29e8f9474f83c050e4ee6a4512d4f4e71 diff --git a/plugins/cloud-aws/licenses/aws-java-sdk-s3-1.10.12.jar.sha1 b/plugins/cloud-aws/licenses/aws-java-sdk-s3-1.10.12.jar.sha1 new file mode 100644 index 00000000000..9814735f4d7 --- /dev/null +++ b/plugins/cloud-aws/licenses/aws-java-sdk-s3-1.10.12.jar.sha1 @@ -0,0 +1 @@ +c9e2593fdf398c5f8906a704db037d17b2de4b2a diff --git a/plugins/cloud-aws/pom.xml b/plugins/cloud-aws/pom.xml index 157a35077d8..7d0ea71940a 100644 --- a/plugins/cloud-aws/pom.xml +++ b/plugins/cloud-aws/pom.xml @@ -16,7 +16,7 @@ <elasticsearch.plugin.classname>org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin</elasticsearch.plugin.classname> - <amazonaws.version>1.10.0</amazonaws.version> + <amazonaws.version>1.10.12</amazonaws.version> <tests.jvms>1</tests.jvms> <tests.rest.suite>cloud_aws</tests.rest.suite> <tests.rest.load_packaged>false</tests.rest.load_packaged> diff --git a/plugins/cloud-aws/src/test/java/org/elasticsearch/cloud/aws/AmazonS3Wrapper.java b/plugins/cloud-aws/src/test/java/org/elasticsearch/cloud/aws/AmazonS3Wrapper.java index 0f52a318685..24c7196c681 100644 --- a/plugins/cloud-aws/src/test/java/org/elasticsearch/cloud/aws/AmazonS3Wrapper.java +++ b/plugins/cloud-aws/src/test/java/org/elasticsearch/cloud/aws/AmazonS3Wrapper.java @@ -34,7 +34,6 @@ import java.io.InputStream; import java.net.URL; import java.util.Date; import java.util.List; - import org.elasticsearch.common.SuppressForbidden; /** @@ -579,4 +578,54
@@ public class AmazonS3Wrapper implements AmazonS3 { public boolean isRequesterPaysEnabled(String bucketName) throws AmazonServiceException, AmazonClientException { return delegate.isRequesterPaysEnabled(bucketName); } + + @Override + public ObjectListing listNextBatchOfObjects(ListNextBatchOfObjectsRequest listNextBatchOfObjectsRequest) throws AmazonClientException, AmazonServiceException { + return delegate.listNextBatchOfObjects(listNextBatchOfObjectsRequest); + } + + @Override + public VersionListing listNextBatchOfVersions(ListNextBatchOfVersionsRequest listNextBatchOfVersionsRequest) throws AmazonClientException, AmazonServiceException { + return delegate.listNextBatchOfVersions(listNextBatchOfVersionsRequest); + } + + @Override + public Owner getS3AccountOwner(GetS3AccountOwnerRequest getS3AccountOwnerRequest) throws AmazonClientException, AmazonServiceException { + return delegate.getS3AccountOwner(getS3AccountOwnerRequest); + } + + @Override + public BucketLoggingConfiguration getBucketLoggingConfiguration(GetBucketLoggingConfigurationRequest getBucketLoggingConfigurationRequest) throws AmazonClientException, AmazonServiceException { + return delegate.getBucketLoggingConfiguration(getBucketLoggingConfigurationRequest); + } + + @Override + public BucketVersioningConfiguration getBucketVersioningConfiguration(GetBucketVersioningConfigurationRequest getBucketVersioningConfigurationRequest) throws AmazonClientException, AmazonServiceException { + return delegate.getBucketVersioningConfiguration(getBucketVersioningConfigurationRequest); + } + + @Override + public BucketLifecycleConfiguration getBucketLifecycleConfiguration(GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest) { + return delegate.getBucketLifecycleConfiguration(getBucketLifecycleConfigurationRequest); + } + + @Override + public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration(GetBucketCrossOriginConfigurationRequest getBucketCrossOriginConfigurationRequest) { + return delegate.getBucketCrossOriginConfiguration(getBucketCrossOriginConfigurationRequest); + } + + @Override + public BucketTaggingConfiguration getBucketTaggingConfiguration(GetBucketTaggingConfigurationRequest getBucketTaggingConfigurationRequest) { + return delegate.getBucketTaggingConfiguration(getBucketTaggingConfigurationRequest); + } + + @Override + public BucketNotificationConfiguration getBucketNotificationConfiguration(GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest) throws AmazonClientException, AmazonServiceException { + return delegate.getBucketNotificationConfiguration(getBucketNotificationConfigurationRequest); + } + + @Override + public BucketReplicationConfiguration getBucketReplicationConfiguration(GetBucketReplicationConfigurationRequest getBucketReplicationConfigurationRequest) throws AmazonServiceException, AmazonClientException { + return delegate.getBucketReplicationConfiguration(getBucketReplicationConfigurationRequest); + } } diff --git a/pom.xml b/pom.xml index 13bda58932d..84cb4af6eac 100644 --- a/pom.xml +++ b/pom.xml @@ -354,7 +354,7 @@ <artifactId>joda-time</artifactId> - <version>2.8</version> + <version>2.8.2</version> <groupId>org.joda</groupId> @@ -875,10 +875,16 @@ <version>2.4.1</version> - <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-resources-plugin</artifactId> <version>2.7</version> + <configuration> + <nonFilteredFileExtensions> + <nonFilteredFileExtension>ico</nonFilteredFileExtension> + </nonFilteredFileExtensions> + </configuration> + <plugin> <groupId>org.apache.maven.plugins</groupId> @@ -976,7 +982,7 @@ <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-eclipse-plugin</artifactId> - <version>2.9</version> + <version>2.10</version> <buildOutputDirectory>eclipse-build</buildOutputDirectory> <downloadSources>true</downloadSources> diff --git a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats index
b5daaece1d4..61210d2df68 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats @@ -31,37 +31,31 @@ # Load test utilities load packaging_test_utils -# Cleans everything for the 1st execution setup() { - if [ "$BATS_TEST_NUMBER" -eq 1 ]; then - clean_before_test - fi + skip_not_tar_gz } ################################## # Install TAR GZ package ################################## @test "[TAR] tar command is available" { - skip_not_tar_gz + # Cleans everything for the 1st execution + clean_before_test run tar --version [ "$status" -eq 0 ] } @test "[TAR] archive is available" { - skip_not_tar_gz count=$(find . -type f -name 'elasticsearch*.tar.gz' | wc -l) [ "$count" -eq 1 ] } @test "[TAR] archive is not installed" { - skip_not_tar_gz count=$(find /tmp -type d -name 'elasticsearch*' | wc -l) [ "$count" -eq 0 ] } @test "[TAR] install archive" { - skip_not_tar_gz - # Install the archive install_archive @@ -73,8 +67,6 @@ setup() { # Check that the archive is correctly installed ################################## @test "[TAR] verify archive installation" { - skip_not_tar_gz - verify_archive_installation "/tmp/elasticsearch" } @@ -82,14 +74,11 @@ setup() { # Check that Elasticsearch is working ################################## @test "[TAR] test elasticsearch" { - skip_not_tar_gz - start_elasticsearch_service run_elasticsearch_tests stop_elasticsearch_service - run rm -rf "/tmp/elasticsearch" - [ "$status" -eq 0 ] + rm -rf "/tmp/elasticsearch" } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash index c3773b557ef..cbb68389cc0 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -242,34 +242,27 @@ install_archive() { eshome="$1" fi - run tar -xzvf elasticsearch*.tar.gz -C "$eshome" >&2 - [ "$status" -eq 0 ] + tar -xzvf elasticsearch*.tar.gz -C "$eshome" - run find "$eshome" -depth -type d -name 'elasticsearch*' -exec mv {} "$eshome/elasticsearch" \; - [ "$status" -eq 0 ] + find "$eshome" -depth -type d -name 'elasticsearch*' -exec mv {} "$eshome/elasticsearch" \; # ES cannot run as root so create elasticsearch user & group if needed if ! getent group "elasticsearch" > /dev/null 2>&1 ; then if is_dpkg; then - run addgroup --system "elasticsearch" - [ "$status" -eq 0 ] + addgroup --system "elasticsearch" else - run groupadd -r "elasticsearch" - [ "$status" -eq 0 ] + groupadd -r "elasticsearch" fi fi if ! 
id "elasticsearch" > /dev/null 2>&1 ; then if is_dpkg; then - run adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch" - [ "$status" -eq 0 ] + adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch" else - run useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch" - [ "$status" -eq 0 ] + useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch" fi fi - run chown -R elasticsearch:elasticsearch "$eshome/elasticsearch" - [ "$status" -eq 0 ] + chown -R elasticsearch:elasticsearch "$eshome/elasticsearch" } @@ -354,11 +347,12 @@ clean_before_test() { } start_elasticsearch_service() { - if [ -f "/tmp/elasticsearch/bin/elasticsearch" ]; then - run /bin/su -s /bin/sh -c '/tmp/elasticsearch/bin/elasticsearch -d -p /tmp/elasticsearch/elasticsearch.pid' elasticsearch - [ "$status" -eq 0 ] - + # su and the Elasticsearch init script work together to break bats. + # sudo isolates bats enough from the init script so everything continues + # to tick along + sudo -u elasticsearch /tmp/elasticsearch/bin/elasticsearch -d \ + -p /tmp/elasticsearch/elasticsearch.pid elif is_systemd; then run systemctl daemon-reload [ "$status" -eq 0 ] @@ -383,9 +377,8 @@ start_elasticsearch_service() { pid=$(cat /tmp/elasticsearch/elasticsearch.pid) [ "x$pid" != "x" ] && [ "$pid" -gt 0 ] - run ps $pid - [ "$status" -eq 0 ] - + echo "Looking for elasticsearch pid...." + ps $pid elif is_systemd; then run systemctl is-active elasticsearch.service [ "$status" -eq 0 ] @@ -400,14 +393,11 @@ start_elasticsearch_service() { } stop_elasticsearch_service() { - if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then pid=$(cat /tmp/elasticsearch/elasticsearch.pid) [ "x$pid" != "x" ] && [ "$pid" -gt 0 ] - run kill -SIGTERM $pid - [ "$status" -eq 0 ] - + kill -SIGTERM $pid elif is_systemd; then run systemctl stop elasticsearch.service [ "$status" -eq 0 ] @@ -428,36 +418,63 @@ stop_elasticsearch_service() { # Waits for Elasticsearch to reach a given status (defaults to "green") wait_for_elasticsearch_status() { - local status="green" + local desired_status="green" if [ "x$1" != "x" ]; then status="$1" fi - # Try to connect to elasticsearch and wait for expected status - wget --quiet --retry-connrefused --waitretry=1 --timeout=60 \ - --output-document=/dev/null "http://localhost:9200/_cluster/health?wait_for_status=$status&timeout=60s" || true + echo "Making sure elasticsearch is up..." + wget -O - --retry-connrefused --waitretry=1 --timeout=60 http://localhost:9200 || { + echo "Looks like elasticsearch never started. Here is its log:" + if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then + cat /tmp/elasticsearch/log/elasticsearch.log + else + if [ -e '/var/log/elasticsearch/elasticsearch.log' ]; then + cat /var/log/elasticsearch/elasticsearch.log + else + echo "The elasticsearch log doesn't exist. Maybe /vag/log/messages has something:" + tail -n20 /var/log/messages + fi + fi + false + } - # Checks the cluster health - curl -XGET 'http://localhost:9200/_cat/health?h=status&v=false' - if [ $? -ne 0 ]; then - echo "error when checking cluster health" >&2 - exit 1 + echo "Tring to connect to elasticsearch and wait for expected status..." + curl -sS "http://localhost:9200/_cluster/health?wait_for_status=$desired_status&timeout=60s&pretty" + if [ $? 
-eq 0 ]; then + echo "Connected" + else + echo "Unable to connect to Elasticsearch" + false fi + + echo "Checking that the cluster health matches the waited-for status..." + run curl -sS -XGET 'http://localhost:9200/_cat/health?h=status&v=false' + if [ "$status" -ne 0 ]; then + echo "error when checking cluster health. code=$status output=" + echo $output + false + fi + echo $output | grep $desired_status || { + echo "unexpected status: '$output' wanted '$desired_status'" + false + } } # Executes some very basic Elasticsearch tests run_elasticsearch_tests() { + # TODO this assertion is the same as the one made when waiting for + # elasticsearch to start run curl -XGET 'http://localhost:9200/_cat/health?h=status&v=false' [ "$status" -eq 0 ] echo "$output" | grep -w "green" - run curl -XPOST 'http://localhost:9200/library/book/1?refresh=true' -d '{"title": "Elasticsearch - The Definitive Guide"}' 2>&1 - [ "$status" -eq 0 ] + curl -s -XPOST 'http://localhost:9200/library/book/1?refresh=true&pretty' -d '{ + "title": "Elasticsearch - The Definitive Guide" + }' - run curl -XGET 'http://localhost:9200/_cat/count?h=count&v=false' - [ "$status" -eq 0 ] - echo "$output" | grep -w "1" + curl -s -XGET 'http://localhost:9200/_cat/count?h=count&v=false&pretty' | + grep -w "1" - run curl -XDELETE 'http://localhost:9200/_all' - [ "$status" -eq 0 ] + curl -s -XDELETE 'http://localhost:9200/_all' }