Merge branch 'master' into feature/query-refactoring

Conflicts:
	core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java
Christoph Büscher 2015-08-25 11:59:40 +02:00
commit 59cb67c7bd
37 changed files with 611 additions and 157 deletions

View File

@@ -75,16 +75,18 @@ class JNANatives {
}
// mlockall failed for some reason
logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg + ". This can result in part of the JVM being swapped out.");
logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg);
logger.warn("This can result in part of the JVM being swapped out.");
if (errno == JNACLibrary.ENOMEM) {
if (rlimitSuccess) {
logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit));
if (Constants.LINUX) {
// give specific instructions for the linux case to make it easy
String user = System.getProperty("user.name");
logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" +
"\t# allow user 'esuser' mlockall\n" +
"\tesuser soft memlock unlimited\n" +
"\tesuser hard memlock unlimited"
"\t# allow user '" + user + "' mlockall\n" +
"\t" + user + " soft memlock unlimited\n" +
"\t" + user + " hard memlock unlimited"
);
logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
}

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.routing;
import com.carrotsearch.hppc.IntSet;
import com.google.common.base.Predicate;
import com.google.common.collect.*;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -27,7 +28,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import java.util.ArrayList;
@@ -162,28 +162,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
* iterator contains a single ShardRouting pointing at the relocating target
*/
public GroupShardsIterator allActiveShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<>();
for (String index : indices) {
IndexRoutingTable indexRoutingTable = index(index);
if (indexRoutingTable == null) {
continue;
// we simply ignore indices that don't exist (makes sense for operations that currently use it)
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (shardRouting.active()) {
set.add(shardRouting.shardsIt());
if (includeRelocationTargets && shardRouting.relocating()) {
set.add(new PlainShardIterator(shardRouting.shardId(), Collections.singletonList(shardRouting.buildTargetRelocatingShard())));
}
} else if (includeEmpty) { // we need this for counting properly, just make it an empty one
set.add(new PlainShardIterator(shardRouting.shardId(), Collections.<ShardRouting>emptyList()));
}
}
}
}
return new GroupShardsIterator(set);
return allSatisfyingPredicateShardsGrouped(indices, includeEmpty, includeRelocationTargets, ACTIVE_PREDICATE);
}
public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty) {
@@ -198,6 +177,25 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
* iterator contains a single ShardRouting pointing at the relocating target
*/
public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) {
return allSatisfyingPredicateShardsGrouped(indices, includeEmpty, includeRelocationTargets, ASSIGNED_PREDICATE);
}
private static Predicate<ShardRouting> ACTIVE_PREDICATE = new Predicate<ShardRouting>() {
@Override
public boolean apply(ShardRouting shardRouting) {
return shardRouting.active();
}
};
private static Predicate<ShardRouting> ASSIGNED_PREDICATE = new Predicate<ShardRouting>() {
@Override
public boolean apply(ShardRouting shardRouting) {
return shardRouting.assignedToNode();
}
};
// TODO: replace with JDK 8 native java.util.function.Predicate
private GroupShardsIterator allSatisfyingPredicateShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets, Predicate<ShardRouting> predicate) {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<>();
for (String index : indices) {
@@ -208,7 +206,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (shardRouting.assignedToNode()) {
if (predicate.apply(shardRouting)) {
set.add(shardRouting.shardsIt());
if (includeRelocationTargets && shardRouting.relocating()) {
set.add(new PlainShardIterator(shardRouting.shardId(), Collections.singletonList(shardRouting.buildTargetRelocatingShard())));
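The TODO above anticipates replacing these Guava predicates with JDK 8's native java.util.function.Predicate. A rough sketch of what that could look like, assuming the ShardRouting accessors stay as they are (the wrapper class here is illustrative, not part of this commit):

import java.util.function.Predicate;

// Illustrative only: method references replace the anonymous Guava
// Predicate subclasses, and test() replaces Guava's apply().
class ShardRoutingPredicates {
    static final Predicate<ShardRouting> ACTIVE = ShardRouting::active;
    static final Predicate<ShardRouting> ASSIGNED = ShardRouting::assignedToNode;

    static boolean matches(Predicate<ShardRouting> predicate, ShardRouting shardRouting) {
        return predicate.test(shardRouting);
    }
}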

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.common.settings.loader;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -36,7 +37,7 @@ public class PropertiesSettingsLoader implements SettingsLoader {
@Override
public Map<String, String> load(String source) throws IOException {
Properties props = new Properties();
Properties props = new NoDuplicatesProperties();
FastStringReader reader = new FastStringReader(source);
try {
props.load(reader);
@@ -52,7 +53,7 @@ public class PropertiesSettingsLoader implements SettingsLoader {
@Override
public Map<String, String> load(byte[] source) throws IOException {
Properties props = new Properties();
Properties props = new NoDuplicatesProperties();
StreamInput stream = StreamInput.wrap(source);
try {
props.load(stream);
@@ -65,4 +66,15 @@ public class PropertiesSettingsLoader implements SettingsLoader {
IOUtils.closeWhileHandlingException(stream);
}
}
class NoDuplicatesProperties extends Properties {
@Override
public synchronized Object put(Object key, Object value) {
Object previousValue = super.put(key, value);
if (previousValue != null) {
throw new ElasticsearchParseException("duplicate settings key [{}] found, previous value [{}], current value [{}]", key, previousValue, value);
}
return previousValue;
}
}
}
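The override works because java.util.Properties.load() pushes every parsed key/value pair through put(), so the second occurrence of a key is caught at load time. A self-contained sketch of the same technique, with a plain RuntimeException standing in for ElasticsearchParseException:

import java.io.IOException;
import java.io.StringReader;
import java.util.Properties;

public class NoDuplicatesDemo {
    static class NoDuplicatesProperties extends Properties {
        @Override
        public synchronized Object put(Object key, Object value) {
            Object previous = super.put(key, value);
            if (previous != null) {
                throw new RuntimeException("duplicate key [" + key + "]");
            }
            return previous;
        }
    }

    public static void main(String[] args) throws IOException {
        Properties props = new NoDuplicatesProperties();
        props.load(new StringReader("foo=bar\nfoo=baz")); // throws on the second foo
    }
}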

View File

@@ -20,7 +20,6 @@
package org.elasticsearch.common.settings.loader;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
@@ -141,7 +140,18 @@ public abstract class XContentSettingsLoader implements SettingsLoader {
sb.append(pathEle).append('.');
}
sb.append(fieldName);
settings.put(sb.toString(), parser.text());
String key = sb.toString();
String currentValue = parser.text();
String previousValue = settings.put(key, currentValue);
if (previousValue != null) {
throw new ElasticsearchParseException(
"duplicate settings key [{}] found at line number [{}], column number [{}], previous value [{}], current value [{}]",
key,
parser.getTokenLocation().lineNumber,
parser.getTokenLocation().columnNumber,
previousValue,
currentValue
);
}
}
}

View File

@@ -43,12 +43,10 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.*;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
@@ -253,10 +251,9 @@ public abstract class MetaDataStateFormat<T> {
if (dataLocations != null) { // select all eligible files first
for (Path dataLocation : dataLocations) {
final Path stateDir = dataLocation.resolve(STATE_DIR_NAME);
if (!Files.isDirectory(stateDir)) {
continue;
}
// now, iterate over the current versions, and find latest one
// we don't check if the stateDir is present since it could be deleted
// after the check. Also if there is a _state file and it's not a dir something is really wrong
try (DirectoryStream<Path> paths = Files.newDirectoryStream(stateDir)) { // we don't pass a glob since we need the group part for parsing
for (Path stateFile : paths) {
final Matcher matcher = stateFilePattern.matcher(stateFile.getFileName().toString());
@@ -270,6 +267,8 @@ public abstract class MetaDataStateFormat<T> {
files.add(pav);
}
}
} catch (NoSuchFileException | FileNotFoundException ex) {
// no _state directory -- move on
}
}
}
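The replaced isDirectory() check was racy: the state directory could disappear between the check and the listing. The new code simply attempts the listing and treats a missing directory as an empty result. The same pattern in isolation (the helper is hypothetical, not from this commit):

import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

final class StateDirs {
    // List a directory's entries, treating a vanished or never-created
    // directory as empty instead of probing Files.isDirectory() first.
    static List<Path> listOrEmpty(Path dir) throws IOException {
        List<Path> entries = new ArrayList<>();
        try (DirectoryStream<Path> paths = Files.newDirectoryStream(dir)) {
            for (Path p : paths) {
                entries.add(p);
            }
        } catch (NoSuchFileException | FileNotFoundException ex) {
            // no directory -- nothing to list
        }
        return entries;
    }
}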

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.http;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.ByteStreams;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -30,6 +31,7 @@ import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.rest.*;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashMap;
@@ -114,10 +116,14 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
}
public void internalDispatchRequest(final HttpRequest request, final HttpChannel channel) {
if (request.rawPath().startsWith("/_plugin/")) {
String rawPath = request.rawPath();
if (rawPath.startsWith("/_plugin/")) {
RestFilterChain filterChain = restController.filterChain(pluginSiteFilter);
filterChain.continueProcessing(request, channel);
return;
} else if (rawPath.equals("/favicon.ico")) {
handleFavicon(request, channel);
return;
}
restController.dispatchRequest(request, channel);
}
@@ -131,6 +137,22 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
}
}
void handleFavicon(HttpRequest request, HttpChannel channel) {
if (request.method() == RestRequest.Method.GET) {
try {
try (InputStream stream = getClass().getResourceAsStream("/config/favicon.ico")) {
byte[] content = ByteStreams.toByteArray(stream);
BytesRestResponse restResponse = new BytesRestResponse(RestStatus.OK, "image/x-icon", content);
channel.sendResponse(restResponse);
}
} catch (IOException e) {
channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR));
}
} else {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
}
}
void handlePluginSite(HttpRequest request, HttpChannel channel) throws IOException {
if (disableSites) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));

View File

@@ -62,8 +62,6 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
private String minimumShouldMatch;
private String rewrite = null;
private String fuzzyRewrite = null;
private Boolean useDisMax;
@@ -246,11 +244,6 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
return this;
}
public MultiMatchQueryBuilder rewrite(String rewrite) {
this.rewrite = rewrite;
return this;
}
public MultiMatchQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
this.fuzzyRewrite = fuzzyRewrite;
return this;
@@ -347,9 +340,6 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
if (minimumShouldMatch != null) {
builder.field("minimum_should_match", minimumShouldMatch);
}
if (rewrite != null) {
builder.field("rewrite", rewrite);
}
if (fuzzyRewrite != null) {
builder.field("fuzzy_rewrite", fuzzyRewrite);
}

View File

@@ -19,6 +19,7 @@
package org.elasticsearch.index.query.support;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.join.BitDocIdSetFilter;
import org.elasticsearch.common.bytes.BytesReference;
@@ -55,7 +56,7 @@ public class NestedInnerQueryParseSupport {
protected boolean filterFound = false;
protected BitDocIdSetFilter parentFilter;
protected BitDocIdSetFilter childFilter;
protected Filter childFilter;
protected ObjectMapper nestedObjectMapper;
private ObjectMapper parentObjectMapper;
@@ -195,7 +196,7 @@ public class NestedInnerQueryParseSupport {
} else {
parentFilter = shardContext.bitsetFilter(objectMapper.nestedTypeFilter());
}
childFilter = shardContext.bitsetFilter(nestedObjectMapper.nestedTypeFilter());
childFilter = nestedObjectMapper.nestedTypeFilter();
parentObjectMapper = shardContext.nestedScope().nextLevel(nestedObjectMapper);
}

Binary file not shown (new image, 1.1 KiB)

View File

@@ -8,31 +8,26 @@ SYNOPSIS
DESCRIPTION
This command installs an elasticsearch plugin
This command installs an elasticsearch plugin. It can be used as follows:
The argument can be a <name> of one of the official plugins, or refer to a github repository
Officially supported or commercial plugins require just the plugin name:
The notation of just specifying a plugin name, downloads an officially supported plugin.
plugin install analysis-icu
plugin install shield
The notation of 'elasticsearch/plugin/version' allows to easily download a commercial elastic plugin.
The notation of 'groupId/artifactId/version' refers to community plugins using maven central or sonatype
The notation of 'username/repository' refers to a github repository.
The argument can be a valid <url> which points to a download or file location for the plugin to be loaded from.
EXAMPLES
plugin install analysis-kuromoji
plugin install elasticsearch/shield/latest
Plugins from GitHub require 'username/repository' or 'username/repository/version':
plugin install lmenezes/elasticsearch-kopf
plugin install lmenezes/elasticsearch-kopf/1.5.7
plugin install http://download.elasticsearch.org/elasticsearch/elasticsearch-analysis-kuromoji/elasticsearch-analysis-kuromoji-2.7.0.zip
Plugins from Maven Central or Sonatype require 'groupId/artifactId/version':
plugin install file:/path/to/plugin/elasticsearch-analysis-kuromoji-2.7.0.zip
plugin install org.elasticsearch/elasticsearch-mapper-attachments/2.6.0
Plugins can be installed from a custom URL or file location as follows:
plugin install http://some.domain.name//my-plugin-1.0.0.zip
plugin install file:/path/to/my-plugin-1.0.0.zip
OFFICIAL PLUGINS

View File

@@ -19,19 +19,19 @@
package org.elasticsearch.common.settings.loader;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
/**
*
*/
public class JsonSettingsLoaderTests extends ESTestCase {
@Test
public void testSimpleJsonSettings() throws Exception {
String json = "/org/elasticsearch/common/settings/loader/test-settings.json";
@@ -50,4 +50,17 @@ public class JsonSettingsLoaderTests extends ESTestCase {
assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1"));
assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2"));
}
public void testDuplicateKeysThrowsException() {
String json = "{\"foo\":\"bar\",\"foo\":\"baz\"}";
try {
settingsBuilder()
.loadFromSource(json)
.build();
fail("expected exception");
} catch (SettingsException e) {
assertEquals(e.getCause().getClass(), ElasticsearchParseException.class);
assertTrue(e.toString().contains("duplicate settings key [foo] found at line number [1], column number [13], previous value [bar], current value [baz]"));
}
}
}

View File

@@ -0,0 +1,47 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.settings.loader;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.nio.charset.Charset;
public class PropertiesSettingsLoaderTests extends ESTestCase {
public void testDuplicateKeyFromStringThrowsException() throws IOException {
PropertiesSettingsLoader loader = new PropertiesSettingsLoader();
try {
loader.load("foo=bar\nfoo=baz");
fail("expected exception");
} catch (ElasticsearchParseException e) {
assertEquals(e.getMessage(), "duplicate settings key [foo] found, previous value [bar], current value [baz]");
}
}
public void testDuplicateKeysFromBytesThrowsException() throws IOException {
PropertiesSettingsLoader loader = new PropertiesSettingsLoader();
try {
loader.load("foo=bar\nfoo=baz".getBytes(Charset.defaultCharset()));
} catch (ElasticsearchParseException e) {
assertEquals(e.getMessage(), "duplicate settings key [foo] found, previous value [bar], current value [baz]");
}
}
}

View File

@@ -19,6 +19,7 @@
package org.elasticsearch.common.settings.loader;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.test.ESTestCase;
@@ -31,7 +32,6 @@ import static org.hamcrest.Matchers.equalTo;
*
*/
public class YamlSettingsLoaderTests extends ESTestCase {
@Test
public void testSimpleYamlSettings() throws Exception {
String yaml = "/org/elasticsearch/common/settings/loader/test-settings.yml";
@@ -66,4 +66,17 @@ public class YamlSettingsLoaderTests extends ESTestCase {
.loadFromStream(yaml, getClass().getResourceAsStream(yaml))
.build();
}
public void testDuplicateKeysThrowsException() {
String yaml = "foo: bar\nfoo: baz";
try {
settingsBuilder()
.loadFromSource(yaml)
.build();
fail("expected exception");
} catch (SettingsException e) {
assertEquals(e.getCause().getClass(), ElasticsearchParseException.class);
assertTrue(e.toString().contains("duplicate settings key [foo] found at line number [2], column number [6], previous value [bar], current value [baz]"));
}
}
}

View File

@@ -2437,7 +2437,7 @@ public class SimpleIndexQueryParserTests extends ESSingleNodeTestCase {
Query parsedQuery = queryParser.parse(query).query();
assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
assertThat(((ConstantScoreQuery) parsedQuery).getQuery(), instanceOf(ToParentBlockJoinQuery.class));
assertThat(((ConstantScoreQuery) parsedQuery).getQuery().toString(), equalTo("ToParentBlockJoinQuery (+*:* #random_access(QueryWrapperFilter(_type:__nested)))"));
assertThat(((ConstantScoreQuery) parsedQuery).getQuery().toString(), equalTo("ToParentBlockJoinQuery (+*:* #QueryWrapperFilter(_type:__nested))"));
SearchContext.removeCurrent();
}

View File

@@ -6,12 +6,13 @@
<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
<listEntry value="1"/>
</listAttribute>
<mapAttribute key="org.eclipse.debug.core.environmentVariables">
<mapEntry key="ES_HOME" value="${target_home}"/>
</mapAttribute>
<stringAttribute key="org.eclipse.jdt.launching.CLASSPATH_PROVIDER" value="org.eclipse.m2e.launchconfig.classpathProvider"/>
<listAttribute key="org.eclipse.debug.ui.favoriteGroups">
<listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
<listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
</listAttribute>
<booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="org.elasticsearch.bootstrap.Elasticsearch"/>
<stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="start"/>
<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="elasticsearch"/>
<stringAttribute key="org.eclipse.jdt.launching.SOURCE_PATH_PROVIDER" value="org.eclipse.m2e.launchconfig.sourcepathProvider"/>
<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xms256m -Xmx1g -Djava.awt.headless=true -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=logs/heapdump.hprof -Delasticsearch -Des.foreground=yes -ea"/>
<stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xms256m -Xmx1g -Djava.awt.headless=true -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=logs/heapdump.hprof -Delasticsearch -Des.foreground=yes -ea -Des.path.home=target/eclipse_run -Des.security.manager.enabled=false"/>
</launchConfiguration>

View File

@@ -23,7 +23,7 @@
#
# python3 ./dev-tools/prepare-release.py
#
# Note: Ensure the script is run from the root directory
# Note: Ensure the script is run from the elasticsearch top level directory
#
import fnmatch
@@ -83,12 +83,14 @@ enabled=1
[4] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm
[5] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb
"""
def run(command, env_vars=None):
VERBOSE=True
def run(command, env_vars=None, verbose=VERBOSE):
if env_vars:
for key, value in env_vars.items():
os.putenv(key, value)
if os.system('%s' % (command)):
if not verbose:
command = '%s >> /dev/null 2>&1' % (command)
if os.system(command):
raise RuntimeError(' FAILED: %s' % (command))
def ensure_checkout_is_clean():
@@ -181,16 +183,20 @@ if __name__ == "__main__":
help='Only runs a maven install to skip the remove deployment step')
parser.add_argument('--gpg-key', '-k', dest='gpg_key', default="D88E42B4",
help='Allows you to specify a different gpg_key to be used instead of the default release key')
parser.add_argument('--verbose', '-b', dest='verbose', action='store_true',
help='Runs the script in verbose mode')
parser.set_defaults(deploy=False)
parser.set_defaults(skip_doc_check=False)
parser.set_defaults(push=False)
parser.set_defaults(install_only=False)
parser.set_defaults(verbose=False)
args = parser.parse_args()
install_and_deploy = args.deploy
skip_doc_check = args.skip_doc_check
push = args.push
gpg_key = args.gpg_key
install_only = args.install_only
VERBOSE = args.verbose
ensure_checkout_is_clean()
release_version = find_release_version()

dev-tools/smoke_test_rc.py (new file, 271 lines)
View File

@@ -0,0 +1,271 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Smoke-tests a release candidate
#
# 1. Downloads the tar.gz, deb, RPM and zip file from the staging URL
# 2. Verifies its sha1 hashes and GPG signatures against the release key
# 3. Installs all official plugins
# 4. Starts one node for tar.gz and zip packages and checks:
# -- if it runs with Java 1.7
# -- if the build hash given is the one that is returned by the status response
# -- if the build is a release version and not a snapshot version
# -- if all plugins are loaded
# -- if the status response returns the correct version
#
# USAGE:
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47
#
# to also test other plugins, try running
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --plugins license,shield,watcher
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
import argparse
import tempfile
import os
import signal
import shutil
import urllib
import urllib.request
import hashlib
import time
import socket
import json
import base64
from prepare_release_candidate import run
from http.client import HTTPConnection
DEFAULT_PLUGINS = ["analysis-icu",
"analysis-kuromoji",
"analysis-phonetic",
"analysis-smartcn",
"analysis-stempel",
"cloud-aws",
"cloud-azure",
"cloud-gce",
"delete-by-query",
"discovery-multicast",
"lang-javascript",
"lang-python",
"mapper-murmur3",
"mapper-size"]
try:
JAVA_HOME = os.environ['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running the release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
def sha1(file):
with open(file, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def read_fully(file):
with open(file, encoding='utf-8') as f:
return f.read()
def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=60, header={}):
print(' Waiting until node becomes available for at most %s seconds' % timeout)
for _ in range(timeout):
conn = HTTPConnection(host=host, port=port, timeout=timeout)
try:
time.sleep(1)
conn.request('GET', '', headers=header)
res = conn.getresponse()
if res.status == 200:
return True
except socket.error as e:
pass
# that is ok, it might not be there yet
finally:
conn.close()
return False
def download_and_verify(version, hash, files, base_url='http://download.elasticsearch.org/elasticsearch/staging', plugins=DEFAULT_PLUGINS, verbose=False):
base_url = '%s/%s-%s' % (base_url, version, hash)
print('Downloading and verifying release %s from %s' % (version, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
print(' ' + '*' * 80)
for file in files:
name = os.path.basename(file)
print(' Smoketest file: %s' % name)
url = '%s/%s' % (base_url, file)
print(' Downloading %s' % (url))
artifact_path = os.path.join(tmp_dir, file)
downloaded_files.append(artifact_path)
current_artifact_dir = os.path.dirname(artifact_path)
os.makedirs(current_artifact_dir)
urllib.request.urlretrieve(url, os.path.join(tmp_dir, file))
sha1_url = ''.join([url, '.sha1'])
checksum_file = artifact_path + ".sha1"
print(' Downloading %s' % (sha1_url))
urllib.request.urlretrieve(sha1_url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
expected = read_fully(checksum_file)
actual = sha1(artifact_path)
if expected != actual:
raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual))
gpg_url = ''.join([url, '.asc'])
gpg_file = artifact_path + ".asc"
print(' Downloading %s' % (gpg_url))
urllib.request.urlretrieve(gpg_url, gpg_file)
print(' Verifying gpg signature %s' % (gpg_file))
# here we create a temp gpg home where we download the release key as the only key into
# when we verify the signature it will fail if the signed key is not in the keystore and that
# way we keep the executing host unmodified since we don't have to import the key into the default keystore
gpg_home_dir = os.path.join(current_artifact_dir, "gpg_home_dir")
os.makedirs(gpg_home_dir, 0o700)
run('gpg --homedir %s --keyserver pgp.mit.edu --recv-key D88E42B4' % gpg_home_dir, verbose=verbose)
run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)), verbose=verbose)
print(' ' + '*' * 80)
print()
smoke_test_release(version, downloaded_files, hash, plugins, verbose=verbose)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def smoke_test_release(release, files, expected_hash, plugins, verbose=False):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed, missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir), verbose=verbose)
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir), verbose=verbose)
else:
print(' Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/plugin')
plugin_names = {}
for plugin in plugins:
print(' Install plugin [%s]' % (plugin))
run('%s; %s -Des.plugins.staging=true %s %s' % (java_exe(), es_plugin_path, 'install', plugin), verbose=verbose)
plugin_names[plugin] = True
if 'shield' in plugin_names:
headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
es_shield_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/shield/esusers')
print(" Install dummy shield user")
run('%s; %s useradd es_admin -r admin -p foobar' % (java_exe(), es_shield_path), verbose=verbose)
else:
headers = {}
print(' Starting elasticsearch daemon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
try:
run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.script.inline=on -Des.script.indexed=on -Des.repositories.url.allowed_urls=http://snapshot.test* %s -Des.pidfile=%s'
% (java_exe(), es_run_path, '-d', os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'es-smoke.pid')), verbose=verbose)
conn = HTTPConnection(host='127.0.0.1', port=9200, timeout=20)
if not wait_for_node_startup(header=headers):
print("elasticsearch logs:")
print('*' * 80)
logs = read_fully(os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'logs/prepare_release.log'))
print(logs)
print('*' * 80)
raise RuntimeError('server didn\'t start up')
try: # we now get / and /_nodes to fetch basic info like hashes etc. and the installed plugins
conn.request('GET', '', headers=headers)
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
if not expected_hash.startswith(version['build_hash'].strip()):
raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes?plugin=true&pretty=true', headers=headers)
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'].strip(), False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.close()
finally:
pid_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'es-smoke.pid')
if os.path.exists(pid_path): # try reading the pid and kill the node
pid = int(read_fully(pid_path))
os.kill(pid, signal.SIGKILL)
shutil.rmtree(tmp_dir)
print(' ' + '*' * 80)
print()
def parse_list(string):
return [x.strip() for x in string.split(',')]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo')
parser.add_argument('--version', '-v', dest='version', default=None,
help='The Elasticsearch Version to smoke-test', required=True)
parser.add_argument('--hash', '-s', dest='hash', default=None, required=True,
help='The sha1 short hash of the git commit to smoketest')
parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list,
help='A list of additional plugins to smoketest')
parser.add_argument('--verbose', '-b', dest='verbose', action='store_true',
help='Runs the script in verbose mode')
parser.set_defaults(hash=None)
parser.set_defaults(plugins=[])
parser.set_defaults(version=None)
parser.set_defaults(verbose=False)
args = parser.parse_args()
plugins = args.plugins
version = args.version
hash = args.hash
verbose = args.verbose
files = [
'org/elasticsearch/distribution/tar/elasticsearch/%s/elasticsearch-%s.tar.gz' % (version, version),
'org/elasticsearch/distribution/zip/elasticsearch/%s/elasticsearch-%s.zip' % (version, version),
'org/elasticsearch/distribution/deb/elasticsearch/%s/elasticsearch-%s.deb' % (version, version),
'org/elasticsearch/distribution/rpm/elasticsearch/%s/elasticsearch-%s.rpm' % (version, version)
]
verify_java_version('1.7')
download_and_verify(version, hash, files, plugins=DEFAULT_PLUGINS + plugins, verbose=verbose)

View File

@@ -0,0 +1 @@
d27c24204c5e507b16fec01006b3d0f1ec42aed4

View File

@@ -1 +0,0 @@
9f2785d7184b97d005a44241ccaf980f43b9ccdb

View File

@@ -90,6 +90,15 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-eclipse-plugin</artifactId>
<configuration>
<!-- Many of the modules in this build have the artifactId "elasticsearch",
which breaks importing into Eclipse without this. -->
<projectNameTemplate>[groupId].[artifactId]</projectNameTemplate>
</configuration>
</plugin>
</plugins>
<pluginManagement>

View File

@ -152,8 +152,8 @@ be "two hop" operations).
// on startup
Client client = TransportClient.builder().build()
.addTransportAddress(new InetSocketTransportAddress("host1", 9300))
.addTransportAddress(new InetSocketTransportAddress("host2", 9300));
.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host1"), 9300))
.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host2"), 9300));
// on shutdown
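The updated snippet needs java.net.InetAddress in scope, and InetAddress.getByName() declares UnknownHostException. A self-contained version might look like this (the factory wrapper is illustrative; only the builder calls come from the snippet above):

import java.net.InetAddress;
import java.net.UnknownHostException;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.transport.InetSocketTransportAddress;

final class ClientFactory {
    // on startup: resolve both hosts and register them with the client
    static Client build() throws UnknownHostException {
        return TransportClient.builder().build()
                .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host1"), 9300))
                .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host2"), 9300));
    }
}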

View File

@@ -52,13 +52,13 @@ The following snippet calculates the average of the total monthly `sales`:
},
"avg_monthly_sales": {
"avg_bucket": {
"buckets_paths": "sales_per_month>sales" <1>
"buckets_path": "sales_per_month>sales" <1>
}
}
}
}
--------------------------------------------------
<1> `bucket_paths` instructs this avg_bucket aggregation that we want the (mean) average value of the `sales` aggregation in the
<1> `buckets_path` instructs this avg_bucket aggregation that we want the (mean) average value of the `sales` aggregation in the
`sales_per_month` date histogram.
And the following may be the response:

View File

@@ -1,7 +1,7 @@
[[indices-templates]]
== Index Templates
Index templates allow to define templates that will automatically be
Index templates allow you to define templates that will automatically be
applied to new indices created. The templates include both settings and
mappings, and a simple pattern template that controls if the template
will be applied to the index created. For example:

View File

@@ -3,7 +3,7 @@
Allows adding one or more sorts on specific fields. Each sort can be
reversed as well. The sort is defined on a per field level, with special
field name for `_score` to sort by score.
field name for `_score` to sort by score, and `_doc` to sort by index order.
[source,js]
--------------------------------------------------
@@ -21,6 +21,10 @@ field name for `_score` to sort by score.
}
--------------------------------------------------
NOTE: `_doc` has no real use-case besides being the most efficient sort order.
So if you don't care about the order in which documents are returned, then you
should sort by `_doc`. This especially helps when <<search-request-scroll,scrolling>>.
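As a rough Java-client illustration of the note above (index name, page size, and keep-alive are placeholders; `client` is an ordinary org.elasticsearch.client.Client):

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.sort.SortOrder;

final class ScrollByDocExample {
    // First scroll page in index order: _doc skips scoring and field data.
    static SearchResponse firstPage(Client client) {
        return client.prepareSearch("my_index")
                .addSort("_doc", SortOrder.ASC)
                .setScroll(TimeValue.timeValueMinutes(1))
                .setSize(100)
                .get();
    }
}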
==== Sort Values
The sort values for each document returned are also returned as part of

View File

@@ -1 +0,0 @@
9732a4e80aad23101faa442700c2172a37257c43

View File

@@ -0,0 +1 @@
7ff51040bbcc9085dcb9a24a2c2a3cc7ac995988

View File

@@ -1 +0,0 @@
b5dc3760021fba0ae67b4f11d37ffa52a4eac4f4

View File

@@ -0,0 +1 @@
b0712cc659e72b9da0f5b03872d2476ab4a695f7

View File

@@ -1 +0,0 @@
48f0aab551fa9e2eb4c81e2debf40e9fff595405

View File

@@ -0,0 +1 @@
31afbe46b65e9933316c7e8dfb8b88dc4b37b6ba

View File

@@ -1 +0,0 @@
891e42d29e8f9474f83c050e4ee6a4512d4f4e71

View File

@@ -0,0 +1 @@
c9e2593fdf398c5f8906a704db037d17b2de4b2a

View File

@@ -16,7 +16,7 @@
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin</elasticsearch.plugin.classname>
<amazonaws.version>1.10.0</amazonaws.version>
<amazonaws.version>1.10.12</amazonaws.version>
<tests.jvms>1</tests.jvms>
<tests.rest.suite>cloud_aws</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>

View File

@@ -34,7 +34,6 @@ import java.io.InputStream;
import java.net.URL;
import java.util.Date;
import java.util.List;
import org.elasticsearch.common.SuppressForbidden;
/**
@@ -579,4 +578,54 @@ public class AmazonS3Wrapper implements AmazonS3 {
public boolean isRequesterPaysEnabled(String bucketName) throws AmazonServiceException, AmazonClientException {
return delegate.isRequesterPaysEnabled(bucketName);
}
@Override
public ObjectListing listNextBatchOfObjects(ListNextBatchOfObjectsRequest listNextBatchOfObjectsRequest) throws AmazonClientException, AmazonServiceException {
return delegate.listNextBatchOfObjects(listNextBatchOfObjectsRequest);
}
@Override
public VersionListing listNextBatchOfVersions(ListNextBatchOfVersionsRequest listNextBatchOfVersionsRequest) throws AmazonClientException, AmazonServiceException {
return delegate.listNextBatchOfVersions(listNextBatchOfVersionsRequest);
}
@Override
public Owner getS3AccountOwner(GetS3AccountOwnerRequest getS3AccountOwnerRequest) throws AmazonClientException, AmazonServiceException {
return delegate.getS3AccountOwner(getS3AccountOwnerRequest);
}
@Override
public BucketLoggingConfiguration getBucketLoggingConfiguration(GetBucketLoggingConfigurationRequest getBucketLoggingConfigurationRequest) throws AmazonClientException, AmazonServiceException {
return delegate.getBucketLoggingConfiguration(getBucketLoggingConfigurationRequest);
}
@Override
public BucketVersioningConfiguration getBucketVersioningConfiguration(GetBucketVersioningConfigurationRequest getBucketVersioningConfigurationRequest) throws AmazonClientException, AmazonServiceException {
return delegate.getBucketVersioningConfiguration(getBucketVersioningConfigurationRequest);
}
@Override
public BucketLifecycleConfiguration getBucketLifecycleConfiguration(GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest) {
return delegate.getBucketLifecycleConfiguration(getBucketLifecycleConfigurationRequest);
}
@Override
public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration(GetBucketCrossOriginConfigurationRequest getBucketCrossOriginConfigurationRequest) {
return delegate.getBucketCrossOriginConfiguration(getBucketCrossOriginConfigurationRequest);
}
@Override
public BucketTaggingConfiguration getBucketTaggingConfiguration(GetBucketTaggingConfigurationRequest getBucketTaggingConfigurationRequest) {
return delegate.getBucketTaggingConfiguration(getBucketTaggingConfigurationRequest);
}
@Override
public BucketNotificationConfiguration getBucketNotificationConfiguration(GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest) throws AmazonClientException, AmazonServiceException {
return delegate.getBucketNotificationConfiguration(getBucketNotificationConfigurationRequest);
}
@Override
public BucketReplicationConfiguration getBucketReplicationConfiguration(GetBucketReplicationConfigurationRequest getBucketReplicationConfigurationRequest) throws AmazonServiceException, AmazonClientException {
return delegate.getBucketReplicationConfiguration(getBucketReplicationConfigurationRequest);
}
}

pom.xml (12 lines changed)
View File

@@ -354,7 +354,7 @@
<artifactId>joda-time</artifactId>
<!-- joda 2.0 moved to using volatile fields for datetime -->
<!-- When updating to a new version, make sure to update our copy of BaseDateTime -->
<version>2.8</version>
<version>2.8.2</version>
</dependency>
<dependency>
<groupId>org.joda</groupId>
@@ -875,10 +875,16 @@
<version>2.4.1</version>
</plugin>
<plugin>
<!-- We just declare which plugin version to use. Each project can then have its own settings -->
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<version>2.7</version>
<!-- add some additional binary types to prevent maven from
screwing them up with resource filtering -->
<configuration>
<nonFilteredFileExtensions>
<nonFilteredFileExtension>ico</nonFilteredFileExtension>
</nonFilteredFileExtensions>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
@@ -976,7 +982,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-eclipse-plugin</artifactId>
<version>2.9</version>
<version>2.10</version>
<configuration>
<buildOutputDirectory>eclipse-build</buildOutputDirectory>
<downloadSources>true</downloadSources>

View File

@@ -31,37 +31,31 @@
# Load test utilities
load packaging_test_utils
# Cleans everything for the 1st execution
setup() {
if [ "$BATS_TEST_NUMBER" -eq 1 ]; then
clean_before_test
fi
skip_not_tar_gz
}
##################################
# Install TAR GZ package
##################################
@test "[TAR] tar command is available" {
skip_not_tar_gz
# Cleans everything for the 1st execution
clean_before_test
run tar --version
[ "$status" -eq 0 ]
}
@test "[TAR] archive is available" {
skip_not_tar_gz
count=$(find . -type f -name 'elasticsearch*.tar.gz' | wc -l)
[ "$count" -eq 1 ]
}
@test "[TAR] archive is not installed" {
skip_not_tar_gz
count=$(find /tmp -type d -name 'elasticsearch*' | wc -l)
[ "$count" -eq 0 ]
}
@test "[TAR] install archive" {
skip_not_tar_gz
# Install the archive
install_archive
@@ -73,8 +67,6 @@ setup() {
# Check that the archive is correctly installed
##################################
@test "[TAR] verify archive installation" {
skip_not_tar_gz
verify_archive_installation "/tmp/elasticsearch"
}
@@ -82,14 +74,11 @@ setup() {
# Check that Elasticsearch is working
##################################
@test "[TAR] test elasticsearch" {
skip_not_tar_gz
start_elasticsearch_service
run_elasticsearch_tests
stop_elasticsearch_service
run rm -rf "/tmp/elasticsearch"
[ "$status" -eq 0 ]
rm -rf "/tmp/elasticsearch"
}

View File

@@ -242,34 +242,27 @@ install_archive() {
eshome="$1"
fi
run tar -xzvf elasticsearch*.tar.gz -C "$eshome" >&2
[ "$status" -eq 0 ]
tar -xzvf elasticsearch*.tar.gz -C "$eshome"
run find "$eshome" -depth -type d -name 'elasticsearch*' -exec mv {} "$eshome/elasticsearch" \;
[ "$status" -eq 0 ]
find "$eshome" -depth -type d -name 'elasticsearch*' -exec mv {} "$eshome/elasticsearch" \;
# ES cannot run as root so create elasticsearch user & group if needed
if ! getent group "elasticsearch" > /dev/null 2>&1 ; then
if is_dpkg; then
run addgroup --system "elasticsearch"
[ "$status" -eq 0 ]
addgroup --system "elasticsearch"
else
run groupadd -r "elasticsearch"
[ "$status" -eq 0 ]
groupadd -r "elasticsearch"
fi
fi
if ! id "elasticsearch" > /dev/null 2>&1 ; then
if is_dpkg; then
run adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch"
[ "$status" -eq 0 ]
adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch"
else
run useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch"
[ "$status" -eq 0 ]
useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch"
fi
fi
run chown -R elasticsearch:elasticsearch "$eshome/elasticsearch"
[ "$status" -eq 0 ]
chown -R elasticsearch:elasticsearch "$eshome/elasticsearch"
}
@@ -354,11 +347,12 @@ clean_before_test() {
}
start_elasticsearch_service() {
if [ -f "/tmp/elasticsearch/bin/elasticsearch" ]; then
run /bin/su -s /bin/sh -c '/tmp/elasticsearch/bin/elasticsearch -d -p /tmp/elasticsearch/elasticsearch.pid' elasticsearch
[ "$status" -eq 0 ]
# su and the Elasticsearch init script work together to break bats.
# sudo isolates bats enough from the init script so everything continues
# to tick along
sudo -u elasticsearch /tmp/elasticsearch/bin/elasticsearch -d \
-p /tmp/elasticsearch/elasticsearch.pid
elif is_systemd; then
run systemctl daemon-reload
[ "$status" -eq 0 ]
@@ -383,9 +377,8 @@ start_elasticsearch_service() {
pid=$(cat /tmp/elasticsearch/elasticsearch.pid)
[ "x$pid" != "x" ] && [ "$pid" -gt 0 ]
run ps $pid
[ "$status" -eq 0 ]
echo "Looking for elasticsearch pid...."
ps $pid
elif is_systemd; then
run systemctl is-active elasticsearch.service
[ "$status" -eq 0 ]
@@ -400,14 +393,11 @@ start_elasticsearch_service() {
}
stop_elasticsearch_service() {
if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then
pid=$(cat /tmp/elasticsearch/elasticsearch.pid)
[ "x$pid" != "x" ] && [ "$pid" -gt 0 ]
run kill -SIGTERM $pid
[ "$status" -eq 0 ]
kill -SIGTERM $pid
elif is_systemd; then
run systemctl stop elasticsearch.service
[ "$status" -eq 0 ]
@@ -428,36 +418,63 @@ stop_elasticsearch_service() {
# Waits for Elasticsearch to reach a given status (defaults to "green")
wait_for_elasticsearch_status() {
local status="green"
local desired_status="green"
if [ "x$1" != "x" ]; then
status="$1"
fi
# Try to connect to elasticsearch and wait for expected status
wget --quiet --retry-connrefused --waitretry=1 --timeout=60 \
--output-document=/dev/null "http://localhost:9200/_cluster/health?wait_for_status=$status&timeout=60s" || true
# Checks the cluster health
curl -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
if [ $? -ne 0 ]; then
echo "error when checking cluster health" >&2
exit 1
echo "Making sure elasticsearch is up..."
wget -O - --retry-connrefused --waitretry=1 --timeout=60 http://localhost:9200 || {
echo "Looks like elasticsearch never started. Here is its log:"
if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then
cat /tmp/elasticsearch/logs/elasticsearch.log
else
if [ -e '/var/log/elasticsearch/elasticsearch.log' ]; then
cat /var/log/elasticsearch/elasticsearch.log
else
echo "The elasticsearch log doesn't exist. Maybe /vag/log/messages has something:"
tail -n20 /var/log/messages
fi
fi
false
}
echo "Tring to connect to elasticsearch and wait for expected status..."
curl -sS "http://localhost:9200/_cluster/health?wait_for_status=$desired_status&timeout=60s&pretty"
if [ $? -eq 0 ]; then
echo "Connected"
else
echo "Unable to connect to Elastisearch"
false
fi
echo "Checking that the cluster health matches the waited for status..."
run curl -sS -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
if [ "$status" -ne 0 ]; then
echo "error when checking cluster health. code=$status output="
echo $output
false
fi
echo $output | grep $desired_status || {
echo "unexpected status: '$output' wanted '$desired_status'"
false
}
}
# Executes some very basic Elasticsearch tests
run_elasticsearch_tests() {
# TODO this assertion is the same as the one made when waiting for
# elasticsearch to start
run curl -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
[ "$status" -eq 0 ]
echo "$output" | grep -w "green"
run curl -XPOST 'http://localhost:9200/library/book/1?refresh=true' -d '{"title": "Elasticsearch - The Definitive Guide"}' 2>&1
[ "$status" -eq 0 ]
curl -s -XPOST 'http://localhost:9200/library/book/1?refresh=true&pretty' -d '{
"title": "Elasticsearch - The Definitive Guide"
}'
run curl -XGET 'http://localhost:9200/_cat/count?h=count&v=false'
[ "$status" -eq 0 ]
echo "$output" | grep -w "1"
curl -s -XGET 'http://localhost:9200/_cat/count?h=count&v=false&pretty' |
grep -w "1"
run curl -XDELETE 'http://localhost:9200/_all'
[ "$status" -eq 0 ]
curl -s -XDELETE 'http://localhost:9200/_all'
}